diff --git a/.config/nextest.toml b/.config/nextest.toml index 37b7dfcea0..c911f7bd63 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -8,9 +8,15 @@ slow-timeout = { period = "60s", terminate-after = 2 } filter = """ test(=zenoh_session_unicast) | test(=zenoh_session_multicast) | +test(=zenoh_unicity_p2p) | +test(=zenoh_unicity_brokered) | test(=transport_tcp_intermittent) | test(=transport_tcp_intermittent_for_lowlatency_transport) | -test(=three_node_combination) +test(=three_node_combination) | +test(=watchdog_alloc_concurrent) | +test(=header_check_memory_concurrent) | +test(=header_link_concurrent) | +test(=header_link_failure_concurrent) """ threads-required = 'num-cpus' slow-timeout = { period = "60s", terminate-after = 6 } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 634d3ec500..e932f26bb1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,13 +49,28 @@ jobs: uses: taiki-e/install-action@cargo-deny - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" - - name: Clippy - run: cargo +stable clippy --all-targets -- --deny warnings + - name: Clippy zenoh no-default-features + run: cargo +stable clippy -p zenoh --all-targets --no-default-features -- --deny warnings - - name: Clippy unstable targets - run: cargo +stable clippy --all-targets --features unstable -- --deny warnings + - name: Clippy zenoh + run: cargo +stable clippy -p zenoh --all-targets -- --deny warnings + + - name: Clippy zenoh unstable + run: cargo +stable clippy -p zenoh --all-targets --features unstable -- --deny warnings + + - name: Clippy zenoh internal + run: cargo +stable clippy -p zenoh --all-targets --features unstable,internal -- --deny warnings + + - name: Clippy zenoh shared-memory + run: cargo +stable clippy -p zenoh --all-targets --features unstable,shared-memory -- --deny warnings + + - name: Clippy workspace + run: cargo +stable clippy --all-targets --features test -- --deny warnings + + - name: Clippy workspace unstable + run: cargo +stable clippy --all-targets --features unstable,test -- --deny warnings - name: Clippy all features if: ${{ matrix.os == 'ubuntu-latest' || matrix.os == 'macOS-latest' }} @@ -71,6 +86,11 @@ jobs: - name: Run doctests run: cargo test --doc + - name: Build doc + run: cargo doc --no-deps --features unstable + env: + RUSTDOCFLAGS: -Dwarnings + - name: Check licenses run: cargo deny check licenses @@ -103,11 +123,15 @@ jobs: uses: taiki-e/install-action@nextest - name: Run tests - run: cargo nextest run --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + run: cargo nextest run -F test --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Run tests with SHM + if: ${{ matrix.os == 'macOS-latest' || matrix.os == 'windows-latest' }} + run: cargo nextest run -F test -F shared-memory -F unstable -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace + + - name: Run tests with SHM + unixpipe if: ${{ matrix.os == 'ubuntu-latest' }} - run: cargo nextest run -F shared-memory -F transport_unixpipe -p zenoh-transport + run: cargo nextest run -F test -F shared-memory -F unstable -F transport_unixpipe -E 'not (test(test_default_features))' --exclude zenoh-examples --exclude zenoh-plugin-example --workspace - name: Check for feature leaks if: ${{ matrix.os == 'ubuntu-latest' }} @@ -137,7 +161,7 @@ jobs: name: Typos Check 
runs-on: ubuntu-latest steps: - - name: Clone this repository + - name: Clone this repository uses: actions/checkout@v4 - name: Check spelling diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 9452e0da86..bb245d4747 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -53,7 +53,7 @@ jobs: run: rustup component add rustfmt clippy - name: Code format check - run: cargo fmt --check + run: cargo fmt --check -- cargo fmt --check -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..86dc1703ed --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +# +# Copyright (c) 2024 ZettaScale Technology +# +# This program and the accompanying materials are made available under the +# terms of the Eclipse Public License 2.0 which is available at +# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +# which is available at https://www.apache.org/licenses/LICENSE-2.0. +# +# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +# +# Contributors: +# ZettaScale Zenoh Team, +# +repos: + - repo: local + hooks: + - id: fmt + name: fmt + entry: cargo fmt -- --config "unstable_features=true,imports_granularity=Crate,group_imports=StdExternalCrate" + language: system + types: [rust] diff --git a/Cargo.lock b/Cargo.lock index a28a498391..8768c911cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,9 +70,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if 1.0.0", "cipher 0.4.4", @@ -115,9 +115,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.10", @@ -179,9 +179,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "array-init" @@ -236,13 +236,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] -name = "async-attributes" -version = "1.1.2" +name = "asn1-rs" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ + 
"asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.36", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +dependencies = [ + "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.52", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", ] [[package]] @@ -256,6 +285,18 @@ dependencies = [ "futures-core", ] +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite 0.2.13", +] + [[package]] name = "async-dup" version = "1.2.2" @@ -268,30 +309,29 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ - "async-lock", "async-task", "concurrent-queue", - "fastrand 1.9.0", - "futures-lite", + "fastrand 2.0.2", + "futures-lite 2.0.0", "slab", ] [[package]] name = "async-global-executor" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-executor", - "async-io", - "async-lock", + "async-io 2.3.3", + "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.0.0", "once_cell", "tokio", ] @@ -302,7 +342,7 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8101020758a4fc3a7c326cb42aa99e9fa77cbfb76987c128ad956406fe1f70a7" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-dup", "async-std", "futures-core", @@ -318,20 +358,39 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "cfg-if 1.0.0", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.25", "slab", "socket2 0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" +dependencies = [ + "async-lock 3.4.0", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-io", + "futures-lite 2.0.0", + "parking", + "polling 3.7.2", + "rustix 0.38.32", + "slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" version = "2.8.0" @@ -341,19 +400,30 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite 0.2.13", +] + [[package]] name = "async-process" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "autocfg", "blocking", "cfg-if 1.0.0", "event-listener 2.5.3", - "futures-lite", + "futures-lite 1.13.0", "rustix 0.37.25", "signal-hook", "windows-sys 0.48.0", @@ -386,7 +456,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53bba003996b8fd22245cd0c59b869ba764188ed435392cf2796d03b805ade10" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-std", "http-types", "log", @@ -400,17 +470,16 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ - "async-attributes", - "async-channel", + "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -430,13 +499,13 @@ checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -490,6 +559,12 @@ version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -570,17 +645,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel", - "async-lock", + "async-channel 2.3.1", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite", - "log", + "futures-io", + "futures-lite 2.0.0", + "piper", ] [[package]] @@ -603,9 +676,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.5.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cache-padded" @@ -646,6 +719,18 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.30" @@ -709,9 +794,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -719,9 +804,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstream", "anstyle", @@ -731,21 +816,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cobs" @@ -771,9 +856,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -792,18 +877,18 @@ checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "const_format" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ "proc-macro2", "quote", @@ -816,12 +901,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "cookie" version = "0.14.4" @@ -872,18 +951,18 @@ checksum = 
"dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "criterion" @@ -897,7 +976,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -918,7 +997,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -957,12 +1036,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" @@ -1020,6 +1096,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.11" @@ -1031,26 +1121,33 @@ dependencies = [ [[package]] name = "derive-new" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" +checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] name = "derive_more" -version = "0.99.17" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -1101,6 +1198,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "dyn-clone" version = "1.0.13" @@ -1109,9 +1217,9 @@ checksum = 
"bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" @@ -1176,23 +1284,34 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", "pin-project-lite 0.2.13", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite 0.2.13", +] + [[package]] name = "fancy-regex" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" +checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" dependencies = [ "bit-set", - "regex", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -1251,7 +1370,7 @@ dependencies = [ "futures-core", "futures-sink", "nanorand", - "spin 0.9.8", + "spin", ] [[package]] @@ -1277,18 +1396,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "fraction" -version = "0.13.1" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3027ae1df8d41b4bed2241c8fdad4acc1e7af60c8e17743534b545e77182d678" +checksum = "0f158e3ff0a1b334408dc9fb811cd99b446986f4d8b741bb08f9df1604085ae7" dependencies = [ "lazy_static", "num", @@ -1296,9 +1415,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1311,9 +1430,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1321,15 +1440,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = 
"dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1338,9 +1457,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -1357,34 +1476,49 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c1155db57329dca6d018b61e76b1488ce9a2e5e44028cac420a5898f4fcef63" +dependencies = [ + "fastrand 2.0.2", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.13", + "waker-fn", +] + [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1450,24 +1584,22 @@ checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "git-version" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6b0decc02f4636b9ccad390dcbe77b722a77efedfa393caf8379a51d5c61899" +checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" dependencies = [ "git-version-macro", - "proc-macro-hack", ] [[package]] name = "git-version-macro" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe69f1cbdb6e28af2bac214e943b99ce8a0a06b447d15d3e61161b0423139f3f" +checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -1509,34 +1641,31 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", + "allocator-api2", ] [[package]] -name = "hashbrown" -version = "0.14.0" +name = "heck" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" -dependencies = [ - "ahash", - "allocator-api2", -] +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] -name = "heck" -version = "0.4.1" +name = "hermit-abi" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hkdf" @@ -1579,11 +1708,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1638,11 +1767,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", - "async-channel", + "async-channel 1.9.0", "async-std", "base64 0.13.1", "cookie", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite 0.2.13", "rand 0.7.3", @@ -1688,7 +1817,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.5.6", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1733,9 +1862,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1743,12 +1872,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown", ] [[package]] @@ -1781,7 +1910,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -1807,7 +1936,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "rustix 0.38.32", "windows-sys 0.48.0", ] @@ -1821,6 
+1950,12 @@ dependencies = [ "nom", ] +[[package]] +name = "iter-read" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c397ca3ea05ad509c4ec451fea28b4771236a376ca1c69fd5143aae0cf8f93c4" + [[package]] name = "itertools" version = "0.10.5" @@ -1830,6 +1965,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -1878,13 +2022,13 @@ dependencies = [ [[package]] name = "jsonschema" -version = "0.17.1" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a071f4f7efc9a9118dfb627a0a94ef247986e1ab8606a4c806ae2b3aa3b6978" +checksum = "f5f037c58cadb17e8591b620b523cc6a7ab2b91b6ce3121f8eb4171f8d80115c" dependencies = [ "ahash", "anyhow", - "base64 0.21.4", + "base64 0.22.1", "bytecount", "fancy-regex", "fraction", @@ -1894,7 +2038,7 @@ dependencies = [ "memchr", "num-cmp", "once_cell", - "parking_lot", + "parking_lot 0.12.1", "percent-encoding", "regex", "serde", @@ -1915,11 +2059,11 @@ dependencies = [ [[package]] name = "keyed-set" -version = "0.4.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79e110283e09081809ca488cf3a9709270c6d4d4c4a32674c39cc438366615a" +checksum = "0a3ec39d2dc17953a1540d63906a112088f79b2e46833b4ed65bc9de3904ae34" dependencies = [ - "hashbrown 0.13.2", + "hashbrown", ] [[package]] @@ -1933,18 +2077,18 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin 0.5.2", + "spin", ] [[package]] name = "libc" -version = "0.2.153" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" @@ -1984,6 +2128,15 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74ee94b5ad113c7cb98c5a040f783d0952ee4fe100993881d1673c2cb002dd23" +dependencies = [ + "owned-alloc", +] + [[package]] name = "log" version = "0.4.20" @@ -2000,8 +2153,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f56d36f573486ba7f462b62cbae597fef7d5d93665e7047956b457531b8a1ced" dependencies = [ - "prost", - "prost-types", + "prost 0.11.9", + "prost-types 0.11.9", ] [[package]] @@ -2042,9 +2195,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" @@ -2106,6 +2259,18 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + [[package]] name = "mio-serial" version = "5.0.5" @@ -2113,7 +2278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20a4c60ca5c9c0e114b3bd66ff4aa5f9b2b175442be51ca6c4365d687a97a2ac" dependencies = [ "log", - "mio", + "mio 0.8.11", "nix 0.26.4", "serialport", "winapi", @@ -2184,6 +2349,18 @@ dependencies = [ "memoffset 0.9.0", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.5.0", + "cfg-if 1.0.0", + "cfg_aliases 0.2.1", + "libc", +] + [[package]] name = "no-std-net" version = "0.6.0" @@ -2212,9 +2389,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" dependencies = [ "num-bigint", "num-complex", @@ -2260,9 +2437,9 @@ checksum = "63335b2e2c34fae2fb0aa2cecfd9f0832a1e24b3b32ecec612c3426d46dc8aaa" [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -2275,19 +2452,18 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -2308,9 +2484,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -2322,7 +2498,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] @@ -2335,6 +2511,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.19.0" @@ -2376,7 +2561,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2405,9 +2590,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "4.1.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "536900a8093134cf9ccf00a27deb3532421099e958d9dd431135d0c7543ca1e8" +checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6" dependencies = [ "num-traits", ] @@ -2418,12 +2603,29 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owned-alloc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30fceb411f9a12ff9222c5f824026be368ff15dc2f13468d850c7d3f502205d6" + [[package]] name = "parking" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.1" @@ -2431,7 +2633,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.8", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -2449,17 +2665,17 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" -version = "2.0.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "serde", ] @@ -2474,9 +2690,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -2509,7 +2725,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2525,14 +2741,56 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap", ] +[[package]] +name = "phf" +version = 
"0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.52", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -2550,7 +2808,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -2571,6 +2829,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand 2.0.2", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -2628,18 +2897,18 @@ dependencies = [ [[package]] name = "pnet_base" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" +checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad5854abf0067ebbd3967f7d45ebc8976ff577ff0c7bd101c4973ae3c70f98fe" +checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" dependencies = [ "ipnetwork", "libc", @@ -2650,9 +2919,9 @@ dependencies = [ [[package]] name = "pnet_sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "417c0becd1b573f6d544f73671070b039051e5ad819cc64aa96377b536128d00" +checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" dependencies = [ "libc", "winapi", @@ -2674,6 +2943,21 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "polling" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" +dependencies = [ + "cfg-if 1.0.0", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite 0.2.13", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "polyval" version = "0.4.5" @@ -2697,6 +2981,15 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro-hack" version = "0.5.20+deprecated" @@ -2705,9 +2998,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -2719,7 +3012,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.11.9", +] + +[[package]] +name = "prost" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +dependencies = [ + "bytes", + "prost-derive 0.13.1", ] [[package]] @@ -2729,33 +3032,56 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", ] +[[package]] +name = "prost-derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "prost", + "prost 0.11.9", +] + +[[package]] +name = "prost-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +dependencies = [ + "prost 0.13.1", ] [[package]] name = "quinn" -version = "0.11.1" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904e3d3ba178131798c6d9375db2b13b34337d489b089fc5ba0825a2ff1bee73" +checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" dependencies = [ "bytes", "pin-project-lite 0.2.13", "quinn-proto", "quinn-udp", - "rustc-hash", + "rustc-hash 2.0.0", "rustls", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -2763,14 +3089,14 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e974563a4b1c2206bbc61191ca4da9c22e4308b4c455e8906751cc7828393f08" +checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" dependencies = [ "bytes", "rand 0.8.5", - "ring 0.17.6", - "rustc-hash", + "ring", + "rustc-hash 1.1.0", "rustls", "rustls-platform-verifier", "slab", @@ -2781,22 +3107,22 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4f0def2590301f4f667db5a77f9694fb004f82796dc1a8b1508fafa3d0e8b72" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" dependencies = [ "libc", 
"once_cell", - "socket2 0.5.6", + "socket2 0.5.7", "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2896,12 +3222,13 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.11.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", - "ring 0.16.20", + "ring", + "rustls-pki-types", "time 0.3.36", "yasna", ] @@ -2937,14 +3264,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.8", - "regex-syntax 0.7.5", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -2958,13 +3285,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.4", ] [[package]] @@ -2975,9 +3302,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" @@ -3019,21 +3346,6 @@ dependencies = [ "winreg", ] -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - [[package]] name = "ring" version = "0.17.6" @@ -3043,8 +3355,8 @@ dependencies = [ "cc", "getrandom 0.2.10", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys 0.48.0", ] @@ -3110,6 +3422,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.2.3" @@ -3121,13 +3439,22 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ 
"semver 1.0.18", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.37.25" @@ -3157,13 +3484,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.9" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a218f0f6d05669de4eabfb24f31ce802035c952429d037507b4a4a39f0e60c5b" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "log", "once_cell", - "ring 0.17.6", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -3177,7 +3504,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "schannel", "security-framework", @@ -3194,19 +3521,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-platform-verifier" @@ -3237,15 +3564,21 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ - "ring 0.17.6", + "ring", "rustls-pki-types", - "untrusted 0.9.0", + "untrusted", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -3272,11 +3605,12 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.13" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763f8cd0d4c71ed8389c90cb8100cba87e763bd01a8e614d4f0af97bcd50a161" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" dependencies = [ "dyn-clone", + "either", "schemars_derive", "serde", "serde_json", @@ -3284,14 +3618,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.13" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -3357,33 +3691,56 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name 
= "serde" -version = "1.0.188" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-pickle" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762ad136a26407c6a80825813600ceeab5e613660d93d79a41f0ec877171e71" +dependencies = [ + "byteorder", + "iter-read", + "num-bigint", + "num-traits", + "serde", +] + +[[package]] +name = "serde_cbor" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +dependencies = [ + "half", + "serde", +] + [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] name = "serde_derive_internals" -version = "0.26.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.52", ] [[package]] @@ -3397,11 +3754,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -3431,9 +3789,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ "indexmap", "itoa", @@ -3509,6 +3867,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha2-const-stable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" + [[package]] name = "sha3" version = "0.10.8" @@ -3588,6 +3952,12 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -3599,9 +3969,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snap" @@ -3621,20 +3991,14 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -3654,6 +4018,40 @@ dependencies = [ "der", ] +[[package]] +name = "stabby" +version = "36.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "311d6bcf0070c462ff626122ec2246f42bd2acd44b28908eedbfd07d500c7d99" +dependencies = [ + "rustversion", + "stabby-abi", +] + +[[package]] +name = "stabby-abi" +version = "36.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6daae1a0707399f56d27fce7f212e50e31d215112a447e1bbcd837ae1bf5f49" +dependencies = [ + "rustversion", + "sha2-const-stable", + "stabby-macros", +] + +[[package]] +name = "stabby-macros" +version = "36.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43cf89a0cc9131279235baf8599b0e073fbcb096419204de0cc5d1a48ae73f74" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "rand 0.8.5", + "syn 1.0.109", +] + [[package]] name = "standback" version = "0.2.17" @@ -3669,6 +4067,34 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "static_init" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" +dependencies = [ + "bitflags 1.3.2", + "cfg_aliases 0.1.1", + "libc", + "parking_lot 0.11.2", + "parking_lot_core 0.8.6", + "static_init_macro", + "winapi", +] + +[[package]] +name = "static_init_macro" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" +dependencies = [ + "cfg_aliases 0.1.1", + "memchr", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "stdweb" version = "0.4.20" @@ -3724,7 +4150,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af91f480ee899ab2d9f8435bfdfc14d08a5754bd9d3fef1f1a1c23336aad6c8b" dependencies = [ - "async-channel", + "async-channel 1.9.0", "cfg-if 1.0.0", "futures-core", "pin-project-lite 0.2.13", @@ -3732,9 +4158,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" @@ -3823,9 +4249,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -3838,6 +4264,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3888,7 +4325,21 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", +] + +[[package]] +name = "thread-priority" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d3b04d33c9633b8662b167b847c7ab521f83d1ae20f2321b65b5b925e532e36" +dependencies = [ + "bitflags 2.5.0", + "cfg-if 1.0.0", + "libc", + "log", + "rustversion", + "winapi", ] [[package]] @@ -3946,6 +4397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", + "itoa", "num-conv", "powerfmt", "serde", @@ -4028,30 +4480,29 @@ dependencies = [ [[package]] name = "tokio" -version = "1.36.0" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.2", "pin-project-lite 0.2.13", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] @@ -4101,9 +4552,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", @@ -4113,18 +4564,17 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "futures-util", - "hashbrown 0.14.0", + "hashbrown", "pin-project-lite 0.2.13", "tokio", - "tracing", ] [[package]] @@ -4140,6 +4590,23 @@ dependencies = [ "vsock", ] +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" + +[[package]] +name = "toml_edit" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + [[package]] name = "tower-service" version = "0.3.2" @@ -4167,7 +4634,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + 
"syn 2.0.52", ] [[package]] @@ -4262,9 +4729,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.21.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", "bytes", @@ -4275,7 +4742,6 @@ dependencies = [ "rand 0.8.5", "sha1 0.10.5", "thiserror", - "url", "utf-8", ] @@ -4303,23 +4769,23 @@ checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uhlc" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b6df3f3e948b40e20c38a6d1fd6d8f91b3573922fc164e068ad3331560487e" +checksum = "79ac3c37bd9506595768f0387bd39d644525728b4a1d783218acabfb56356db7" dependencies = [ "humantime", "lazy_static", "log", "rand 0.8.5", "serde", - "spin 0.9.8", + "spin", ] [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -4329,9 +4795,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -4364,21 +4830,21 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] -name = "untrusted" -version = "0.9.0" +name = "unwrap-infallible" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +checksum = "151ac09978d3c2862c4e39b557f4eceee2cc72150bc4cb4f16abf061b6e381fb" [[package]] name = "unzip-n" @@ -4393,9 +4859,9 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -4423,9 +4889,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.4.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = 
"81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom 0.2.10", ] @@ -4584,7 +5050,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-shared", ] @@ -4618,7 +5084,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4641,9 +5107,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.0" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" dependencies = [ "rustls-pki-types", ] @@ -4872,6 +5338,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -4882,6 +5357,23 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time 0.3.36", +] + [[package]] name = "yasna" version = "0.5.2" @@ -4906,32 +5398,39 @@ dependencies = [ [[package]] name = "zenoh" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "ahash", "async-trait", - "base64 0.21.4", - "const_format", - "event-listener 4.0.0", + "base64 0.22.1", + "bytes", + "event-listener 5.3.1", "flume", "form_urlencoded", "futures", "git-version", + "itertools 0.13.0", "lazy_static", + "once_cell", "ordered-float", "paste", "petgraph", + "phf", "rand 0.8.5", "regex", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "serde", + "serde-pickle", + "serde_cbor", "serde_json", - "socket2 0.5.6", + "serde_yaml", + "socket2 0.5.7", "stop-token", "tokio", "tokio-util", "tracing", "uhlc", + "unwrap-infallible", "uuid", "vec_map", "zenoh-buffers", @@ -4956,26 +5455,23 @@ dependencies = [ [[package]] name = "zenoh-backend-example" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "async-std", "async-trait", "const_format", "futures", "git-version", "serde_json", + "tokio", "tracing", "zenoh", - "zenoh-core", "zenoh-plugin-trait", - "zenoh-result", - "zenoh-util", "zenoh_backend_traits", ] [[package]] name = "zenoh-buffers" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "rand 0.8.5", "zenoh-collections", @@ -4983,7 +5479,7 @@ dependencies = [ [[package]] name = "zenoh-codec" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "criterion", "rand 0.8.5", @@ -4998,11 +5494,14 @@ dependencies = [ [[package]] name = "zenoh-collections" -version = "0.11.0-dev" +version = "1.0.0-dev" +dependencies = [ + "rand 0.8.5", +] [[package]] name = "zenoh-config" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "flume", "json5", @@ -5012,8 +5511,10 @@ dependencies = [ "serde_json", "serde_yaml", "tracing", + "uhlc", "validated_struct", 
"zenoh-core", + "zenoh-macros", "zenoh-protocol", "zenoh-result", "zenoh-util", @@ -5021,7 +5522,7 @@ dependencies = [ [[package]] name = "zenoh-core" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-global-executor", "lazy_static", @@ -5032,9 +5533,9 @@ dependencies = [ [[package]] name = "zenoh-crypto" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "aes 0.8.3", + "aes 0.8.4", "hmac 0.12.1", "rand 0.8.5", "rand_chacha 0.3.1", @@ -5044,45 +5545,48 @@ dependencies = [ [[package]] name = "zenoh-examples" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "clap", "flume", "futures", "git-version", "json5", + "prost 0.13.1", + "prost-types 0.13.1", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", + "serde_json", + "serde_yaml", "tokio", "tracing", "zenoh", + "zenoh-collections", "zenoh-ext", "zenoh-util", ] [[package]] name = "zenoh-ext" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "bincode", "flume", "futures", + "phf", "serde", + "serde_cbor", + "serde_json", "tokio", "tracing", "zenoh", - "zenoh-core", "zenoh-macros", - "zenoh-result", - "zenoh-runtime", - "zenoh-sync", - "zenoh-task", "zenoh-util", ] [[package]] name = "zenoh-ext-examples" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "bincode", "clap", @@ -5098,11 +5602,11 @@ dependencies = [ [[package]] name = "zenoh-keyexpr" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "ahash", "criterion", - "hashbrown 0.14.0", + "hashbrown", "keyed-set", "lazy_static", "rand 0.8.5", @@ -5114,7 +5618,7 @@ dependencies = [ [[package]] name = "zenoh-link" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "rcgen", @@ -5135,10 +5639,10 @@ dependencies = [ [[package]] name = "zenoh-link-commons" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "flume", "futures", "rustls", @@ -5160,14 +5664,14 @@ dependencies = [ [[package]] name = "zenoh-link-quic" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "futures", "quinn", "rustls", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "rustls-webpki", "secrecy", @@ -5176,6 +5680,8 @@ dependencies = [ "tokio-util", "tracing", "webpki-roots", + "x509-parser", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5188,7 +5694,7 @@ dependencies = [ [[package]] name = "zenoh-link-serial" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures", @@ -5209,9 +5715,10 @@ dependencies = [ [[package]] name = "zenoh-link-tcp" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", + "socket2 0.5.7", "tokio", "tokio-util", "tracing", @@ -5226,21 +5733,24 @@ dependencies = [ [[package]] name = "zenoh-link-tls" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.22.1", "futures", "rustls", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "rustls-webpki", "secrecy", + "socket2 0.5.7", "tokio", "tokio-rustls", "tokio-util", "tracing", "webpki-roots", + "x509-parser", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5253,10 +5763,10 @@ dependencies = [ [[package]] name = "zenoh-link-udp" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tokio-util", "tracing", @@ 
-5273,18 +5783,19 @@ dependencies = [ [[package]] name = "zenoh-link-unixpipe" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "advisory-lock", "async-trait", "filepath", - "nix 0.27.1", + "nix 0.29.0", "rand 0.8.5", "tokio", "tokio-util", "tracing", "unix-named-pipe", "zenoh-buffers", + "zenoh-collections", "zenoh-config", "zenoh-core", "zenoh-link-commons", @@ -5295,11 +5806,11 @@ dependencies = [ [[package]] name = "zenoh-link-unixsock_stream" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures", - "nix 0.27.1", + "nix 0.29.0", "tokio", "tokio-util", "tracing", @@ -5314,7 +5825,7 @@ dependencies = [ [[package]] name = "zenoh-link-vsock" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "libc", @@ -5333,7 +5844,7 @@ dependencies = [ [[package]] name = "zenoh-link-ws" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", "futures-util", @@ -5353,38 +5864,36 @@ dependencies = [ [[package]] name = "zenoh-macros" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", "zenoh-keyexpr", ] [[package]] name = "zenoh-plugin-example" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "async-std", "const_format", "futures", "git-version", + "lazy_static", "serde_json", + "tokio", "tracing", "zenoh", - "zenoh-core", "zenoh-plugin-trait", - "zenoh-result", "zenoh-util", ] [[package]] name = "zenoh-plugin-rest" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "anyhow", - "async-std", - "base64 0.21.4", + "base64 0.22.1", "clap", "const_format", "flume", @@ -5393,24 +5902,22 @@ dependencies = [ "http-types", "jsonschema", "lazy_static", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "schemars", "serde", "serde_json", "tide", + "tokio", "tracing", "zenoh", "zenoh-plugin-trait", - "zenoh-result", - "zenoh-util", ] [[package]] name = "zenoh-plugin-storage-manager" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-global-executor", - "async-std", "async-trait", "const_format", "crc", @@ -5419,28 +5926,25 @@ dependencies = [ "futures", "git-version", "jsonschema", + "lazy_static", "libloading", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "schemars", "serde", "serde_json", + "tokio", "tracing", "urlencoding", "zenoh", - "zenoh-collections", - "zenoh-core", - "zenoh-keyexpr", "zenoh-plugin-trait", - "zenoh-result", "zenoh-util", "zenoh_backend_traits", ] [[package]] name = "zenoh-plugin-trait" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "const_format", "libloading", "serde", "serde_json", @@ -5453,7 +5957,7 @@ dependencies = [ [[package]] name = "zenoh-protocol" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "const_format", "lazy_static", @@ -5461,20 +5965,21 @@ dependencies = [ "serde", "uhlc", "zenoh-buffers", + "zenoh-collections", "zenoh-keyexpr", "zenoh-result", ] [[package]] name = "zenoh-result" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "anyhow", ] [[package]] name = "zenoh-runtime" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "futures", "lazy_static", @@ -5482,27 +5987,40 @@ dependencies = [ "ron", "serde", "tokio", - "zenoh-collections", "zenoh-macros", "zenoh-result", ] [[package]] name = "zenoh-shm" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ + "async-trait", + "bincode", + "crc", + "libc", + "lockfree", + "num-traits", + "num_cpus", + "rand 0.8.5", "serde", "shared_memory", 
+ "stabby", + "static_init", + "thread-priority", + "tokio", "tracing", "zenoh-buffers", + "zenoh-core", + "zenoh-macros", "zenoh-result", ] [[package]] name = "zenoh-sync" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "event-listener 4.0.0", + "event-listener 5.3.1", "futures", "tokio", "zenoh-buffers", @@ -5514,7 +6032,7 @@ dependencies = [ [[package]] name = "zenoh-task" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "futures", "tokio", @@ -5526,12 +6044,14 @@ dependencies = [ [[package]] name = "zenoh-transport" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "async-trait", + "crossbeam-utils", "flume", "futures", "futures-util", + "lazy_static", "lz4_flex", "paste", "rand 0.8.5", @@ -5561,10 +6081,10 @@ dependencies = [ [[package]] name = "zenoh-util" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "async-std", "async-trait", + "const_format", "flume", "home", "humantime", @@ -5572,6 +6092,8 @@ dependencies = [ "libc", "libloading", "pnet_datalink", + "serde", + "serde_json", "shellexpand", "tokio", "tracing", @@ -5583,12 +6105,12 @@ dependencies = [ [[package]] name = "zenoh_backend_traits" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ - "async-std", "async-trait", "const_format", "derive_more", + "either", "schemars", "serde_json", "zenoh", @@ -5599,7 +6121,7 @@ dependencies = [ [[package]] name = "zenohd" -version = "0.11.0-dev" +version = "1.0.0-dev" dependencies = [ "clap", "futures", @@ -5607,7 +6129,7 @@ dependencies = [ "json5", "lazy_static", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "tokio", "tracing", "tracing-loki", @@ -5634,7 +6156,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 80d1990dfd..484baaaef0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,8 +56,8 @@ members = [ exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] -rust-version = "1.72.0" -version = "0.11.0-dev" # Zenoh version +rust-version = "1.75.0" +version = "1.0.0-dev" repository = "https://github.com/eclipse-zenoh/zenoh" homepage = "http://zenoh.io" authors = [ @@ -75,137 +75,151 @@ description = "Zenoh: The Zero Overhead Pub/Sub/Query Protocol." # DEFAULT-FEATURES NOTE: Be careful with default-features and additivity! 
# (https://github.com/rust-lang/cargo/issues/11329) [workspace.dependencies] -aes = "0.8.2" -ahash = "0.8.7" -anyhow = { version = "1.0.69", default-features = false } # Default features are disabled due to usage in no_std crates -async-executor = "1.5.0" -async-global-executor = "2.3.1" -async-io = "1.13.0" -async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements -async-trait = "0.1.60" -base64 = "0.21.4" +aes = "0.8.4" +ahash = "0.8.11" +anyhow = { version = "1.0.86", default-features = false } # Default features are disabled due to usage in no_std crates +async-executor = "1.13.0" +async-global-executor = "2.4.1" +async-io = "2.3.4" +async-trait = "0.1.81" +base64 = "0.22.1" bincode = "1.3.3" -clap = { version = "4.4.11", features = ["derive"] } -const_format = "0.2.30" -crc = "3.0.1" +bytes = "1.7.1" +clap = { version = "4.5.16", features = ["derive"] } +console-subscriber = "0.4.0" +const_format = "0.2.32" +crc = "3.2.1" criterion = "0.5" -derive_more = "0.99.17" -derive-new = "0.6.0" +crossbeam-utils = "0.8.20" +derive_more = { version = "1.0.0", features = ["as_ref"] } +derive-new = "0.7.0" tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } tracing-loki = "0.2" -event-listener = "4.0.0" +event-listener = "5.3.1" flume = "0.11" -form_urlencoded = "1.1.0" -futures = "0.3.25" -futures-util = { version = "0.3.25", default-features = false } # Default features are disabled due to some crates' requirements -git-version = "0.3.5" +form_urlencoded = "1.2.1" +futures = "0.3.30" +futures-util = { version = "0.3.30", default-features = false } # Default features are disabled due to some crates' requirements +git-version = "0.3.9" hashbrown = "0.14" hex = { version = "0.4.3", default-features = false } # Default features are disabled due to usage in no_std crates hmac = { version = "0.12.1", features = ["std"] } -home = "0.5.4" +home = "0.5.9" http-types = "2.12.0" humantime = "2.1.0" +itertools = "0.13.0" json5 = "0.4.1" -jsonschema = { version = "0.17.1", default-features = false } -keyed-set = "0.4.4" -lazy_static = "1.4.0" -libc = "0.2.139" +jsonschema = { version = "0.18.1", default-features = false } +keyed-set = "1.0.0" +lazy_static = "1.5.0" +libc = "0.2.158" libloading = "0.8" tracing = "0.1" +lockfree = "0.5" lz4_flex = "0.11" -nix = { version = "0.27", features = ["fs"] } -num_cpus = "1.15.0" -ordered-float = "4.1.1" +nix = { version = "0.29.0", features = ["fs"] } +num_cpus = "1.16.0" +num-traits = { version = "0.2.19", default-features = false } +once_cell = "1.19.0" +ordered-float = "4.2.2" panic-message = "0.3.0" -paste = "1.0.12" -petgraph = "0.6.3" -pnet = "0.34" -pnet_datalink = "0.34" -proc-macro2 = "1.0.51" -quinn = "0.11.1" -quote = "1.0.23" +paste = "1.0.15" +petgraph = "0.6.5" +phf = { version = "0.11.2", features = ["macros"] } +pnet = "0.35.0" +pnet_datalink = "0.35.0" +proc-macro2 = "1.0.86" +quinn = "0.11.3" +quote = "1.0.37" rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates rand_chacha = "0.3.1" -rcgen = "0.11" -regex = "1.7.1" +rcgen = "0.13.1" +regex = "1.10.6" ron = "0.8.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" -rustc_version = "0.4.0" -rustls = { version = "0.23.9", default-features = false, features = [ +rustc_version = "0.4.1" +rustls = { version = "0.23.12", default-features = false, features = [ "logging", "tls12", "ring", ] } -rustls-native-certs = "0.7.0" -rustls-pemfile = "2.0.0" -rustls-webpki = 
"0.102.0" -rustls-pki-types = "1.1.0" -schemars = "0.8.12" +rustls-native-certs = "0.7.3" +rustls-pemfile = "2.1.3" +rustls-webpki = "0.102.7" +rustls-pki-types = "1.8.0" +schemars = { version = "0.8.21", features = ["either"] } secrecy = { version = "0.8.0", features = ["serde", "alloc"] } -serde = { version = "1.0.154", default-features = false, features = [ +serde = { version = "1.0.209", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates -serde_json = "1.0.94" -serde_yaml = "0.9.19" -sha3 = "0.10.6" +serde_cbor = "0.11.2" +serde_json = "1.0.127" +serde-pickle = "1.1.1" +serde_yaml = "0.9.34" +static_init = "1.0.3" +stabby = "36.1.1" +sha3 = "0.10.8" shared_memory = "0.12.4" -shellexpand = "3.0.0" -socket2 = { version = "0.5.1", features = ["all"] } +shellexpand = "3.1.0" +socket2 = { version = "0.5.7", features = ["all"] } stop-token = "0.7.0" syn = "2.0" tide = "0.16.0" -token-cell = { version = "1.4.2", default-features = false } -tokio = { version = "1.35.1", default-features = false } # Default features are disabled due to some crates' requirements -tokio-util = "0.7.10" -tokio-tungstenite = "0.21" +token-cell = { version = "1.5.0", default-features = false } +tokio = { version = "1.39.3", default-features = false } # Default features are disabled due to some crates' requirements +tokio-util = "0.7.11" +tokio-tungstenite = "0.23.1" tokio-rustls = { version = "0.26.0", default-features = false } # tokio-vsock = see: io/zenoh-links/zenoh-link-vsock/Cargo.toml (workspaces does not support platform dependent dependencies) -console-subscriber = "0.2" -typenum = "1.16.0" -uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates +thread-priority = "1.1.0" +typenum = "1.17.0" +uhlc = { version = "0.8.0", default-features = false } # Default features are disabled due to usage in no_std crates +unwrap-infallible = "0.1.5" unzip-n = "0.1.2" -url = "2.3.1" -urlencoding = "2.1.2" -uuid = { version = "1.3.0", default-features = false, features = [ +url = "2.5.2" +urlencoding = "2.1.3" +uuid = { version = "1.10.0", default-features = false, features = [ "v4", ] } # Default features are disabled due to usage in no_std crates validated_struct = "2.1.0" vec_map = "0.8.2" -webpki-roots = "0.26.0" -winapi = { version = "0.3.9", features = ["iphlpapi"] } +webpki-roots = "0.26.3" +winapi = { version = "0.3.9", features = ["iphlpapi", "winerror"] } +x509-parser = "0.16.0" z-serial = "0.2.3" -zenoh-ext = { version = "0.11.0-dev", path = "zenoh-ext" } -zenoh-shm = { version = "0.11.0-dev", path = "commons/zenoh-shm" } -zenoh-result = { version = "0.11.0-dev", path = "commons/zenoh-result", default-features = false } -zenoh-config = { version = "0.11.0-dev", path = "commons/zenoh-config" } -zenoh-protocol = { version = "0.11.0-dev", path = "commons/zenoh-protocol", default-features = false } -zenoh-keyexpr = { version = "0.11.0-dev", path = "commons/zenoh-keyexpr", default-features = false } -zenoh-core = { version = "0.11.0-dev", path = "commons/zenoh-core" } -zenoh-buffers = { version = "0.11.0-dev", path = "commons/zenoh-buffers", default-features = false } -zenoh-util = { version = "0.11.0-dev", path = "commons/zenoh-util" } -zenoh-crypto = { version = "0.11.0-dev", path = "commons/zenoh-crypto" } -zenoh-codec = { version = "0.11.0-dev", path = "commons/zenoh-codec" } -zenoh-sync = { version = "0.11.0-dev", path = "commons/zenoh-sync" } -zenoh-collections = { version = 
"0.11.0-dev", path = "commons/zenoh-collections", default-features = false } -zenoh-macros = { version = "0.11.0-dev", path = "commons/zenoh-macros" } -zenoh-plugin-trait = { version = "0.11.0-dev", path = "plugins/zenoh-plugin-trait", default-features = false } -zenoh_backend_traits = { version = "0.11.0-dev", path = "plugins/zenoh-backend-traits" } -zenoh-transport = { version = "0.11.0-dev", path = "io/zenoh-transport", default-features = false } -zenoh-link-tls = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-tls" } -zenoh-link-tcp = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-tcp" } -zenoh-link-unixsock_stream = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-unixsock_stream" } -zenoh-link-quic = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-quic" } -zenoh-link-udp = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-udp" } -zenoh-link-ws = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-ws" } -zenoh-link-unixpipe = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-unixpipe" } -zenoh-link-serial = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-serial" } -zenoh-link-vsock = { version = "0.11.0-dev", path = "io/zenoh-links/zenoh-link-vsock" } -zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } -zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } -zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } -zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } -zenoh-task = { version = "0.11.0-dev", path = "commons/zenoh-task" } +either = "1.13.0" +zenoh-ext = { version = "1.0.0-dev", path = "zenoh-ext" } +zenoh-shm = { version = "1.0.0-dev", path = "commons/zenoh-shm" } +zenoh-result = { version = "1.0.0-dev", path = "commons/zenoh-result", default-features = false } +zenoh-config = { version = "1.0.0-dev", path = "commons/zenoh-config" } +zenoh-protocol = { version = "1.0.0-dev", path = "commons/zenoh-protocol", default-features = false } +zenoh-keyexpr = { version = "1.0.0-dev", path = "commons/zenoh-keyexpr", default-features = false } +zenoh-core = { version = "1.0.0-dev", path = "commons/zenoh-core" } +zenoh-buffers = { version = "1.0.0-dev", path = "commons/zenoh-buffers", default-features = false } +zenoh-util = { version = "1.0.0-dev", path = "commons/zenoh-util" } +zenoh-crypto = { version = "1.0.0-dev", path = "commons/zenoh-crypto" } +zenoh-codec = { version = "1.0.0-dev", path = "commons/zenoh-codec" } +zenoh-sync = { version = "1.0.0-dev", path = "commons/zenoh-sync" } +zenoh-collections = { version = "1.0.0-dev", path = "commons/zenoh-collections", default-features = false } +zenoh-macros = { version = "1.0.0-dev", path = "commons/zenoh-macros" } +zenoh-plugin-trait = { version = "1.0.0-dev", path = "plugins/zenoh-plugin-trait", default-features = false } +zenoh_backend_traits = { version = "1.0.0-dev", path = "plugins/zenoh-backend-traits" } +zenoh-transport = { version = "1.0.0-dev", path = "io/zenoh-transport", default-features = false } +zenoh-link-tls = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-tls" } +zenoh-link-tcp = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-tcp" } +zenoh-link-unixsock_stream = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-unixsock_stream" } +zenoh-link-quic = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-quic" } +zenoh-link-udp = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-udp" } +zenoh-link-ws = { 
version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-ws" } +zenoh-link-unixpipe = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-unixpipe" } +zenoh-link-serial = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-serial" } +zenoh-link-vsock = { version = "1.0.0-dev", path = "io/zenoh-links/zenoh-link-vsock" } +zenoh-link = { version = "1.0.0-dev", path = "io/zenoh-link" } +zenoh-link-commons = { version = "1.0.0-dev", path = "io/zenoh-link-commons" } +zenoh = { version = "1.0.0-dev", path = "zenoh", default-features = false } +zenoh-runtime = { version = "1.0.0-dev", path = "commons/zenoh-runtime" } +zenoh-task = { version = "1.0.0-dev", path = "commons/zenoh-task" } [profile.dev] debug = true diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 129b2d8a35..e7672c6057 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -11,10 +11,10 @@ /// The node's mode (router, peer or client) mode: "peer", - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenoh and available in admin space @//router, @//peer or @//client metadata: { name: "strawberry", - location: "Penny Lane" + location: "Penny Lane", }, /// Which endpoints to connect to. E.g. tcp/localhost:7447. @@ -23,9 +23,13 @@ /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: { router: -1, peer: -1, client: 0 }, + /// The list of endpoints to connect to. + /// Accepts a single list (e.g. endpoints: ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/10.10.10.10:7447"], peer: ["tcp/11.11.11.11:7447"] }). endpoints: [ // "/
" ], @@ -49,19 +53,21 @@ }, }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// Which endpoints to listen on. E.g. tcp/0.0.0.0:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 listen: { /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. + /// Accepts a single value (e.g. timeout_ms: 0) + /// or different values for router, peer and client (e.g. timeout_ms: { router: -1, peer: -1, client: 0 }). timeout_ms: 0, - endpoints: [ - // "/
" - ], + /// The list of endpoints to listen on. + /// Accepts a single list (e.g. endpoints: ["tcp/[::]:7447", "udp/[::]:7447"]) + /// or different lists for router, peer and client (e.g. endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }). + endpoints: { router: ["tcp/[::]:7447"], peer: ["tcp/[::]:0"] }, /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. @@ -83,10 +89,10 @@ }, /// Configure the scouting mechanisms and their behaviours scouting: { - /// In client mode, the period dedicated to scouting for a router before failing + /// In client mode, the period in milliseconds dedicated to scouting for a router before failing. timeout: 3000, - /// In peer mode, the period dedicated to scouting remote peers before attempting other operations - delay: 200, + /// In peer mode, the maximum period in milliseconds dedicated to scouting remote peers before attempting other operations. + delay: 500, /// The multicast scouting configuration. multicast: { /// Whether multicast scouting is enabled or not @@ -98,9 +104,10 @@ /// The time-to-live on multicast scouting packets ttl: 1, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - /// Accepts a single value or different values for router, peer and client. - /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) + /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). + /// Each value is a list of: "peer", "router" and/or "client". + autoconnect: { router: [], peer: ["router", "peer"] }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, }, @@ -115,9 +122,10 @@ /// direct connectivity with each other. multihop: false, /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - /// Accepts a single value or different values for router, peer and client. - /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + /// Accepts a single value (e.g. autoconnect: ["router", "peer"]) + /// or different values for router, peer and client (e.g. autoconnect: { router: [], peer: ["router", "peer"] }). + /// Each value is a list of: "peer", "router" and/or "client". 
+ autoconnect: { router: [], peer: ["router", "peer"] }, }, }, @@ -178,29 +186,99 @@ // }, // ], - // /// configure access control (ACL) rules + // /// Configure access control (ACL) rules // access_control: { - // ///[true/false] acl will be activated only if this is set to true + // /// [true/false] acl will be activated only if this is set to true // "enabled": false, - // ///[deny/allow] default permission is deny (even if this is left empty or not specified) + // /// [deny/allow] default permission is deny (even if this is left empty or not specified) // "default_permission": "deny", - // ///rule set for permissions allowing or denying access to key-expressions + // /// Rule set for permissions allowing or denying access to key-expressions // "rules": // [ // { - // "actions": [ - // "put", "get", "declare_subscriber", "declare_queryable" + // /// Id has to be unique within the rule set + // "id": "rule1", + // "messages": [ + // "put", "delete", "declare_subscriber", + // "query", "reply", "declare_queryable", // ], // "flows":["egress","ingress"], // "permission": "allow", // "key_exprs": [ // "test/demo" // ], + // }, + // { + // "id": "rule2", + // "messages": [ + // "put", "delete", "declare_subscriber", + // "query", "reply", "declare_queryable", + // ], + // "flows":["ingress"], + // "permission": "allow", + // "key_exprs": [ + // "**" + // ], + // }, + // ], + // /// List of combinations of subjects. + // /// + // /// If a subject property (i.e. username, certificate common name or interface) is empty + // /// it is interpreted as a wildcard. Moreover, a subject property cannot be an empty list. + // "subjects": + // [ + // { + // /// Id has to be unique within the subjects list + // "id": "subject1", + // /// Subjects can be interfaces // "interfaces": [ - // "lo0" - // ] + // "lo0", + // "en0", + // ], + // /// Subjects can be cert_common_names when using TLS or Quic + // "cert_common_names": [ + // "example.zenoh.io" + // ], + // /// Subjects can be usernames when using user/password authentication + // "usernames": [ + // "zenoh-example" + // ], + // /// This instance translates internally to this filter: + // /// (interface="lo0" && cert_common_name="example.zenoh.io" && username="zenoh-example") || + // /// (interface="en0" && cert_common_name="example.zenoh.io" && username="zenoh-example") + // }, + // { + // "id": "subject2", + // "interfaces": [ + // "lo0", + // "en0", + // ], + // "cert_common_names": [ + // "example2.zenoh.io" + // ], + // /// This instance translates internally to this filter: + // /// (interface="lo0" && cert_common_name="example2.zenoh.io") || + // /// (interface="en0" && cert_common_name="example2.zenoh.io") // }, - // ] + // { + // "id": "subject3", + // /// An empty subject combination is a wildcard + // }, + // ], + // /// The policies list associates rules to subjects + // "policies": + // [ + // /// Each policy associates one or multiple rules to one or multiple subject combinations + // { + // /// Rules and Subjects are identified with their unique IDs declared above + // "rules": ["rule1"], + // "subjects": ["subject1", "subject2"], + // }, + // { + // "rules": ["rule2"], + // "subjects": ["subject3"], + // }, + // ] //}, /// Configure internal transport parameters @@ -221,7 +299,7 @@ /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to /// enable 'lowlatency' you need to explicitly disable 'qos'. 
-    /// NOTE: LowLatency transport does not support the fragmentation, so the message size should be
+    /// NOTE: LowLatency transport does not support fragmentation, so the message size should be
     /// smaller than the tx batch_size.
     lowlatency: false,
     /// Enables QoS on unicast communications.
@@ -278,6 +356,7 @@
       /// Each zenoh link has a transmission queue that can be configured
       queue: {
         /// The size of each priority queue indicates the number of batches a given queue can contain.
+        /// NOTE: the number of batches in each priority must be between 1 and 16. Different values will result in an error.
         /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE.
         /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE,
         /// then the amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU.
@@ -297,11 +376,18 @@
           /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here.
           congestion_control: {
             /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available.
-            wait_before_drop: 1000
+            wait_before_drop: 1000,
           },
-          /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress.
-          /// Higher values lead to a more aggressive batching but it will introduce additional latency.
-          backoff: 100,
+          /// Perform batching of messages if they are smaller than the batch_size
+          batching: {
+            /// Perform adaptive batching of messages if they are smaller than the batch_size.
+            /// When the network is detected to not be fast enough to transmit every message individually, many small messages may be
+            /// batched together and sent all at once on the wire, reducing the overall network overhead. This is typical of a high-throughput
+            /// scenario mainly composed of small messages. In other words, batching is activated by the network back-pressure.
+            enabled: true,
+            /// The maximum time limit (in ms) a message should be retained for batching when back-pressure happens.
+            time_limit: 1,
+          }
         },
       },
       /// Configure the zenoh RX parameters of a link
@@ -340,9 +426,14 @@
           server_name_verification: null,
         },
       },
-      /// Shared memory configuration
+      /// Shared memory configuration.
+      /// NOTE: shared memory can be used only if zenoh is compiled with the "shared-memory" feature, otherwise
+      /// settings in this section have no effect.
       shared_memory: {
-        enabled: false,
+        /// A probing procedure for shared memory is performed upon session opening. To enable zenoh to operate
+        /// over shared memory (and not fall back on network mode), shared memory needs to be enabled also on the
+        /// subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected.
+        enabled: true,
       },
       auth: {
         /// The configuration of authentication.
@@ -381,11 +472,15 @@
  ///
  //
  //   plugins_loading: {
-  //     // Enable plugins loading.
+  //     /// Enable plugins loading.
  //     enabled: false,
  //     /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup.
-  //     /// If `enabled: true` and `search_dirs` is not specified then `search_dirs` falls back to the default value: ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib"
-  //     search_dirs: [],
+  //     /// Directories are specified either as a plain string or as an object with fields `kind` and `value`.
+  //     /// 1. If `kind` is `current_exe_parent`, then the parent of the current executable's directory is searched and `value` should be `null`.
+  //     ///    In Bash notation, `{ "kind": "current_exe_parent" }` equals `$(dirname $(which zenohd))` while `"."` equals `$PWD`.
+  //     /// 2. If `kind` is `path`, then `value` is interpreted as a filesystem path. Simply supplying a string instead of an object is equivalent to this.
+  //     /// If `enabled: true` and `search_dirs` is not specified then `search_dirs` falls back to the default value:
+  //     search_dirs: [{ "kind": "current_exe_parent" }, ".", "~/.zenoh/lib", "/opt/homebrew/lib", "/usr/local/lib", "/usr/lib"],
  //   },
  //   /// Plugins are only loaded if `plugins_loading: { enabled: true }` and present in the configuration when starting.
  //   /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace.
@@ -408,6 +503,12 @@
  //     __config__: "./plugins/zenoh-plugin-rest/config.json5",
  //     /// http port to answer to rest requests
  //     http_port: 8000,
+  //     /// The number of worker threads in the TOKIO runtime (default: 2).
+  //     /// The configuration only takes effect if running as a dynamic plugin, which cannot reuse the current runtime.
+  //     work_thread_num: 0,
+  //     /// The number of blocking threads in the TOKIO runtime (default: 50).
+  //     /// The configuration only takes effect if running as a dynamic plugin, which cannot reuse the current runtime.
+  //     max_block_thread_num: 50,
  //   },
  //
  //   /// Configure the storage manager plugin
@@ -519,5 +620,4 @@
  //       __config__: "./plugins/zenoh-plugin-storage-manager/config.json5",
  //     }
  //   },
-
 }
diff --git a/README.md b/README.md
index 6ecdf65d88..05d3233139 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,7 @@
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)

 # Eclipse Zenoh
+
 The Eclipse Zenoh: Zero Overhead Pub/sub, Store/Query and Compute.

 Zenoh (pronounced _/zeno/_) unifies data in motion, data at rest and computations. It carefully blends traditional pub/sub with geo-distributed storages, queries and computations, while retaining a level of time and space efficiency that is well beyond any of the mainstream stacks.
@@ -15,21 +16,22 @@ Zenoh (pronounce _/zeno/_) unifies data in motion, data at rest and computations
 Check the website [zenoh.io](http://zenoh.io) and the [roadmap](https://github.com/eclipse-zenoh/roadmap) for more detailed information.

 -------------------------------
+
 ## Getting Started

 Zenoh is extremely easy to learn, the best place to master the fundamentals is our [getting started guide](https://zenoh.io/docs/getting-started/first-app/).

 -------------------------------
+
 ## How to install it

 To install the latest release of the Zenoh router (`zenohd`) and its default plugins (REST API plugin and Storages Manager plugin) you can do as follows:

 ### Manual installation (all platforms)

-All release packages can be downloaded from:
-
-  https://download.eclipse.org/zenoh/zenoh/latest/
+All release packages can be downloaded from [https://download.eclipse.org/zenoh/zenoh/latest/](https://download.eclipse.org/zenoh/zenoh/latest/).

-Each subdirectory has the name of the Rust target. See the platforms each target corresponds to on https://doc.rust-lang.org/stable/rustc/platform-support.html
+Each subdirectory has the name of the Rust target. See the platforms each target corresponds to on [https://doc.rust-lang.org/stable/rustc/platform-support.html](https://doc.rust-lang.org/stable/rustc/platform-support.html).
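As a concrete illustration of the manual installation described above, the sketch below downloads and runs a release build for one Rust target. The archive name inside each target directory is an assumption here, not taken from this document; check the directory listing at https://download.eclipse.org/zenoh/zenoh/latest/ for the exact file name.

```bash
# Hypothetical fetch-and-run session for a Linux x86_64 host.
# The zip file name is illustrative; verify it in the download listing first.
TARGET=x86_64-unknown-linux-gnu
BASE=https://download.eclipse.org/zenoh/zenoh/latest
curl -LO "${BASE}/${TARGET}/zenoh-${TARGET}.zip"  # fetch the release package
unzip "zenoh-${TARGET}.zip" -d zenoh              # extract it wherever you want
./zenoh/zenohd --help                             # run the extracted router binary
```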
Choose your platform and download the `.zip` file. Unzip it where you want, and run the extracted `zenohd` binary.

@@ -43,6 +45,7 @@
 echo "deb [trusted=yes] https://download.eclipse.org/zenoh/debian-repo/ /" | sudo tee -a /etc/apt/sources.list > /dev/null
 sudo apt update
 sudo apt install zenoh
 ```
+
 Then you can run `zenohd`.

 ### MacOS

 Tap our brew package repository and install the `zenoh` formula:

 ```
 brew tap eclipse-zenoh/homebrew-zenoh
 brew install zenoh
 ```
+
 Then you can run `zenohd`.

+-------------------------------
+
+## Rust API

-### Rust API
+* [Docs.rs for Zenoh](https://docs.rs/zenoh/latest/zenoh/)

 -------------------------------
+
 ## How to build it

 > [!WARNING]
 > Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in maintaining compatibility between the various git repositories in the Zenoh project.

-Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with:
+Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be successfully compiled with Rust stable (>= 1.75.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with:

 ```bash
-$ rustup update
+rustup update
 ```

 To build Zenoh, just type the following command after having followed the previous instructions:

 ```bash
-$ cargo build --release --all-targets
+cargo build --release --all-targets
 ```

 Zenoh's router is built as `target/release/zenohd`. All the examples are built into the `target/release/examples` directory. They can all work peer-to-peer, or interconnected via the zenoh router.

 -------------------------------
-## Quick tests of your build:

-**Peer-to-peer tests:**
+## Quick tests of your build

- - **pub/sub**
-   - run: `./target/release/examples/z_sub`
-   - in another shell run: `./target/release/examples/z_put`
-   - the subscriber should receive the publication.
+### Peer-to-peer tests

- - **get/queryable**
-   - run: `./target/release/examples/z_queryable`
-   - in another shell run: `./target/release/examples/z_get`
-   - the queryable should display the log in its listener, and the get should receive the queryable result.
+* **pub/sub**
+  * run: `./target/release/examples/z_sub`
+  * in another shell run: `./target/release/examples/z_put`
+  * the subscriber should receive the publication.

-**Routed tests:**
+* **get/queryable**
+  * run: `./target/release/examples/z_queryable`
+  * in another shell run: `./target/release/examples/z_get`
+  * the queryable should display the log in its listener, and the get should receive the queryable result.
+
+### Routed tests

 > [!NOTE]
 > **Windows users**: to properly execute the commands below in PowerShell you need to escape `"` characters as `\"`.
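The routed put/store/get steps listed below can also be chained into a single smoke-test script. This is a minimal sketch, assuming a completed release build, the examples' default key expressions (which fall under `demo/example/**`), and free default ports; the backgrounding and the one-second pause are illustrative, not part of the documented steps.

```bash
# Minimal smoke test for the routed put/store/get flow described below.
./target/release/zenohd \
  --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}' &
ROUTER_PID=$!
sleep 1                            # give the router time to start and load the storage
./target/release/examples/z_put    # store a value under demo/example/**
./target/release/examples/z_get    # query it back from the memory storage
kill "$ROUTER_PID"
```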
- - **put/store/get** - - run the Zenoh router with a memory storage: - `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell run: `./target/release/examples/z_put` - - then run `./target/release/examples/z_get` - - the get should receive the stored publication. - - - **REST API using `curl` tool** - - run the Zenoh router with a memory storage: - `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, do a publication via the REST API: - `curl -X PUT -d '"Hello World!"' http://localhost:8000/demo/example/test` - - get it back via the REST API: - `curl http://localhost:8000/demo/example/test` - - - **router admin space via the REST API** - - run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage: - `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` - - in another shell, get info of the zenoh router via the zenoh admin space: - `curl http://localhost:8000/@/router/local` - - get the volumes of the router (only memory by default): - `curl 'http://localhost:8000/@/router/local/**/volumes/*'` - - get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present): - `curl 'http://localhost:8000/@/router/local/**/storages/*'` - - add another memory storage on `/demo/mystore/**`: - `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/router/local/config/plugins/storage_manager/storages/mystore` - - check it has been created: - `curl 'http://localhost:8000/@/router/local/**/storages/*'` - -**Configuration options:** +* **put/store/get** + * run the Zenoh router with a memory storage: + `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'` + * in another shell run: `./target/release/examples/z_put` + * then run `./target/release/examples/z_get` + * the get should receive the stored publication. 
+
+* **REST API using `curl` tool**
+  * run the Zenoh router with a memory storage:
+    `./target/release/zenohd --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'`
+  * in another shell, do a publication via the REST API:
+    `curl -X PUT -d '"Hello World!"' http://localhost:8000/demo/example/test`
+  * get it back via the REST API:
+    `curl http://localhost:8000/demo/example/test`
+
+* **router admin space via the REST API**
+  * run the Zenoh router with permission to perform config changes via the admin space, and with a memory storage:
+    `./target/release/zenohd --adminspace-permissions=rw --cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'`
+  * in another shell, get info of the zenoh router via the zenoh admin space:
+    `curl http://localhost:8000/@/local/router`
+  * get the volumes of the router (only memory by default):
+    `curl 'http://localhost:8000/@/local/router/**/volumes/*'`
+  * get the storages of the local router (the memory storage configured at startup on '/demo/example/**' should be present):
+    `curl 'http://localhost:8000/@/local/router/**/storages/*'`
+  * add another memory storage on `/demo/mystore/**`:
+    `curl -X PUT -H 'content-type:application/json' -d '{"key_expr":"demo/mystore/**","volume":"memory"}' http://localhost:8000/@/local/router/config/plugins/storage_manager/storages/mystore`
+  * check it has been created:
+    `curl 'http://localhost:8000/@/local/router/**/storages/*'`
+
+### Configuration options

 A Zenoh configuration file can be provided via CLI to all Zenoh examples and the Zenoh router.

- * `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options.
-
+* `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options.

 See other examples of Zenoh usage in [examples/](examples)

@@ -140,45 +148,44 @@ See other examples of Zenoh usage in [examples/](examples)

 > **Zenoh Runtime Configuration**: Starting from version 0.11.0-rc, Zenoh allows for configuring the number of worker threads and other advanced options of the runtime. For guidance on utilizing it, please refer to the [doc](https://docs.rs/zenoh-runtime/latest/zenoh_runtime/enum.ZRuntime.html).

 -------------------------------
+
 ## Zenoh router command line arguments

-`zenohd` accepts the following arguments:
- * `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only.
- * `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case.
- * `--cfg [<KEY>:<VALUE>]...`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and key must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration.
- * `-l, --listen <ENDPOINT>...`: An endpoint on which this router will listen for incoming sessions.
+
+### Configuration options
 
 A Zenoh configuration file can be provided via CLI to all Zenoh examples and the Zenoh router.
 
-  * `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options.
-
+* `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file and the available options.
 
 See other examples of Zenoh usage in [examples/](examples)
 
@@ -140,45 +148,44 @@ See other examples of Zenoh usage in [examples/](examples)
 > **Zenoh Runtime Configuration**: Starting from version 0.11.0-rc, Zenoh allows for configuring the number of worker threads and other advanced options of the runtime. For guidance on utilizing it, please refer to the [doc](https://docs.rs/zenoh-runtime/latest/zenoh_runtime/enum.ZRuntime.html).
 
-------------------------------
+
 ## Zenoh router command line arguments
 
-`zenohd` accepts the following arguments:
-  * `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only.
-  * `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case.
-  * `--cfg [<KEY>:<VALUE>]...`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and KEY must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration.
-  * `-l, --listen <ENDPOINT>...`: An endpoint on which this router will listen for incoming sessions.
-   Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported:
-    - TCP: `tcp/<local_ip>:<port>`
-    - UDP: `udp/<local_ip>:<port>`
-    - [TCP+TLS](https://zenoh.io/docs/manual/tls/): `tls/<local_ip>:<port>`
-    - [QUIC](https://zenoh.io/docs/manual/quic/): `quic/<local_ip>:<port>`
- * `-e, --connect <ENDPOINT>...`: An endpoint this router will try to connect to. Repeat this option to connect to several peers or routers.
- * `--no-multicast-scouting`: By default zenohd replies to multicast scouting messages for being discovered by peers and clients.
-   This option disables this feature.
- * `-i, --id <ID>`: The identifier (as a hexadecimal string, e.g. A0B23...) that zenohd must use.
-   **WARNING**: this identifier must be unique in the system! If not set, a random unsigned 128-bit integer will be used.
- * `--no-timestamp`: By default zenohd adds an HLC-generated Timestamp to each routed Data if there isn't already one.
-   This option disables this feature.
- * `-P, --plugin [<PLUGIN_NAME> | <PLUGIN_NAME>:<LIBRARY_PATH>]...`: A [plugin](https://zenoh.io/docs/manual/plugins/) that must be loaded. Accepted values:
-    - a plugin name; zenohd will search for a library named `libzenoh_plugin_<PLUGIN_NAME>.so` on Unix, `libzenoh_plugin_<PLUGIN_NAME>.dylib` on macOS or `zenoh_plugin_<PLUGIN_NAME>.dll` on Windows.
-    - `"<PLUGIN_NAME>:<LIBRARY_PATH>"`; the plugin will be loaded from the library file at `<LIBRARY_PATH>`.
-   Repeat this option to load several plugins.
- * `--plugin-search-dir <DIRECTORY>...`: A directory in which to search for [plugins](https://zenoh.io/docs/manual/plugins/) libraries to load.
-   Repeat this option to specify several search directories. By default, the plugins libraries will be searched in:
-   `'/usr/local/lib:/usr/lib:~/.zenoh/lib:.'`
- * `--rest-http-port <PORT>`: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values:
-    - a port number
-    - a string with format `<local_ip>:<port>` (to bind the HTTP server to a specific interface)
-    - `"None"` to deactivate the REST plugin
-
-   If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`.
+`zenohd` accepts the following arguments:
 
-> [!WARNING]
-> The following documentation pertains to the v0.6+ API, which comes with many changes to the behaviour and configuration of Zenoh.
-To access the v0.5 version of the code and matching README, please go to the [0.5.0-beta.9](https://github.com/eclipse-zenoh/zenoh/tree/0.5.0-beta.9) tagged version.
+* `--adminspace-permissions <[r|w|rw|none]>`: Configure the read and/or write permissions on the admin space. Default is read only.
+* `-c, --config <FILE>`: a [JSON5](https://json5.org) configuration file. [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5) shows the schema of this file. All properties of this configuration are optional, so you may not need such a large configuration for your use-case.
+* `--cfg <KEY>:<VALUE>`: allows you to change specific parts of the configuration right after it has been constructed. VALUE must be a valid JSON5 value, and KEY must be a path through the configuration file, where each element is separated by a `/`. When inserting in parts of the config that are arrays, you may use indexes, or may use `+` to indicate that you want to append your value to the array. `--cfg` passed values will always override any previously existing value for their key in the configuration (see the usage example after this list).
+* `-l, --listen <ENDPOINT>...`: An endpoint on which this router will listen for incoming sessions.
+  Repeat this option to open several listeners. By default, `tcp/[::]:7447` is used. The following endpoints are currently supported:
+  * TCP: `tcp/<local_ip>:<port>`
+  * UDP: `udp/<local_ip>:<port>`
+  * [TCP+TLS](https://zenoh.io/docs/manual/tls/): `tls/<local_ip>:<port>`
+  * [QUIC](https://zenoh.io/docs/manual/quic/): `quic/<local_ip>:<port>`
+* `-e, --connect <ENDPOINT>...`: An endpoint this router will try to connect to. Repeat this option to connect to several peers or routers.
+* `--no-multicast-scouting`: By default zenohd replies to multicast scouting messages for being discovered by peers and clients.
+  This option disables this feature.
+* `-i, --id <ID>`: The identifier (as a hexadecimal string, e.g. A0B23...) that zenohd must use.
+  **WARNING**: this identifier must be unique in the system! If not set, a random unsigned 128-bit integer will be used.
+* `--no-timestamp`: By default zenohd adds an HLC-generated Timestamp to each routed Data if there isn't already one.
+  This option disables this feature.
+* `-P, --plugin [<PLUGIN_NAME> | <PLUGIN_NAME>:<LIBRARY_PATH>]...`: A [plugin](https://zenoh.io/docs/manual/plugins/) that must be loaded. Accepted values:
+  * a plugin name; zenohd will search for a library named `libzenoh_plugin_<PLUGIN_NAME>.so` on Unix, `libzenoh_plugin_<PLUGIN_NAME>.dylib` on macOS or `zenoh_plugin_<PLUGIN_NAME>.dll` on Windows.
+  * `"<PLUGIN_NAME>:<LIBRARY_PATH>"`; the plugin will be loaded from the library file at `<LIBRARY_PATH>`.
+
+  Repeat this option to load several plugins.
+* `--plugin-search-dir <DIRECTORY>...`: A directory in which to search for [plugins](https://zenoh.io/docs/manual/plugins/) libraries to load.
+  Repeat this option to specify several search directories. By default, the plugins libraries will be searched in:
+  `'/usr/local/lib:/usr/lib:~/.zenoh/lib:.'`
+* `--rest-http-port <PORT>`: Configures the [REST plugin](https://zenoh.io/docs/manual/plugin-http/)'s HTTP port. Accepted values:
+  * a port number
+  * a string with format `<local_ip>:<port>` (to bind the HTTP server to a specific interface)
+  * `"None"` to deactivate the REST plugin
+
+  If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`.
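As a concrete illustration of the `--cfg` syntax referenced in the list above (the key paths are entries from [DEFAULT_CONFIG.json5](DEFAULT_CONFIG.json5); the values are only examples): `./target/release/zenohd --cfg='scouting/multicast/enabled:false'` overrides a scalar option, while `./target/release/zenohd --cfg='listen/endpoints/+:"tcp/0.0.0.0:7448"'` uses `+` to append an extra endpoint to the `listen/endpoints` array.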
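For reference, the storage declared with `--cfg` throughout this README can equally be declared in the configuration file itself; a hypothetical JSON5 fragment with the same effect would be `plugins: { storage_manager: { storages: { demo: { key_expr: "demo/example/**", volume: "memory" } } } }`.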
"io/zenoh-transport/tests/*.rs", + "zenoh/tests/open_time.rs", + "zenoh/tests/authentication.rs", ] diff --git a/ci/nostd-check/src/bin/nostd_check.rs b/ci/nostd-check/src/bin/nostd_check.rs index 74f85ae06c..b243c9d182 100644 --- a/ci/nostd-check/src/bin/nostd_check.rs +++ b/ci/nostd-check/src/bin/nostd_check.rs @@ -15,6 +15,7 @@ #![no_std] use core::panic::PanicInfo; + use getrandom::{register_custom_getrandom, Error}; use linked_list_allocator::LockedHeap; #[allow(unused_imports)] diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml index 067e5e53ee..94ee27e7eb 100644 --- a/ci/valgrind-check/Cargo.toml +++ b/ci/valgrind-check/Cargo.toml @@ -22,7 +22,7 @@ categories = ["network-programming"] description = "Internal crate for zenoh." [dependencies] -tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } +tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } futures = "0.3.25" zenoh = { path = "../../zenoh/" } diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs index 9561f7016a..f3b1dd0efe 100644 --- a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -12,8 +12,8 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; + +use zenoh::{config::Config, key_expr::KeyExpr, prelude::*}; #[tokio::main] async fn main() { @@ -24,26 +24,24 @@ async fn main() { let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); println!("Declaring Publisher on '{pub_key_expr}'..."); - let pub_session = zenoh::open(Config::default()).res().await.unwrap(); - let publisher = pub_session - .declare_publisher(&pub_key_expr) - .res() - .await - .unwrap(); + let pub_session = zenoh::open(Config::default()).await.unwrap(); + let publisher = pub_session.declare_publisher(&pub_key_expr).await.unwrap(); println!("Declaring Subscriber on '{sub_key_expr}'..."); - let sub_session = zenoh::open(Config::default()).res().await.unwrap(); + let sub_session = zenoh::open(Config::default()).await.unwrap(); let _subscriber = sub_session .declare_subscriber(&sub_key_expr) .callback(|sample| { println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), - sample.value + sample.kind(), + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) ); }) - .res() .await .unwrap(); @@ -51,7 +49,7 @@ async fn main() { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] data"); println!("Putting Data ('{}': '{}')...", &pub_key_expr, buf); - publisher.put(buf).res().await.unwrap(); + publisher.put(buf).await.unwrap(); } tokio::time::sleep(Duration::from_secs(1)).await; diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs index 78fb705fe8..8ea7be201b 100644 --- a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -11,10 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use std::{convert::TryFrom, time::Duration}; + +use zenoh::{ + config::Config, + key_expr::KeyExpr, + prelude::*, + query::{QueryTarget, Selector}, +}; #[tokio::main] 
async fn main() { @@ -25,45 +29,51 @@ async fn main() { let get_selector = Selector::try_from("test/valgrind/**").unwrap(); println!("Declaring Queryable on '{queryable_key_expr}'..."); - let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); + let queryable_session = zenoh::open(Config::default()).await.unwrap(); let _queryable = queryable_session - .declare_queryable(&queryable_key_expr.clone()) + .declare_queryable(queryable_key_expr.clone()) .callback(move |query| { println!(">> Handling query '{}'", query.selector()); - let reply = Ok(Sample::new( - queryable_key_expr.clone(), - query.value().unwrap().clone(), - )); + let queryable_key_expr = queryable_key_expr.clone(); zenoh_runtime::ZRuntime::Application.block_in_place(async move { - query.reply(reply).res().await.unwrap(); + query + .reply(queryable_key_expr, query.payload().unwrap().clone()) + .await + .unwrap(); }); }) .complete(true) - .res() .await .unwrap(); println!("Declaring Get session for '{get_selector}'..."); - let get_session = zenoh::open(Config::default()).res().await.unwrap(); + let get_session = zenoh::open(Config::default()).await.unwrap(); for idx in 0..5 { tokio::time::sleep(Duration::from_secs(1)).await; println!("Sending Query '{get_selector}'..."); let replies = get_session .get(&get_selector) - .with_value(idx) + .payload(idx) .target(QueryTarget::All) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => println!( ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, + sample.key_expr().as_str(), + sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) + ), + Err(err) => println!( + ">> Received (ERROR: '{}')", + err.payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)) ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), } } } diff --git a/commons/zenoh-buffers/src/bbuf.rs b/commons/zenoh-buffers/src/bbuf.rs index 687961aa5e..55d341880c 100644 --- a/commons/zenoh-buffers/src/bbuf.rs +++ b/commons/zenoh-buffers/src/bbuf.rs @@ -11,6 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::boxed::Box; +use alloc::sync::Arc; +use core::{fmt, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, @@ -18,11 +23,6 @@ use crate::{ writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, ZSlice, }; -use alloc::sync::Arc; -use core::{fmt, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; #[derive(Clone, PartialEq, Eq)] pub struct BBuf { @@ -127,7 +127,7 @@ impl Writer for &mut BBuf { self.capacity() - self.len() } - fn with_slot(&mut self, len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -135,7 +135,8 @@ impl Writer for &mut BBuf { return Err(DidntWrite); } - let written = f(self.as_writable_slice()); + // SAFETY: self.remaining() >= len + let written = write(unsafe { self.as_writable_slice().get_unchecked_mut(..len) }); self.len += written; NonZeroUsize::new(written).ok_or(DidntWrite) @@ -184,13 +185,8 @@ impl<'a> HasReader for &'a BBuf { // From impls impl From for ZSlice { fn from(value: BBuf) -> Self { - ZSlice { - buf: Arc::new(value.buffer), - start: 0, - end: value.len, - #[cfg(feature = "shared-memory")] - kind: crate::ZSliceKind::Raw, - } + // SAFETY: buffer length is ensured to be lesser than its capacity + 
unsafe { ZSlice::new(Arc::new(value.buffer), 0, value.len).unwrap_unchecked() } } } @@ -199,6 +195,7 @@ impl BBuf { pub fn rand(len: usize) -> Self { #[cfg(not(feature = "std"))] use alloc::vec::Vec; + use rand::Rng; let mut rng = rand::thread_rng(); diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 8641ff4afc..ee630b4201 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -116,9 +116,10 @@ pub mod buffer { } pub mod writer { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntWrite; @@ -136,9 +137,14 @@ pub mod writer { fn can_write(&self) -> bool { self.remaining() != 0 } - /// Provides a buffer of exactly `len` uninitialized bytes to `f` to allow in-place writing. - /// `f` must return the number of bytes it actually wrote. - fn with_slot(&mut self, len: usize, f: F) -> Result + /// Provides a buffer of exactly `len` uninitialized bytes to `write` to allow in-place writing. + /// `write` must return the number of bytes it actually wrote. + /// + /// # Safety + /// + /// Caller must ensure that `write` return an integer lesser than or equal to the length of + /// the slice passed in argument + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize; } @@ -159,9 +165,10 @@ pub mod writer { } pub mod reader { - use crate::ZSlice; use core::num::NonZeroUsize; + use crate::ZSlice; + #[derive(Debug, Clone, Copy)] pub struct DidntRead; @@ -202,6 +209,18 @@ pub mod reader { fn rewind(&mut self, mark: Self::Mark) -> bool; } + pub trait AdvanceableReader: Reader { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead>; + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead>; + fn advance(&mut self, offset: isize) -> Result<(), DidntRead> { + if offset > 0 { + self.skip(offset as usize) + } else { + self.backtrack((-offset) as usize) + } + } + } + #[derive(Debug, Clone, Copy)] pub struct DidntSiphon; diff --git a/commons/zenoh-buffers/src/slice.rs b/commons/zenoh-buffers/src/slice.rs index 9b5d72ca51..1f3771c2eb 100644 --- a/commons/zenoh-buffers/src/slice.rs +++ b/commons/zenoh-buffers/src/slice.rs @@ -11,12 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, - writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, -}; use core::{ marker::PhantomData, mem, @@ -25,6 +19,13 @@ use core::{ slice::{self}, }; +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZSlice, +}; + // Buffer impl Buffer for &[u8] { #[inline(always)] @@ -60,26 +61,13 @@ impl HasWriter for &mut [u8] { impl Writer for &mut [u8] { fn write(&mut self, bytes: &[u8]) -> Result { - let len = bytes.len().min(self.len()); - if len == 0 { + let Some(len) = NonZeroUsize::new(bytes.len().min(self.len())) else { return Err(DidntWrite); - } - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - // We early return if length is 0. - let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. 
- let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - - // SAFETY: this operation is safe since we check if len is non-zero. - Ok(unsafe { NonZeroUsize::new_unchecked(len) }) + }; + let (to_write, remain) = mem::take(self).split_at_mut(len.get()); + to_write.copy_from_slice(&bytes[..len.get()]); + *self = remain; + Ok(len) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { @@ -87,19 +75,7 @@ impl Writer for &mut [u8] { if self.len() < len { return Err(DidntWrite); } - - // SAFETY: len is guaranteed to be the smaller than lhs length. - let lhs = crate::unsafe_slice_mut!(self, ..len); - let rhs = crate::unsafe_slice!(bytes, ..len); - lhs.copy_from_slice(rhs); - - // SAFETY: len is guaranteed to be the minimum between lhs and rhs length. - let lhs = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(lhs) }; - + let _ = self.write(bytes); Ok(()) } @@ -107,24 +83,17 @@ impl Writer for &mut [u8] { self.len() } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { if len > self.len() { return Err(DidntWrite); } - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, ..len); - len = f(s); - // SAFETY: we early return in case len is greater than slice.len(). - let s = crate::unsafe_slice_mut!(self, len..); - // SAFETY: this doesn't compile with simple assignment because the compiler - // doesn't believe that the subslice has the same lifetime as the original slice, - // so we transmute to assure it that it does. - *self = unsafe { mem::transmute::<&mut [u8], &mut [u8]>(s) }; - - NonZeroUsize::new(len).ok_or(DidntWrite) + let written = write(&mut self[..len]); + // SAFETY: `written` < `len` is guaranteed by function contract + *self = unsafe { mem::take(self).get_unchecked_mut(written..) }; + NonZeroUsize::new(written).ok_or(DidntWrite) } } @@ -164,14 +133,13 @@ impl<'a> HasReader for &'a [u8] { impl Reader for &[u8] { fn read(&mut self, into: &mut [u8]) -> Result { - let len = self.len().min(into.len()); - // SAFETY: len is guaranteed to be the smaller than lhs length. - let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); - NonZeroUsize::new(len).ok_or(DidntRead) + let Some(len) = NonZeroUsize::new(self.len().min(into.len())) else { + return Err(DidntRead); + }; + let (to_write, remain) = self.split_at(len.get()); + into[..len.get()].copy_from_slice(to_write); + *self = remain; + Ok(len) } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { @@ -179,24 +147,16 @@ impl Reader for &[u8] { if self.len() < len { return Err(DidntRead); } - // SAFETY: len is guaranteed to be the smaller than lhs length. 
- let lhs = crate::unsafe_slice_mut!(into, ..len); - let rhs = crate::unsafe_slice!(self, ..len); - lhs.copy_from_slice(rhs); - // SAFETY: len is guaranteed to be smaller than slice.len(). - *self = crate::unsafe_slice!(self, len..); + let (to_write, remain) = self.split_at(len); + into[..len].copy_from_slice(to_write); + *self = remain; Ok(()) } fn read_u8(&mut self) -> Result { - if !self.can_read() { - return Err(DidntRead); - } - // SAFETY: we early return in case the slice is empty. - // Therefore, there is at least one element in the slice. - let ret = *crate::unsafe_slice!(self, 0); - *self = crate::unsafe_slice!(self, 1..); - Ok(ret) + let mut buf = [0; 1]; + self.read(&mut buf)?; + Ok(buf[0]) } fn read_zslices(&mut self, len: usize, mut f: F) -> Result<(), DidntRead> { diff --git a/commons/zenoh-buffers/src/vec.rs b/commons/zenoh-buffers/src/vec.rs index bc2edf87bb..fc81fa6687 100644 --- a/commons/zenoh-buffers/src/vec.rs +++ b/commons/zenoh-buffers/src/vec.rs @@ -11,15 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use core::{mem, num::NonZeroUsize, option}; + use crate::{ buffer::{Buffer, SplitBuffer}, reader::HasReader, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, }; -use core::{mem, num::NonZeroUsize, option}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; /// Allocate a vector with a given capacity and sets the length to that capacity. #[must_use] @@ -93,7 +93,7 @@ impl Writer for &mut Vec { usize::MAX } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, mut len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { @@ -103,7 +103,7 @@ impl Writer for &mut Vec { let s = crate::unsafe_slice_mut!(self.spare_capacity_mut(), ..len); // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; + len = unsafe { write(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; // SAFETY: we already reserved len elements on the vector. 
unsafe { self.set_len(self.len() + len) }; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 1365397966..2d5bcca213 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -11,21 +11,22 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::ZSliceKind; +use alloc::{sync::Arc, vec::Vec}; +use core::{cmp, iter, num::NonZeroUsize, ptr::NonNull}; +#[cfg(feature = "std")] +use std::io; + +use zenoh_collections::SingleOrVec; + use crate::{ buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, SiphonableReader}, + reader::{ + AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, + SiphonableReader, + }, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, + ZSlice, ZSliceBuffer, ZSliceWriter, }; -use alloc::{sync::Arc, vec::Vec}; -use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; -use zenoh_collections::SingleOrVec; - -fn get_mut_unchecked(arc: &mut Arc) -> &mut T { - unsafe { &mut (*(Arc::as_ptr(arc) as *mut T)) } -} #[derive(Debug, Clone, Default, Eq)] pub struct ZBuf { @@ -34,8 +35,10 @@ pub struct ZBuf { impl ZBuf { #[must_use] - pub fn empty() -> Self { - Self::default() + pub const fn empty() -> Self { + Self { + slices: SingleOrVec::empty(), + } } pub fn clear(&mut self) { @@ -56,84 +59,25 @@ impl ZBuf { } } - pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { - let start = match erased.start_bound() { - core::ops::Bound::Included(n) => *n, - core::ops::Bound::Excluded(n) => n + 1, - core::ops::Bound::Unbounded => 0, - }; - let end = match erased.end_bound() { - core::ops::Bound::Included(n) => n + 1, - core::ops::Bound::Excluded(n) => *n, - core::ops::Bound::Unbounded => self.len(), - }; - if start != end { - self.remove(start, end); - } - self.insert(start, replacement); - } - fn remove(&mut self, mut start: usize, mut end: usize) { - assert!(start <= end); - assert!(end <= self.len()); - let mut start_slice_idx = 0; - let mut start_idx_in_start_slice = 0; - let mut end_slice_idx = 0; - let mut end_idx_in_end_slice = 0; - for (i, slice) in self.slices.as_mut().iter_mut().enumerate() { - if slice.len() > start { - start_slice_idx = i; - start_idx_in_start_slice = start; - } - if slice.len() >= end { - end_slice_idx = i; - end_idx_in_end_slice = end; - break; - } - start -= slice.len(); - end -= slice.len(); - } - let start_slice = &mut self.slices.as_mut()[start_slice_idx]; - start_slice.end = start_slice.start + start_idx_in_start_slice; - let drain_start = start_slice_idx + (start_slice.start < start_slice.end) as usize; - let end_slice = &mut self.slices.as_mut()[end_slice_idx]; - end_slice.start += end_idx_in_end_slice; - let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; - self.slices.drain(drain_start..drain_end); - } - fn insert(&mut self, mut at: usize, slice: &[u8]) { - if slice.is_empty() { - return; - } - let old_at = at; - let mut slice_index = usize::MAX; - for (i, slice) in self.slices.as_ref().iter().enumerate() { - if at < slice.len() { - slice_index = i; - break; - } - if let Some(new_at) = at.checked_sub(slice.len()) { - at = new_at - } else { - panic!( - "Out of bounds insert attempted: at={old_at}, len={}", - self.len() - ) - } - } - if at != 0 { - let split = &self.slices.as_ref()[slice_index]; - let (l, r) = ( - split.subslice(0, at).unwrap(), - split.subslice(at, split.len()).unwrap(), - ); - 
self.slices.drain(slice_index..(slice_index + 1)); - self.slices.insert(slice_index, l); - self.slices.insert(slice_index + 1, Vec::from(slice).into()); - self.slices.insert(slice_index + 2, r); - } else { - self.slices.insert(slice_index, Vec::from(slice).into()) + pub fn to_zslice(&self) -> ZSlice { + let mut slices = self.zslices(); + match self.slices.len() { + 0 => ZSlice::empty(), + // SAFETY: it's safe to use unwrap_unchecked() because we are explicitly checking the length is 1. + 1 => unsafe { slices.next().unwrap_unchecked().clone() }, + _ => slices + .fold(Vec::new(), |mut acc, it| { + acc.extend(it.as_slice()); + acc + }) + .into(), } } + + #[inline] + fn opt_zslice_writer(&mut self) -> Option { + self.slices.last_mut().and_then(|s| s.writer()) + } } // Buffer @@ -195,17 +139,34 @@ impl PartialEq for ZBuf { } // From impls +impl From for ZBuf { + fn from(t: ZSlice) -> Self { + let mut zbuf = ZBuf::empty(); + zbuf.push_zslice(t); + zbuf + } +} + +impl From> for ZBuf +where + T: ZSliceBuffer + 'static, +{ + fn from(t: Arc) -> Self { + let zslice: ZSlice = t.into(); + Self::from(zslice) + } +} + impl From for ZBuf where - T: Into, + T: ZSliceBuffer + 'static, { fn from(t: T) -> Self { - let mut zbuf = ZBuf::empty(); let zslice: ZSlice = t.into(); - zbuf.push_zslice(zslice); - zbuf + Self::from(zslice) } } + // Reader #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct ZBufPos { @@ -265,7 +226,7 @@ impl<'a> Reader for ZBufReader<'a> { } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { - let len = self.read(into)?; + let len = Reader::read(self, into)?; if len.get() == into.len() { Ok(()) } else { @@ -276,7 +237,7 @@ impl<'a> Reader for ZBufReader<'a> { fn read_u8(&mut self) -> Result { let slice = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; - let byte = slice[self.cursor.byte]; + let byte = *slice.get(self.cursor.byte).ok_or(DidntRead)?; self.cursor.byte += 1; if self.cursor.byte == slice.len() { self.cursor.slice += 1; @@ -312,14 +273,11 @@ impl<'a> Reader for ZBufReader<'a> { match (slice.len() - self.cursor.byte).cmp(&len) { cmp::Ordering::Less => { let mut buffer = crate::vec::uninit(len); - self.read_exact(&mut buffer)?; + Reader::read_exact(self, &mut buffer)?; Ok(buffer.into()) } cmp::Ordering::Equal => { - let s = slice - .subslice(self.cursor.byte, slice.len()) - .ok_or(DidntRead)?; - + let s = slice.subslice(self.cursor.byte..).ok_or(DidntRead)?; self.cursor.slice += 1; self.cursor.byte = 0; Ok(s) @@ -327,7 +285,7 @@ impl<'a> Reader for ZBufReader<'a> { cmp::Ordering::Greater => { let start = self.cursor.byte; self.cursor.byte += len; - slice.subslice(start, self.cursor.byte).ok_or(DidntRead) + slice.subslice(start..self.cursor.byte).ok_or(DidntRead) } } } @@ -383,13 +341,81 @@ impl<'a> SiphonableReader for ZBufReader<'a> { } #[cfg(feature = "std")] -impl<'a> std::io::Read for ZBufReader<'a> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { +impl<'a> io::Read for ZBufReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), + Err(_) => Ok(0), + } + } +} + +impl<'a> AdvanceableReader for ZBufReader<'a> { + fn skip(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let s = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; + let remains_in_current_slice = s.len() - self.cursor.byte; + let advance = remaining_offset.min(remains_in_current_slice); + remaining_offset 
-= advance; + self.cursor.byte += advance; + if self.cursor.byte == s.len() { + self.cursor.slice += 1; + self.cursor.byte = 0; + } + } + Ok(()) + } + + fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead> { + let mut remaining_offset = offset; + while remaining_offset > 0 { + let backtrack = remaining_offset.min(self.cursor.byte); + remaining_offset -= backtrack; + self.cursor.byte -= backtrack; + if self.cursor.byte == 0 { + if self.cursor.slice == 0 { + break; + } + self.cursor.slice -= 1; + self.cursor.byte = self + .inner + .slices + .get(self.cursor.slice) + .ok_or(DidntRead)? + .len(); + } + } + if remaining_offset == 0 { + Ok(()) + } else { + Err(DidntRead) + } + } +} + +#[cfg(feature = "std")] +impl<'a> io::Seek for ZBufReader<'a> { + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + let current_pos = self + .inner + .slices() + .take(self.cursor.slice) + .fold(0, |acc, s| acc + s.len()) + + self.cursor.byte; + let current_pos = i64::try_from(current_pos) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e)))?; + + let offset = match pos { + std::io::SeekFrom::Start(s) => i64::try_from(s).unwrap_or(i64::MAX) - current_pos, + std::io::SeekFrom::Current(s) => s, + std::io::SeekFrom::End(s) => self.inner.len() as i64 + s - current_pos, + }; + match self.advance(offset as isize) { + Ok(()) => Ok((offset + current_pos) as u64), Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, - "UnexpectedEof", + std::io::ErrorKind::InvalidInput, + "InvalidInput", )), } } @@ -419,14 +445,14 @@ impl Iterator for ZBufSliceIterator<'_, '_> { match self.remaining.cmp(&len) { cmp::Ordering::Less => { let end = start + self.remaining; - let slice = slice.subslice(start, end); + let slice = slice.subslice(start..end); self.reader.cursor.byte = end; self.remaining = 0; slice } cmp::Ordering::Equal => { let end = start + self.remaining; - let slice = slice.subslice(start, end); + let slice = slice.subslice(start..end); self.reader.cursor.slice += 1; self.reader.cursor.byte = 0; self.remaining = 0; @@ -434,7 +460,7 @@ impl Iterator for ZBufSliceIterator<'_, '_> { } cmp::Ordering::Greater => { let end = start + len; - let slice = slice.subslice(start, end); + let slice = slice.subslice(start..end); self.reader.cursor.slice += 1; self.reader.cursor.byte = 0; self.remaining -= len; @@ -451,79 +477,43 @@ impl Iterator for ZBufSliceIterator<'_, '_> { // Writer #[derive(Debug)] pub struct ZBufWriter<'a> { - inner: &'a mut ZBuf, - cache: Arc>, + inner: NonNull, + zslice_writer: Option>, +} + +impl<'a> ZBufWriter<'a> { + #[inline] + fn zslice_writer(&mut self) -> &mut ZSliceWriter<'a> { + // Cannot use `if let` because of https://github.com/rust-lang/rust/issues/54663 + if self.zslice_writer.is_some() { + return self.zslice_writer.as_mut().unwrap(); + } + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow + let zbuf = unsafe { self.inner.as_mut() }; + zbuf.slices.push(ZSlice::empty()); + self.zslice_writer = zbuf.slices.last_mut().unwrap().writer(); + self.zslice_writer.as_mut().unwrap() + } } impl<'a> HasWriter for &'a mut ZBuf { type Writer = ZBufWriter<'a>; fn writer(self) -> Self::Writer { - let mut cache = None; - if let Some(ZSlice { buf, end, .. 
}) = self.slices.last_mut() { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Check for the length - if *end == b.len() { - cache = Some(unsafe { Arc::from_raw(Arc::into_raw(buf.clone()).cast()) }) - } - } - } - ZBufWriter { - inner: self, - cache: cache.unwrap_or_else(|| Arc::new(Vec::new())), + inner: NonNull::new(self).unwrap(), + zslice_writer: self.opt_zslice_writer(), } } } impl Writer for ZBufWriter<'_> { fn write(&mut self, bytes: &[u8]) -> Result { - if bytes.is_empty() { - return Err(DidntWrite); - } - self.write_exact(bytes)?; - // SAFETY: this operation is safe since we check if bytes is empty - Ok(unsafe { NonZeroUsize::new_unchecked(bytes.len()) }) + self.zslice_writer().write(bytes) } fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { - let cache = get_mut_unchecked(&mut self.cache); - let prev_cache_len = cache.len(); - cache.extend_from_slice(bytes); - let cache_len = cache.len(); - - // Verify we are writing on the cache - if let Some(ZSlice { - buf, ref mut end, .. - }) = self.inner.slices.last_mut() - { - // Verify the previous length of the cache is the right one - if *end == prev_cache_len { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Verify the Vec of the ZSlice is exactly the one from the cache - if core::ptr::eq(cache.as_ptr(), b.as_ptr()) { - // Simply update the slice length - *end = cache_len; - return Ok(()); - } - } - } - } - - self.inner.slices.push(ZSlice { - buf: self.cache.clone(), - start: prev_cache_len, - end: cache_len, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }); - Ok(()) - } - - fn write_u8(&mut self, byte: u8) -> Result<(), DidntWrite> { - self.write_exact(core::slice::from_ref(&byte)) + self.zslice_writer().write_exact(bytes) } fn remaining(&self) -> usize { @@ -531,55 +521,19 @@ impl Writer for ZBufWriter<'_> { } fn write_zslice(&mut self, slice: &ZSlice) -> Result<(), DidntWrite> { - self.inner.slices.push(slice.clone()); + self.zslice_writer = None; + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow, + // and `self.writer` has been overwritten + unsafe { self.inner.as_mut() }.push_zslice(slice.clone()); Ok(()) } - fn with_slot(&mut self, mut len: usize, f: F) -> Result + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result where F: FnOnce(&mut [u8]) -> usize, { - let cache = get_mut_unchecked(&mut self.cache); - let prev_cache_len = cache.len(); - cache.reserve(len); - - // SAFETY: we already reserved len elements on the vector. - let s = crate::unsafe_slice_mut!(cache.spare_capacity_mut(), ..len); - // SAFETY: converting MaybeUninit into [u8] is safe because we are going to write on it. - // The returned len tells us how many bytes have been written so as to update the len accordingly. - len = unsafe { f(&mut *(s as *mut [mem::MaybeUninit] as *mut [u8])) }; - // SAFETY: we already reserved len elements on the vector. - unsafe { cache.set_len(prev_cache_len + len) }; - - let cache_len = cache.len(); - - // Verify we are writing on the cache - if let Some(ZSlice { - buf, ref mut end, .. 
- }) = self.inner.slices.last_mut() - { - // Verify the previous length of the cache is the right one - if *end == prev_cache_len { - // Verify the ZSlice is actually a Vec - if let Some(b) = buf.as_any().downcast_ref::>() { - // Verify the Vec of the ZSlice is exactly the one from the cache - if ptr::eq(cache.as_ptr(), b.as_ptr()) { - // Simply update the slice length - *end = cache_len; - return NonZeroUsize::new(len).ok_or(DidntWrite); - } - } - } - } - - self.inner.slices.push(ZSlice { - buf: self.cache.clone(), - start: prev_cache_len, - end: cache_len, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - }); - NonZeroUsize::new(len).ok_or(DidntWrite) + // SAFETY: same precondition as the enclosing function + self.zslice_writer().with_slot(len, write) } } @@ -587,40 +541,43 @@ impl BacktrackableWriter for ZBufWriter<'_> { type Mark = ZBufPos; fn mark(&mut self) -> Self::Mark { - if let Some(slice) = self.inner.slices.last() { - ZBufPos { - slice: self.inner.slices.len(), - byte: slice.end, - } - } else { - ZBufPos { slice: 0, byte: 0 } + let byte = self.zslice_writer.as_mut().map(|w| w.mark()); + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow + let zbuf = unsafe { self.inner.as_mut() }; + ZBufPos { + slice: zbuf.slices.len(), + byte: byte + .or_else(|| Some(zbuf.opt_zslice_writer()?.mark())) + .unwrap_or(0), } } fn rewind(&mut self, mark: Self::Mark) -> bool { - self.inner - .slices - .truncate(mark.slice + usize::from(mark.byte != 0)); - if let Some(slice) = self.inner.slices.last_mut() { - slice.end = mark.byte; + // SAFETY: `self.inner` is valid as guaranteed by `self.writer` borrow, + // and `self.writer` is reassigned after modification + let zbuf = unsafe { self.inner.as_mut() }; + zbuf.slices.truncate(mark.slice); + self.zslice_writer = zbuf.opt_zslice_writer(); + if let Some(writer) = &mut self.zslice_writer { + writer.rewind(mark.byte); } true } } #[cfg(feature = "std")] -impl<'a> std::io::Write for ZBufWriter<'a> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { +impl<'a> io::Write for ZBufWriter<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { + if buf.is_empty() { + return Ok(0); + } match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::UnexpectedEof, - "UnexpectedEof", - )), + Err(_) => Err(io::ErrorKind::UnexpectedEof.into()), } } - fn flush(&mut self) -> std::io::Result<()> { + fn flush(&mut self) -> io::Result<()> { Ok(()) } } @@ -642,25 +599,69 @@ mod tests { let slice: ZSlice = [0u8, 1, 2, 3, 4, 5, 6, 7].to_vec().into(); let mut zbuf1 = ZBuf::empty(); - zbuf1.push_zslice(slice.subslice(0, 4).unwrap()); - zbuf1.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf1.push_zslice(slice.subslice(..4).unwrap()); + zbuf1.push_zslice(slice.subslice(4..8).unwrap()); let mut zbuf2 = ZBuf::empty(); - zbuf2.push_zslice(slice.subslice(0, 1).unwrap()); - zbuf2.push_zslice(slice.subslice(1, 4).unwrap()); - zbuf2.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf2.push_zslice(slice.subslice(..1).unwrap()); + zbuf2.push_zslice(slice.subslice(1..4).unwrap()); + zbuf2.push_zslice(slice.subslice(4..8).unwrap()); assert_eq!(zbuf1, zbuf2); let mut zbuf1 = ZBuf::empty(); - zbuf1.push_zslice(slice.subslice(2, 4).unwrap()); - zbuf1.push_zslice(slice.subslice(4, 8).unwrap()); + zbuf1.push_zslice(slice.subslice(2..4).unwrap()); + zbuf1.push_zslice(slice.subslice(4..8).unwrap()); let mut zbuf2 = ZBuf::empty(); - zbuf2.push_zslice(slice.subslice(2, 3).unwrap()); - 
zbuf2.push_zslice(slice.subslice(3, 6).unwrap()); - zbuf2.push_zslice(slice.subslice(6, 8).unwrap()); + zbuf2.push_zslice(slice.subslice(2..3).unwrap()); + zbuf2.push_zslice(slice.subslice(3..6).unwrap()); + zbuf2.push_zslice(slice.subslice(6..8).unwrap()); assert_eq!(zbuf1, zbuf2); } + + #[cfg(feature = "std")] + #[test] + fn zbuf_seek() { + use std::io::Seek; + + use super::{HasReader, ZBuf}; + use crate::reader::Reader; + + let mut buf = ZBuf::empty(); + buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); + buf.push_zslice([4u8, 5u8, 6u8, 7u8, 8u8].into()); + buf.push_zslice([9u8, 10u8, 11u8, 12u8, 13u8, 14u8].into()); + let mut reader = buf.reader(); + + assert_eq!(reader.stream_position().unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Current(6)).unwrap(), 7); + assert_eq!(reader.read_u8().unwrap(), 7); + assert_eq!(reader.seek(std::io::SeekFrom::Current(-5)).unwrap(), 3); + assert_eq!(reader.read_u8().unwrap(), 3); + assert_eq!(reader.seek(std::io::SeekFrom::Current(10)).unwrap(), 14); + assert_eq!(reader.read_u8().unwrap(), 14); + reader.seek(std::io::SeekFrom::Current(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::Start(0)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + assert_eq!(reader.seek(std::io::SeekFrom::Start(12)).unwrap(), 12); + assert_eq!(reader.read_u8().unwrap(), 12); + assert_eq!(reader.seek(std::io::SeekFrom::Start(15)).unwrap(), 15); + reader.read_u8().unwrap_err(); + reader.seek(std::io::SeekFrom::Start(100)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::End(0)).unwrap(), 15); + reader.read_u8().unwrap_err(); + assert_eq!(reader.seek(std::io::SeekFrom::End(-5)).unwrap(), 10); + assert_eq!(reader.read_u8().unwrap(), 10); + assert_eq!(reader.seek(std::io::SeekFrom::End(-15)).unwrap(), 0); + assert_eq!(reader.read_u8().unwrap(), 0); + reader.seek(std::io::SeekFrom::End(-20)).unwrap_err(); + + assert_eq!(reader.seek(std::io::SeekFrom::Start(10)).unwrap(), 10); + reader.seek(std::io::SeekFrom::Current(-100)).unwrap_err(); + } } diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index e53e6f3334..ec77ffa770 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -11,63 +11,69 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - buffer::{Buffer, SplitBuffer}, - reader::{BacktrackableReader, DidntRead, HasReader, Reader}, -}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use core::{ any::Any, - convert::AsRef, - fmt, + fmt, iter, num::NonZeroUsize, - ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive}, - option, + ops::{Bound, Deref, RangeBounds}, +}; + +use crate::{ + buffer::{Buffer, SplitBuffer}, + reader::{BacktrackableReader, DidntRead, HasReader, Reader}, + writer::{BacktrackableWriter, DidntWrite, Writer}, }; /*************************************/ /* ZSLICE BUFFER */ /*************************************/ -pub trait ZSliceBuffer: Send + Sync + fmt::Debug { +pub trait ZSliceBuffer: Any + Send + Sync + fmt::Debug { fn as_slice(&self) -> &[u8]; - fn as_mut_slice(&mut self) -> &mut [u8]; fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; } impl ZSliceBuffer for Vec { fn as_slice(&self) -> &[u8] { - self.as_ref() - } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() + self } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } impl ZSliceBuffer for Box<[u8]> { fn 
as_slice(&self) -> &[u8] { - self.as_ref() - } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() + self } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } impl ZSliceBuffer for [u8; N] { fn as_slice(&self) -> &[u8] { - self.as_ref() - } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() + self } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } /*************************************/ @@ -84,21 +90,31 @@ pub enum ZSliceKind { /// A clonable wrapper to a contiguous slice of bytes. #[derive(Clone)] pub struct ZSlice { - pub(crate) buf: Arc, - pub(crate) start: usize, - pub(crate) end: usize, + buf: Arc, + start: usize, + end: usize, #[cfg(feature = "shared-memory")] pub kind: ZSliceKind, } impl ZSlice { + #[deprecated(since = "1.0.0", note = "use `new` instead")] pub fn make( buf: Arc, start: usize, end: usize, + ) -> Result> { + Self::new(buf, start, end) + } + + #[inline] + pub fn new( + buf: Arc, + start: usize, + end: usize, ) -> Result> { if start <= end && end <= buf.as_slice().len() { - Ok(ZSlice { + Ok(Self { buf, start, end, @@ -110,19 +126,43 @@ impl ZSlice { } } + #[inline] + pub fn empty() -> Self { + Self::new(Arc::new(Vec::::new()), 0, 0).unwrap() + } + #[inline] #[must_use] - pub fn downcast_ref(&self) -> Option<&T> - where - T: Any, - { - self.buf.as_any().downcast_ref::() + pub fn downcast_ref(&self) -> Option<&T> { + self.buf.as_any().downcast_ref() } + /// # Safety + /// + /// Buffer modification must not modify slice range. #[inline] #[must_use] - pub const fn range(&self) -> Range { - self.start..self.end + pub unsafe fn downcast_mut(&mut self) -> Option<&mut T> { + Arc::get_mut(&mut self.buf)?.as_any_mut().downcast_mut() + } + + // This method is internal and is only meant to be used in `ZBufWriter`. + // It's implemented in this module because it plays with `ZSlice` invariant, + // so it should stay in the same module. + // See https://github.com/eclipse-zenoh/zenoh/pull/1289#discussion_r1701796640 + #[inline] + pub(crate) fn writer(&mut self) -> Option { + let vec = Arc::get_mut(&mut self.buf)? + .as_any_mut() + .downcast_mut::>()?; + if self.end == vec.len() { + Some(ZSliceWriter { + vec, + end: &mut self.end, + }) + } else { + None + } } #[inline] @@ -141,11 +181,20 @@ impl ZSlice { #[must_use] pub fn as_slice(&self) -> &[u8] { // SAFETY: bounds checks are performed at `ZSlice` construction via `make()` or `subslice()`. 
- crate::unsafe_slice!(self.buf.as_slice(), self.range()) - } - - #[must_use] - pub fn subslice(&self, start: usize, end: usize) -> Option { + unsafe { self.buf.as_slice().get_unchecked(self.start..self.end) } + } + + pub fn subslice(&self, range: impl RangeBounds) -> Option { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + 1, + Bound::Unbounded => 0, + }; + let end = match range.end_bound() { + Bound::Included(&n) => n + 1, + Bound::Excluded(&n) => n, + Bound::Unbounded => self.len(), + }; if start <= end && end <= self.len() { Some(ZSlice { buf: self.buf.clone(), @@ -174,65 +223,9 @@ impl AsRef<[u8]> for ZSlice { } } -impl Index for ZSlice { - type Output = u8; - - fn index(&self, index: usize) -> &Self::Output { - &self.buf.as_slice()[self.start + index] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: Range) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeFrom) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index for ZSlice { - type Output = [u8]; - - fn index(&self, _range: RangeFull) -> &Self::Output { - self - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeInclusive) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeTo) -> &Self::Output { - &(self.deref())[range] - } -} - -impl Index> for ZSlice { - type Output = [u8]; - - fn index(&self, range: RangeToInclusive) -> &Self::Output { - &(self.deref())[range] - } -} - -impl PartialEq for ZSlice { - fn eq(&self, other: &Self) -> bool { - self.as_slice() == other.as_slice() +impl + ?Sized> PartialEq for ZSlice { + fn eq(&self, other: &Rhs) -> bool { + self.as_slice() == other.as_ref() } } @@ -297,10 +290,57 @@ impl Buffer for &mut ZSlice { // SplitBuffer impl SplitBuffer for ZSlice { - type Slices<'a> = option::IntoIter<&'a [u8]>; + type Slices<'a> = iter::Once<&'a [u8]>; fn slices(&self) -> Self::Slices<'_> { - Some(self.as_slice()).into_iter() + iter::once(self.as_slice()) + } +} + +#[derive(Debug)] +pub(crate) struct ZSliceWriter<'a> { + vec: &'a mut Vec, + end: &'a mut usize, +} + +impl Writer for ZSliceWriter<'_> { + fn write(&mut self, bytes: &[u8]) -> Result { + let len = self.vec.write(bytes)?; + *self.end += len.get(); + Ok(len) + } + + fn write_exact(&mut self, bytes: &[u8]) -> Result<(), DidntWrite> { + self.write(bytes).map(|_| ()) + } + + fn remaining(&self) -> usize { + self.vec.remaining() + } + + unsafe fn with_slot(&mut self, len: usize, write: F) -> Result + where + F: FnOnce(&mut [u8]) -> usize, + { + // SAFETY: same precondition as the enclosing function + let len = unsafe { self.vec.with_slot(len, write) }?; + *self.end += len.get(); + Ok(len) + } +} + +impl BacktrackableWriter for ZSliceWriter<'_> { + type Mark = usize; + + fn mark(&mut self) -> Self::Mark { + *self.end + } + + fn rewind(&mut self, mark: Self::Mark) -> bool { + assert!(mark <= self.vec.len()); + self.vec.truncate(mark); + *self.end = mark; + true } } @@ -317,6 +357,7 @@ impl Reader for &mut ZSlice { fn read(&mut self, into: &mut [u8]) -> Result { let mut reader = self.as_slice().reader(); let len = reader.read(into)?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += len.get(); Ok(len) } @@ -324,6 +365,7 @@ impl Reader for &mut ZSlice { fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { let 
mut reader = self.as_slice().reader(); reader.read_exact(into)?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += into.len(); Ok(()) } @@ -331,6 +373,7 @@ impl Reader for &mut ZSlice { fn read_u8(&mut self) -> Result { let mut reader = self.as_slice().reader(); let res = reader.read_u8()?; + // we trust `Reader` impl for `&[u8]` to not overflow the size of the slice self.start += 1; Ok(res) } @@ -342,7 +385,7 @@ impl Reader for &mut ZSlice { } fn read_zslice(&mut self, len: usize) -> Result { - let res = self.subslice(0, len).ok_or(DidntRead)?; + let res = self.subslice(..len).ok_or(DidntRead)?; self.start += len; Ok(res) } @@ -364,6 +407,7 @@ impl BacktrackableReader for &mut ZSlice { } fn rewind(&mut self, mark: Self::Mark) -> bool { + assert!(mark <= self.end); self.start = mark; true } @@ -382,8 +426,8 @@ impl std::io::Read for &mut ZSlice { } } +#[cfg(feature = "test")] impl ZSlice { - #[cfg(feature = "test")] pub fn rand(len: usize) -> Self { use rand::Rng; @@ -402,9 +446,10 @@ mod tests { let mut zslice: ZSlice = buf.clone().into(); assert_eq!(buf.as_slice(), zslice.as_slice()); - let range = zslice.range(); - let mbuf = Arc::get_mut(&mut zslice.buf).unwrap(); - mbuf.as_mut_slice()[range][..buf.len()].clone_from_slice(&buf[..]); + // SAFETY: buffer slize size is not modified + let mut_slice = unsafe { zslice.downcast_mut::>() }.unwrap(); + + mut_slice[..buf.len()].clone_from_slice(&buf[..]); assert_eq!(buf.as_slice(), zslice.as_slice()); } diff --git a/commons/zenoh-buffers/tests/readwrite.rs b/commons/zenoh-buffers/tests/readwrite.rs index ea48218a85..dd5481c958 100644 --- a/commons/zenoh-buffers/tests/readwrite.rs +++ b/commons/zenoh-buffers/tests/readwrite.rs @@ -14,8 +14,8 @@ use zenoh_buffers::{ reader::{HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, HasWriter, Writer}, + BBuf, ZBuf, ZSlice, }; -use zenoh_buffers::{BBuf, ZBuf, ZSlice}; const BYTES: usize = 18; @@ -46,13 +46,15 @@ macro_rules! 
run_write { writer.write_exact(&WBS4).unwrap(); - writer - .with_slot(4, |mut buffer| { + // SAFETY: callback returns the length of the buffer + unsafe { + writer.with_slot(4, |mut buffer| { let w = buffer.write(&WBS5).unwrap(); assert_eq!(4, w.get()); w.get() }) - .unwrap(); + } + .unwrap(); }; } diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 6258213743..209a4c698d 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -39,9 +39,9 @@ std = [ shared-memory = [ "std", "zenoh-shm", - "zenoh-protocol/shared-memory" + "zenoh-protocol/shared-memory", + "zenoh-buffers/shared-memory" ] -complete_n = ["zenoh-protocol/complete_n"] [dependencies] tracing = {workspace = true, optional = true } diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index 1c46a700a7..d897038f91 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -75,19 +75,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -121,19 +121,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -162,19 +162,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -210,12 +210,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: 
None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -238,12 +238,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -277,12 +277,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::default(), + ext_qos: ext::QoSType::DEFAULT, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -305,7 +305,7 @@ fn criterion_benchmark(c: &mut Criterion) { let mut idx = 0; while idx < zslice.len() { let len = (zslice.len() - idx).min(chunk); - zbuf.push_zslice(ZSlice::make(buff.clone(), idx, idx + len).unwrap()); + zbuf.push_zslice(ZSlice::new(buff.clone(), idx, idx + len).unwrap()); idx += len; } diff --git a/commons/zenoh-codec/src/common/extension.rs b/commons/zenoh-codec/src/common/extension.rs index 6c22f8ff01..21d716a769 100644 --- a/commons/zenoh-codec/src/common/extension.rs +++ b/commons/zenoh-codec/src/common/extension.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::common::{ iext, imsg::has_flag, ZExtBody, ZExtUnit, ZExtUnknown, ZExtZ64, ZExtZBuf, ZExtZBufHeader, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + fn read_inner(reader: &mut R, _s: &str, header: u8) -> Result<(ZExtUnknown, bool), DidntRead> where R: Reader, diff --git a/commons/zenoh-codec/src/common/mod.rs b/commons/zenoh-codec/src/common/mod.rs index 4c25c93241..f34f9872bf 100644 --- a/commons/zenoh-codec/src/common/mod.rs +++ b/commons/zenoh-codec/src/common/mod.rs @@ -12,4 +12,3 @@ // ZettaScale Zenoh Team, // pub mod extension; -mod priority; diff --git a/commons/zenoh-codec/src/common/priority.rs b/commons/zenoh-codec/src/common/priority.rs deleted file mode 100644 index 776229971e..0000000000 --- a/commons/zenoh-codec/src/common/priority.rs +++ /dev/null @@ -1,66 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; -use core::convert::TryInto; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{common::imsg, core::Priority}; - -impl WCodec<&Priority, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Priority) -> Self::Output { - // Header - let header = imsg::id::PRIORITY | ((*x as u8) << imsg::HEADER_BITS); - self.write(&mut *writer, header)?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, _reader: &mut R) -> Result { - if imsg::mid(self.header) != imsg::id::PRIORITY { - return Err(DidntRead); - } - - let priority: Priority = (imsg::flags(self.header) >> imsg::HEADER_BITS) - .try_into() - .map_err(|_| DidntRead)?; - Ok(priority) - } -} diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index 478bcf1cd8..abe33f6ab8 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -11,17 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; -use alloc::string::String; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::Encoding; +use zenoh_protocol::{ + common::imsg, + core::encoding::{flag, Encoding, EncodingId}, +}; + +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { - 1 + self.w_len(x.suffix()) + let mut len = self.w_len((x.id as u32) << 1); + if let Some(schema) = x.schema.as_ref() { + len += self.w_len(schema.as_slice()); + } + len } } @@ -32,9 +39,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Encoding) -> Self::Output { - let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, *x.prefix() as u8)?; - zodec.write(&mut *writer, x.suffix())?; + let mut id = (x.id as u32) << 1; + + if x.schema.is_some() { + id |= flag::S; + } + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, id)?; + if let Some(schema) = x.schema.as_ref() { + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, schema)?; + } Ok(()) } } @@ -46,10 +61,20 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let zodec = Zenoh080Bounded::::new(); - let prefix: u8 = zodec.read(&mut *reader)?; - let suffix: String = zodec.read(&mut *reader)?; - let encoding = Encoding::new(prefix, suffix).map_err(|_| DidntRead)?; + let zodec = Zenoh080Bounded::::new(); + let id: u32 = zodec.read(&mut *reader)?; + let (id, has_schema) = ( + (id >> 1) as EncodingId, + imsg::has_flag(id as u8, flag::S as u8), + ); + + let mut schema = None; + if has_schema { + let zodec = Zenoh080Bounded::::new(); + schema = Some(zodec.read(&mut *reader)?); + } + + let encoding = Encoding { id, schema }; Ok(encoding) } } diff --git a/commons/zenoh-codec/src/core/locator.rs b/commons/zenoh-codec/src/core/locator.rs index 0bbd28a189..464b1bbb05 100644 --- 
a/commons/zenoh-codec/src/core/locator.rs +++ b/commons/zenoh-codec/src/core/locator.rs @@ -11,15 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::core::Locator; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + impl WCodec<&Locator, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index 1f48def695..8230cdd9ac 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -13,7 +13,6 @@ // mod encoding; mod locator; -mod property; #[cfg(feature = "shared-memory")] mod shm; mod timestamp; @@ -23,13 +22,15 @@ mod zenohid; mod zint; mod zslice; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // [u8; N] macro_rules! array_impl { ($n:expr) => { diff --git a/commons/zenoh-codec/src/core/property.rs b/commons/zenoh-codec/src/core/property.rs deleted file mode 100644 index bb7f760208..0000000000 --- a/commons/zenoh-codec/src/core/property.rs +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
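The Property codec deleted below writes a slice of properties as a count followed by each (key, value) pair, with every integer in the codec's variable-length encoding. A rough standalone sketch of that framing, using fixed-width little-endian integers in place of the codec's VLE integers (names are illustrative):

struct Property {
    key: u64,
    value: Vec<u8>,
}

fn write_properties(out: &mut Vec<u8>, props: &[Property]) {
    out.extend((props.len() as u64).to_le_bytes()); // the real codec writes a VLE usize
    for p in props {
        out.extend(p.key.to_le_bytes()); // VLE u64 in the real codec
        out.extend((p.value.len() as u64).to_le_bytes()); // length prefix of the value
        out.extend(&p.value); // raw value bytes
    }
}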
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{RCodec, WCodec, Zenoh080}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::core::Property; - -impl WCodec<&Property, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Property) -> Self::Output { - let Property { key, value } = x; - - self.write(&mut *writer, key)?; - self.write(&mut *writer, value.as_slice())?; - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let key: u64 = self.read(&mut *reader)?; - let value: Vec = self.read(&mut *reader)?; - - Ok(Property { key, value }) - } -} - -impl WCodec<&[Property], &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &[Property]) -> Self::Output { - self.write(&mut *writer, x.len())?; - for p in x.iter() { - self.write(&mut *writer, p)?; - } - - Ok(()) - } -} - -impl RCodec, &mut R> for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result, Self::Error> { - let num: usize = self.read(&mut *reader)?; - - let mut ps = Vec::with_capacity(num); - for _ in 0..num { - let p: Property = self.read(&mut *reader)?; - ps.push(p); - } - - Ok(ps) - } -} diff --git a/commons/zenoh-codec/src/core/shm.rs b/commons/zenoh-codec/src/core/shm.rs index 69c5c59ce0..b67716611d 100644 --- a/commons/zenoh-codec/src/core/shm.rs +++ b/commons/zenoh-codec/src/core/shm.rs @@ -11,48 +11,182 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; +use std::num::NonZeroUsize; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_shm::SharedMemoryBufInfo; +use zenoh_shm::{ + api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, ShmBufInfo, +}; + +use crate::{RCodec, WCodec, Zenoh080}; + +impl WCodec<&Descriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Descriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index_and_bitpos)?; + Ok(()) + } +} + +impl WCodec<&HeaderDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &HeaderDescriptor) -> Self::Output { + self.write(&mut *writer, x.id)?; + self.write(&mut *writer, x.index)?; + Ok(()) + } +} + +impl WCodec<&ChunkDescriptor, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &ChunkDescriptor) -> Self::Output { + self.write(&mut *writer, x.segment)?; + self.write(&mut *writer, x.chunk)?; + self.write(&mut *writer, x.len)?; + Ok(()) + } +} + +impl WCodec for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: NonZeroUsize) -> Self::Output { + self.write(&mut *writer, x.get())?; + Ok(()) + } +} -impl WCodec<&SharedMemoryBufInfo, &mut W> for Zenoh080 +impl WCodec<&ShmBufInfo, &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &SharedMemoryBufInfo) -> Self::Output { - let SharedMemoryBufInfo { - offset, - length, - shm_manager, - kind, + fn 
write(self, writer: &mut W, x: &ShmBufInfo) -> Self::Output { + let ShmBufInfo { + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } = x; - self.write(&mut *writer, offset)?; - self.write(&mut *writer, length)?; - self.write(&mut *writer, shm_manager.as_str())?; - self.write(&mut *writer, kind)?; + self.write(&mut *writer, data_descriptor)?; + self.write(&mut *writer, shm_protocol)?; + self.write(&mut *writer, *data_len)?; + self.write(&mut *writer, watchdog_descriptor)?; + self.write(&mut *writer, header_descriptor)?; + self.write(&mut *writer, generation)?; Ok(()) } } -impl RCodec for Zenoh080 +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index_and_bitpos = self.read(&mut *reader)?; + + Ok(Descriptor { + id, + index_and_bitpos, + }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let id = self.read(&mut *reader)?; + let index = self.read(&mut *reader)?; + + Ok(HeaderDescriptor { id, index }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let segment = self.read(&mut *reader)?; + let chunk = self.read(&mut *reader)?; + let len = self.read(&mut *reader)?; + + Ok(ChunkDescriptor { + segment, + chunk, + len, + }) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let size: usize = self.read(&mut *reader)?; + let size = NonZeroUsize::new(size).ok_or(DidntRead)?; + Ok(size) + } +} + +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { - let offset: usize = self.read(&mut *reader)?; - let length: usize = self.read(&mut *reader)?; - let shm_manager: String = self.read(&mut *reader)?; - let kind: u8 = self.read(&mut *reader)?; + fn read(self, reader: &mut R) -> Result { + let data_descriptor = self.read(&mut *reader)?; + let shm_protocol = self.read(&mut *reader)?; + let data_len = self.read(&mut *reader)?; + let watchdog_descriptor = self.read(&mut *reader)?; + let header_descriptor = self.read(&mut *reader)?; + let generation = self.read(&mut *reader)?; - let shm_info = SharedMemoryBufInfo::new(offset, length, shm_manager, kind); + let shm_info = ShmBufInfo::new( + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, + ); Ok(shm_info) } } diff --git a/commons/zenoh-codec/src/core/timestamp.rs b/commons/zenoh-codec/src/core/timestamp.rs index 4891643192..95149144dd 100644 --- a/commons/zenoh-codec/src/core/timestamp.rs +++ b/commons/zenoh-codec/src/core/timestamp.rs @@ -11,17 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::{Timestamp, ZenohId}; +use zenoh_protocol::core::{Timestamp, ZenohIdProto}; + +use crate::{LCodec, RCodec, WCodec, Zenoh080}; impl LCodec<&Timestamp> for Zenoh080 { fn w_len(self, x: &Timestamp) -> usize { - self.w_len(x.get_time().as_u64()) + self.w_len(x.get_id().size()) + let id = x.get_id(); + self.w_len(x.get_time().as_u64()) + self.w_len(&id.to_le_bytes()[..id.size()]) } } @@ -51,7 +54,7 @@ where if size > (uhlc::ID::MAX_SIZE) { return Err(DidntRead); } - 
let mut id = [0_u8; ZenohId::MAX_SIZE]; + let mut id = [0_u8; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..size])?; let time = uhlc::NTP64(time); diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index 6caba6c8c7..d5b91f75ed 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_protocol::{ network::Mapping, }; +use crate::{core::Zenoh080Bounded, RCodec, WCodec, Zenoh080, Zenoh080Condition}; + impl WCodec<&WireExpr<'_>, &mut W> for Zenoh080 where W: Writer, @@ -65,7 +67,7 @@ where Ok(WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, }) } } diff --git a/commons/zenoh-codec/src/core/zbuf.rs b/commons/zenoh-codec/src/core/zbuf.rs index 137030e66c..8b8ead6ca0 100644 --- a/commons/zenoh-codec/src/core/zbuf.rs +++ b/commons/zenoh-codec/src/core/zbuf.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, Reader}, @@ -19,6 +18,8 @@ use zenoh_buffers::{ ZBuf, }; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZBuf bounded macro_rules! zbuf_impl { ($bound:ty) => { @@ -100,9 +101,10 @@ impl LCodec<&ZBuf> for Zenoh080 { // ZBuf sliced #[cfg(feature = "shared-memory")] mod shm { + use zenoh_buffers::{ZSlice, ZSliceKind}; + use super::*; use crate::Zenoh080Sliced; - use zenoh_buffers::{ZSlice, ZSliceKind}; const RAW: u8 = 0; const SHM_PTR: u8 = 1; diff --git a/commons/zenoh-codec/src/core/zenohid.rs b/commons/zenoh-codec/src/core/zenohid.rs index 6c53d4e63f..4ea06f4887 100644 --- a/commons/zenoh-codec/src/core/zenohid.rs +++ b/commons/zenoh-codec/src/core/zenohid.rs @@ -11,74 +11,76 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -use zenoh_protocol::core::ZenohId; +use zenoh_protocol::core::ZenohIdProto; + +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Length}; -impl LCodec<&ZenohId> for Zenoh080 { - fn w_len(self, x: &ZenohId) -> usize { +impl LCodec<&ZenohIdProto> for Zenoh080 { + fn w_len(self, x: &ZenohIdProto) -> usize { x.size() } } -impl WCodec<&ZenohId, &mut W> for Zenoh080 +impl WCodec<&ZenohIdProto, &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &ZenohId) -> Self::Output { + fn write(self, writer: &mut W, x: &ZenohIdProto) -> Self::Output { self.write(&mut *writer, &x.to_le_bytes()[..x.size()]) } } -impl RCodec for Zenoh080 +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { let size: usize = self.read(&mut *reader)?; - if size > ZenohId::MAX_SIZE { + if size > ZenohIdProto::MAX_SIZE { return Err(DidntRead); } - let mut id = [0; ZenohId::MAX_SIZE]; + let mut id = [0; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..size])?; - ZenohId::try_from(&id[..size]).map_err(|_| DidntRead) + ZenohIdProto::try_from(&id[..size]).map_err(|_| DidntRead) } } -impl 
WCodec<&ZenohId, &mut W> for Zenoh080Length +impl WCodec<&ZenohIdProto, &mut W> for Zenoh080Length where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &ZenohId) -> Self::Output { - if self.length > ZenohId::MAX_SIZE { + fn write(self, writer: &mut W, x: &ZenohIdProto) -> Self::Output { + if self.length > ZenohIdProto::MAX_SIZE { return Err(DidntWrite); } writer.write_exact(&x.to_le_bytes()[..x.size()]) } } -impl RCodec for Zenoh080Length +impl RCodec for Zenoh080Length where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { - if self.length > ZenohId::MAX_SIZE { + fn read(self, reader: &mut R) -> Result { + if self.length > ZenohIdProto::MAX_SIZE { return Err(DidntRead); } - let mut id = [0; ZenohId::MAX_SIZE]; + let mut id = [0; ZenohIdProto::MAX_SIZE]; reader.read_exact(&mut id[..self.length])?; - ZenohId::try_from(&id[..self.length]).map_err(|_| DidntRead) + ZenohIdProto::try_from(&id[..self.length]).map_err(|_| DidntRead) } } diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index 8167d895c8..a42395b781 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -11,47 +11,49 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; -const VLE_LEN: usize = 10; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + +const VLE_LEN_MAX: usize = vle_len(u64::MAX); + +const fn vle_len(x: u64) -> usize { + const B1: u64 = u64::MAX << 7; + const B2: u64 = u64::MAX << (7 * 2); + const B3: u64 = u64::MAX << (7 * 3); + const B4: u64 = u64::MAX << (7 * 4); + const B5: u64 = u64::MAX << (7 * 5); + const B6: u64 = u64::MAX << (7 * 6); + const B7: u64 = u64::MAX << (7 * 7); + const B8: u64 = u64::MAX << (7 * 8); + + if (x & B1) == 0 { + 1 + } else if (x & B2) == 0 { + 2 + } else if (x & B3) == 0 { + 3 + } else if (x & B4) == 0 { + 4 + } else if (x & B5) == 0 { + 5 + } else if (x & B6) == 0 { + 6 + } else if (x & B7) == 0 { + 7 + } else if (x & B8) == 0 { + 8 + } else { + 9 + } +} impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { - const B1: u64 = u64::MAX << 7; - const B2: u64 = u64::MAX << (7 * 2); - const B3: u64 = u64::MAX << (7 * 3); - const B4: u64 = u64::MAX << (7 * 4); - const B5: u64 = u64::MAX << (7 * 5); - const B6: u64 = u64::MAX << (7 * 6); - const B7: u64 = u64::MAX << (7 * 7); - const B8: u64 = u64::MAX << (7 * 8); - const B9: u64 = u64::MAX << (7 * 9); - - if (x & B1) == 0 { - 1 - } else if (x & B2) == 0 { - 2 - } else if (x & B3) == 0 { - 3 - } else if (x & B4) == 0 { - 4 - } else if (x & B5) == 0 { - 5 - } else if (x & B6) == 0 { - 6 - } else if (x & B7) == 0 { - 7 - } else if (x & B8) == 0 { - 8 - } else if (x & B9) == 0 { - 9 - } else { - 10 - } + vle_len(x) } } @@ -110,18 +112,37 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN, move |buffer| { + let write = move |buffer: &mut [u8]| { let mut len = 0; - let mut b = x as u8; - while x > 0x7f { - buffer[len] = b | 0x80; + while (x & !0x7f_u64) != 0 { + // SAFETY: buffer is guaranteed to be VLE_LEN_MAX long, where VLE_LEN_MAX is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // so the loop runs at most VLE_LEN_MAX iterations.
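+ // Worked example, for illustration: x = 300 = 0b1_0010_1100. The first iteration
+ // writes the low 7 bits with the continuation bit set, 0x2C | 0x80 = 0xAC, and
+ // shifts x down to 2; the loop then exits and the final byte 0x02 is written
+ // below, so 300 encodes as [0xAC, 0x02] and vle_len(300) == 2. The loop reaches
+ // VLE_LEN_MAX (9) iterations only when bit 63 of x is set, in which case the
+ // 9th byte already carries the top 8 bits verbatim and the trailing write is skipped.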
+ unsafe { + *buffer.get_unchecked_mut(len) = (x as u8) | 0x80_u8; + } len += 1; x >>= 7; - b = x as u8; } - buffer[len] = b; - len + 1 - })?; + // In case len == VLE_LEN_MAX, all the bits have already been written in the last iteration. + // Else we haven't written all the necessary bytes yet. + if len != VLE_LEN_MAX { + // SAFETY: buffer is guaranteed to be VLE_LEN_MAX long, where VLE_LEN_MAX is + // the maximum number of bytes a VLE can take once encoded. + // I.e.: x is shifted 7 bits to the right every iteration, + // so the loop runs at most VLE_LEN_MAX iterations. + unsafe { + *buffer.get_unchecked_mut(len) = x as u8; + } + len += 1; + } + // The number of written bytes + len + }; + // SAFETY: the write algorithm guarantees that the returned length is less than or equal to + // `VLE_LEN_MAX`. + unsafe { writer.with_slot(VLE_LEN_MAX, write)? }; Ok(()) } } @@ -137,19 +158,14 @@ where let mut v = 0; let mut i = 0; - let mut k = VLE_LEN; - while b > 0x7f && k > 0 { - v |= ((b & 0x7f) as u64) << i; - i += 7; + // i stops at 7 * (VLE_LEN_MAX - 1): the last byte is consumed whole below, as it carries the remaining high bits and its top bit is data rather than a continuation flag. + while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN_MAX - 1) { + v |= ((b & 0x7f_u8) as u64) << i; b = reader.read_u8()?; - k -= 1; - } - if k > 0 { - v |= ((b & 0x7f) as u64) << i; - Ok(v) - } else { - Err(DidntRead) + i += 7; } + v |= (b as u64) << i; + Ok(v) } } diff --git a/commons/zenoh-codec/src/core/zslice.rs b/commons/zenoh-codec/src/core/zslice.rs index cea0961b51..fe907ed273 100644 --- a/commons/zenoh-codec/src/core/zslice.rs +++ b/commons/zenoh-codec/src/core/zslice.rs @@ -11,13 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, ZSlice, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Bounded}; + // ZSlice - Bounded macro_rules!
zslice_impl { ($bound:ty) => { diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 20916dc359..7c3b797d5d 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use alloc::string::String; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, HasWriter, Writer}, @@ -22,13 +22,13 @@ use zenoh_protocol::{ common::{iext, imsg, ZExtZ64}, core::{ExprId, ExprLen, WireExpr}, network::{ - declare::{ - self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - }, + declare::{self, common, keyexpr, queryable, subscriber, token, Declare, DeclareBody}, id, Mapping, }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + // Declaration impl WCodec<&DeclareBody, &mut W> for Zenoh080 where @@ -46,9 +46,7 @@ where DeclareBody::UndeclareQueryable(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => self.write(&mut *writer, r)?, - DeclareBody::DeclareInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::FinalInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::UndeclareInterest(r) => self.write(&mut *writer, r)?, + DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, } Ok(()) @@ -75,9 +73,7 @@ where U_QUERYABLE => DeclareBody::UndeclareQueryable(codec.read(&mut *reader)?), D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), - D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), - F_INTEREST => DeclareBody::FinalInterest(codec.read(&mut *reader)?), - U_INTEREST => DeclareBody::UndeclareInterest(codec.read(&mut *reader)?), + D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -94,6 +90,7 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { + interest_id, ext_qos, ext_tstamp, ext_nodeid, @@ -102,16 +99,23 @@ where // Header let mut header = id::DECLARE; - let mut n_exts = ((ext_qos != &declare::ext::QoSType::default()) as u8) + if x.interest_id.is_some() { + header |= declare::flag::I; + } + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &declare::ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= declare::flag::Z; } self.write(&mut *writer, header)?; + if let Some(interest_id) = interest_id { + self.write(&mut *writer, interest_id)?; + } + // Extensions - if ext_qos != &declare::ext::QoSType::default() { + if ext_qos != &declare::ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -119,7 +123,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &declare::ext::NodeIdType::default() { + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -156,10 +160,15 @@ where return Err(DidntRead); } + let mut interest_id = None; + if imsg::has_flag(self.header, declare::flag::I) { + interest_id = Some(self.codec.read(&mut *reader)?); + } + // Extensions - let mut ext_qos = 
declare::ext::QoSType::default(); + let mut ext_qos = declare::ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = declare::ext::NodeIdType::default(); + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); while has_ext { @@ -191,14 +200,68 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - body, + interest_id, ext_qos, ext_tstamp, ext_nodeid, + body, }) } } +// Final +impl WCodec<&common::DeclareFinal, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &common::DeclareFinal) -> Self::Output { + let common::DeclareFinal = x; + + // Header + let header = declare::id::D_FINAL; + self.write(&mut *writer, header)?; + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != declare::id::D_FINAL { + return Err(DidntRead); + } + + // Extensions + let has_ext = imsg::has_flag(self.header, token::flag::Z); + if has_ext { + extension::skip_all(reader, "Final")?; + } + + Ok(common::DeclareFinal) + } +} + // DeclareKeyExpr impl WCodec<&keyexpr::DeclareKeyExpr, &mut W> for Zenoh080 where @@ -340,11 +403,11 @@ where // Header let mut header = declare::id::D_SUBSCRIBER; - let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::default()) as u8; + let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -357,7 +420,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_info != &subscriber::ext::SubscriberInfo::default() { + if ext_info != &subscriber::ext::SubscriberInfo::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -402,7 +465,7 @@ where }; // Extensions - let mut ext_info = subscriber::ext::SubscriberInfo::default(); + let mut ext_info = subscriber::ext::SubscriberInfo::DEFAULT; let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); while has_ext { @@ -440,14 +503,19 @@ where let subscriber::UndeclareSubscriber { id, ext_wire_expr } = x; // Header - let header = declare::id::U_SUBSCRIBER | subscriber::flag::Z; + let mut header = declare::id::U_SUBSCRIBER; + if !ext_wire_expr.is_null() { + header |= subscriber::flag::Z; + } self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; + if !ext_wire_expr.is_null() { + self.write(&mut *writer, (ext_wire_expr, false))?; + } Ok(()) } @@ -482,7 +550,6 @@ where let id: subscriber::SubscriberId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); @@ -506,7 +573,46 @@ where } // QueryableInfo -crate::impl_zextz64!(queryable::ext::QueryableInfo, queryable::ext::Info::ID); +impl WCodec<(&queryable::ext::QueryableInfoType, bool), &mut W> for Zenoh080 
+where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + fn write(self, writer: &mut W, x: (&queryable::ext::QueryableInfoType, bool)) -> Self::Output { + let (x, more) = x; + + let mut flags: u8 = 0; + if x.complete { + flags |= queryable::ext::flag::C; + } + let v: u64 = (flags as u64) | ((x.distance as u64) << 8); + let ext = queryable::ext::QueryableInfo::new(v); + + self.write(&mut *writer, (&ext, more)) + } +} + +impl RCodec<(queryable::ext::QueryableInfoType, bool), &mut R> for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read( + self, + reader: &mut R, + ) -> Result<(queryable::ext::QueryableInfoType, bool), Self::Error> { + let (ext, more): (queryable::ext::QueryableInfo, bool) = self.read(&mut *reader)?; + + let complete = imsg::has_flag(ext.value as u8, queryable::ext::flag::C); + let distance = (ext.value >> 8) as u16; + + Ok(( + queryable::ext::QueryableInfoType { complete, distance }, + more, + )) + } +} // DeclareQueryable impl WCodec<&queryable::DeclareQueryable, &mut W> for Zenoh080 @@ -524,11 +630,11 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::default()) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfoType::DEFAULT) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -539,9 +645,9 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfo::default() { + if ext_info != &queryable::ext::QueryableInfoType::DEFAULT { n_exts -= 1; - self.write(&mut *writer, (*ext_info, n_exts != 0))?; + self.write(&mut *writer, (ext_info, n_exts != 0))?; } Ok(()) @@ -584,15 +690,15 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfo::default(); + let mut ext_info = queryable::ext::QueryableInfoType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); match iext::eid(ext) { - queryable::ext::Info::ID => { - let (i, ext): (queryable::ext::QueryableInfo, bool) = + queryable::ext::QueryableInfo::ID => { + let (i, ext): (queryable::ext::QueryableInfoType, bool) = eodec.read(&mut *reader)?; ext_info = i; has_ext = ext; @@ -664,7 +770,6 @@ where let id: queryable::QueryableId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); @@ -699,7 +804,7 @@ where // Header let mut header = declare::id::D_TOKEN; - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -812,10 +917,9 @@ where let id: token::TokenId = self.codec.read(&mut *reader)?; // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); + let mut has_ext = imsg::has_flag(self.header, token::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); @@ -835,223 +939,6 @@ where } } -// DeclareInterest -impl 
WCodec<&interest::DeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { - let interest::DeclareInterest { - id, - wire_expr, - interest, - } = x; - - // Header - let mut header = declare::id::D_INTEREST; - if wire_expr.mapping != Mapping::default() { - header |= subscriber::flag::M; - } - if wire_expr.has_suffix() { - header |= subscriber::flag::N; - } - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - self.write(&mut *writer, wire_expr)?; - self.write(&mut *writer, interest.as_u8())?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::D_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - let ccond = Zenoh080Condition::new(imsg::has_flag(self.header, token::flag::N)); - let mut wire_expr: WireExpr<'static> = ccond.read(&mut *reader)?; - wire_expr.mapping = if imsg::has_flag(self.header, token::flag::M) { - Mapping::Sender - } else { - Mapping::Receiver - }; - let interest: u8 = self.codec.read(&mut *reader)?; - - // Extensions - let has_ext = imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "DeclareInterest")?; - } - - Ok(interest::DeclareInterest { - id, - wire_expr, - interest: interest.into(), - }) - } -} - -// FinalInterest -impl WCodec<&interest::FinalInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::FinalInterest) -> Self::Output { - let interest::FinalInterest { id } = x; - - // Header - let header = declare::id::F_INTEREST; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::F_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - let has_ext = imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "FinalInterest")?; - } - - Ok(interest::FinalInterest { id }) - } -} - -// UndeclareInterest -impl WCodec<&interest::UndeclareInterest, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &interest::UndeclareInterest) -> Self::Output { - let interest::UndeclareInterest { id, ext_wire_expr } = x; - - // Header - let header = declare::id::U_INTEREST | interest::flag::Z; - self.write(&mut *writer, header)?; - - // Body - self.write(&mut *writer, id)?; - - // Extension - self.write(&mut *writer, (ext_wire_expr, false))?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn 
read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::U_INTEREST { - return Err(DidntRead); - } - - // Body - let id: interest::InterestId = self.codec.read(&mut *reader)?; - - // Extensions - // WARNING: this is a temporary and mandatory extension used for undeclarations - let mut ext_wire_expr = common::ext::WireExprType::null(); - - let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - common::ext::WireExprExt::ID => { - let (we, ext): (common::ext::WireExprType, bool) = eodec.read(&mut *reader)?; - ext_wire_expr = we; - has_ext = ext; - } - _ => { - has_ext = extension::skip(reader, "UndeclareInterest", ext)?; - } - } - } - - Ok(interest::UndeclareInterest { id, ext_wire_expr }) - } -} - // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where @@ -1071,7 +958,7 @@ where if x.wire_expr.has_suffix() { flags |= 1; } - if let Mapping::Receiver = wire_expr.mapping { + if let Mapping::Sender = wire_expr.mapping { flags |= 1 << 1; } codec.write(&mut zriter, flags)?; @@ -1111,9 +998,9 @@ where String::new() }; let mapping = if imsg::has_flag(flags, 1 << 1) { - Mapping::Receiver - } else { Mapping::Sender + } else { + Mapping::Receiver }; Ok(( diff --git a/commons/zenoh-codec/src/network/interest.rs b/commons/zenoh-codec/src/network/interest.rs new file mode 100644 index 0000000000..5ebdc91f71 --- /dev/null +++ b/commons/zenoh-codec/src/network/interest.rs @@ -0,0 +1,187 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
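The Interest codec introduced below keeps the interest mode in the two header bits directly above the 5-bit message id, leaving the top bit for the Z (extensions) flag. A standalone sketch of that mapping, assuming HEADER_BITS = 5 as in zenoh-protocol's imsg module (the enum mirrors InterestMode; helper names are illustrative):

const HEADER_BITS: u8 = 5;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Mode {
    Final = 0b00,
    Current = 0b01,
    Future = 0b10,
    CurrentFuture = 0b11,
}

fn set_mode(header: u8, mode: Mode) -> u8 {
    // Place the 2-bit mode above the message id, as the codec below does.
    header | ((mode as u8) << HEADER_BITS)
}

fn get_mode(header: u8) -> Mode {
    // Mask to 2 bits so the Z flag in the top bit is ignored.
    match (header >> HEADER_BITS) & 0b11 {
        0b00 => Mode::Final,
        0b01 => Mode::Current,
        0b10 => Mode::Future,
        _ => Mode::CurrentFuture,
    }
}

Note that in the codec below, InterestMode::Final doubles as the "no options, no wire expression" case, which is why the options byte and wire expression are only written and read for the other three modes.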
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::{ + common::{ + iext, + imsg::{self, HEADER_BITS}, + }, + core::WireExpr, + network::{ + declare, id, + interest::{self, Interest, InterestMode, InterestOptions}, + Mapping, + }, +}; + +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + +// Interest +impl WCodec<&Interest, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Interest) -> Self::Output { + let Interest { + id, + mode, + options: _, // Compute the options on-the-fly according to Interest fields + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + } = x; + + // Header + let mut header = id::INTEREST; + header |= match mode { + InterestMode::Final => 0b00, + InterestMode::Current => 0b01, + InterestMode::Future => 0b10, + InterestMode::CurrentFuture => 0b11, + } << HEADER_BITS; + let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + + (ext_tstamp.is_some() as u8) + + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); + if n_exts != 0 { + header |= declare::flag::Z; + } + self.write(&mut *writer, header)?; + + self.write(&mut *writer, id)?; + + if *mode != InterestMode::Final { + self.write(&mut *writer, x.options())?; + if let Some(we) = wire_expr.as_ref() { + self.write(&mut *writer, we)?; + } + } + + // Extensions + if ext_qos != &declare::ext::QoSType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_qos, n_exts != 0))?; + } + if let Some(ts) = ext_tstamp.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (ts, n_exts != 0))?; + } + if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { + n_exts -= 1; + self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; + } + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != id::INTEREST { + return Err(DidntRead); + } + + let id = self.codec.read(&mut *reader)?; + let mode = match (self.header >> HEADER_BITS) & 0b11 { + 0b00 => InterestMode::Final, + 0b01 => InterestMode::Current, + 0b10 => InterestMode::Future, + 0b11 => InterestMode::CurrentFuture, + _ => return Err(DidntRead), + }; + + let mut options = InterestOptions::empty(); + let mut wire_expr = None; + if mode != InterestMode::Final { + let options_byte: u8 = self.codec.read(&mut *reader)?; + options = InterestOptions::from(options_byte); + if options.restricted() { + let ccond = Zenoh080Condition::new(options.named()); + let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; + we.mapping = if options.mapping() { + Mapping::Sender + } else { + Mapping::Receiver + }; + wire_expr = Some(we); + } + } + + // Extensions + let mut ext_qos = declare::ext::QoSType::DEFAULT; + let mut ext_tstamp = None; + let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; + + let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); + while has_ext { + let ext: u8 = self.codec.read(&mut *reader)?; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + declare::ext::QoS::ID => { + let (q, ext): 
(interest::ext::QoSType, bool) = eodec.read(&mut *reader)?; + ext_qos = q; + has_ext = ext; + } + declare::ext::Timestamp::ID => { + let (t, ext): (interest::ext::TimestampType, bool) = + eodec.read(&mut *reader)?; + ext_tstamp = Some(t); + has_ext = ext; + } + declare::ext::NodeId::ID => { + let (nid, ext): (interest::ext::NodeIdType, bool) = eodec.read(&mut *reader)?; + ext_nodeid = nid; + has_ext = ext; + } + _ => { + has_ext = extension::skip(reader, "Declare", ext)?; + } + } + } + + Ok(Interest { + id, + mode, + options, + wire_expr, + ext_qos, + ext_tstamp, + ext_nodeid, + }) + } +} diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index c1f2489b88..c68a3470aa 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -12,22 +12,24 @@ // ZettaScale Zenoh Team, // mod declare; +mod interest; mod oam; mod push; mod request; mod response; -use crate::{ - LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, - core::{Reliability, ZenohId}, - network::{ext::EntityIdType, *}, + core::{EntityId, Reliability, ZenohIdProto}, + network::{ext::EntityGlobalIdType, *}, +}; + +use crate::{ + LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length, Zenoh080Reliability, }; // NetworkMessage @@ -45,6 +47,7 @@ where NetworkBody::Request(b) => self.write(&mut *writer, b), NetworkBody::Response(b) => self.write(&mut *writer, b), NetworkBody::ResponseFinal(b) => self.write(&mut *writer, b), + NetworkBody::Interest(b) => self.write(&mut *writer, b), NetworkBody::Declare(b) => self.write(&mut *writer, b), NetworkBody::OAM(b) => self.write(&mut *writer, b), } @@ -58,7 +61,7 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let codec = Zenoh080Reliability::new(Reliability::default()); + let codec = Zenoh080Reliability::new(Reliability::DEFAULT); codec.read(reader) } } @@ -89,6 +92,7 @@ where id::REQUEST => NetworkBody::Request(self.read(&mut *reader)?), id::RESPONSE => NetworkBody::Response(self.read(&mut *reader)?), id::RESPONSE_FINAL => NetworkBody::ResponseFinal(self.read(&mut *reader)?), + id::INTEREST => NetworkBody::Interest(self.read(&mut *reader)?), id::DECLARE => NetworkBody::Declare(self.read(&mut *reader)?), id::OAM => NetworkBody::OAM(self.read(&mut *reader)?), _ => return Err(DidntRead), @@ -218,21 +222,21 @@ where } // Extension: EntityId -impl LCodec<&ext::EntityIdType<{ ID }>> for Zenoh080 { - fn w_len(self, x: &ext::EntityIdType<{ ID }>) -> usize { - let EntityIdType { zid, eid } = x; +impl LCodec<&ext::EntityGlobalIdType<{ ID }>> for Zenoh080 { + fn w_len(self, x: &ext::EntityGlobalIdType<{ ID }>) -> usize { + let EntityGlobalIdType { zid, eid } = x; 1 + self.w_len(zid) + self.w_len(*eid) } } -impl WCodec<(&ext::EntityIdType<{ ID }>, bool), &mut W> for Zenoh080 +impl WCodec<(&ext::EntityGlobalIdType<{ ID }>, bool), &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (&ext::EntityIdType<{ ID }>, bool)) -> Self::Output { + fn write(self, writer: &mut W, x: (&ext::EntityGlobalIdType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; @@ -248,23 +252,23 @@ where } } -impl RCodec<(ext::EntityIdType<{ ID }>, bool), &mut R> 
for Zenoh080Header +impl RCodec<(ext::EntityGlobalIdType<{ ID }>, bool), &mut R> for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::EntityIdType<{ ID }>, bool), Self::Error> { + fn read(self, reader: &mut R) -> Result<(ext::EntityGlobalIdType<{ ID }>, bool), Self::Error> { let (_, more): (ZExtZBufHeader<{ ID }>, bool) = self.read(&mut *reader)?; let flags: u8 = self.codec.read(&mut *reader)?; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; - Ok((ext::EntityIdType { zid, eid }, more)) + Ok((ext::EntityGlobalIdType { zid, eid }, more)) } } diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index ff6daeb020..172b3f1058 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, @@ -52,8 +53,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -63,7 +63,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -115,7 +115,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index 10a8489b29..2c2e11a718 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +26,8 @@ use zenoh_protocol::{ zenoh::PushBody, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Condition, Zenoh080Header}; + impl WCodec<&Push, &mut W> for Zenoh080 where W: Writer, @@ -44,13 +45,13 @@ where // Header let mut header = id::PUSH; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -62,7 +63,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != 
&ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -70,7 +71,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -116,9 +117,9 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 19711ff147..21f42709c4 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::RequestBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Target impl WCodec<(&ext::TargetType, bool), &mut W> for Zenoh080 where @@ -43,8 +44,6 @@ where ext::TargetType::BestMatching => 0, ext::TargetType::All => 1, ext::TargetType::AllComplete => 2, - #[cfg(feature = "complete_n")] - ext::TargetType::Complete(n) => 3 + *n, }; let ext = ext::Target::new(v); self.write(&mut *writer, (&ext, more)) @@ -63,9 +62,6 @@ where 0 => ext::TargetType::BestMatching, 1 => ext::TargetType::All, 2 => ext::TargetType::AllComplete, - #[cfg(feature = "complete_n")] - n => ext::TargetType::Complete(n - 3), - #[cfg(not(feature = "complete_n"))] _ => return Err(DidntRead), }; Ok((rt, more)) @@ -93,16 +89,16 @@ where // Header let mut header = id::REQUEST; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_target != &ext::TargetType::default()) as u8) + + ((ext_target != &ext::TargetType::DEFAULT) as u8) + (ext_budget.is_some() as u8) + (ext_timeout.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::default()) as u8); + + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -115,7 +111,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +119,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_target != &ext::TargetType::default() { + if ext_target != &ext::TargetType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (ext_target, n_exts != 0))?; } @@ -137,7 +133,7 @@ where let e = ext::Timeout::new(to.as_millis() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::default() { + if ext_nodeid != &ext::NodeIdType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -185,10 +181,10 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = 
ext::QoSType::DEFAULT; let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::default(); - let mut ext_target = ext::TargetType::default(); + let mut ext_nodeid = ext::NodeIdType::DEFAULT; + let mut ext_target = ext::TargetType::DEFAULT; let mut ext_limit = None; let mut ext_timeout = None; diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index bec7df2967..d94316de8e 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ zenoh::ResponseBody, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Condition, Zenoh080Header, +}; + // Response impl WCodec<&Response, &mut W> for Zenoh080 where @@ -48,13 +49,13 @@ where // Header let mut header = id::RESPONSE; - let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8) + (ext_respid.is_some() as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::default() { + if wire_expr.mapping != Mapping::DEFAULT { header |= flag::M; } if wire_expr.has_suffix() { @@ -67,7 +68,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +124,7 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut ext_respid = None; @@ -183,8 +184,7 @@ where // Header let mut header = id::RESPONSE_FINAL; - let mut n_exts = - ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -194,7 +194,7 @@ where self.write(&mut *writer, rid)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -236,7 +236,7 @@ where let rid: RequestId = bodec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/scouting/hello.rs b/commons/zenoh-codec/src/scouting/hello.rs index 430201133e..770519855b 100644 --- a/commons/zenoh-codec/src/scouting/hello.rs +++ b/commons/zenoh-codec/src/scouting/hello.rs @@ -11,29 +11,31 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::{vec, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::{ common::{imsg, ZExtUnknown}, - core::{Locator, WhatAmI, ZenohId}, + core::{Locator, WhatAmI, ZenohIdProto}, scouting::{ - hello::{flag, Hello}, + hello::{flag, HelloProto}, id, }, }; -impl WCodec<&Hello, &mut W> for Zenoh080 +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + +impl WCodec<&HelloProto, &mut W> for Zenoh080 where W: Writer, { 
type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: &Hello) -> Self::Output { - let Hello { + fn write(self, writer: &mut W, x: &HelloProto) -> Self::Output { + let HelloProto { version, whatami, zid, @@ -71,26 +73,26 @@ where } } -impl RCodec for Zenoh080 +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { let header: u8 = self.read(&mut *reader)?; let codec = Zenoh080Header::new(header); codec.read(reader) } } -impl RCodec for Zenoh080Header +impl RCodec for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { + fn read(self, reader: &mut R) -> Result { if imsg::mid(self.header) != id::HELLO { return Err(DidntRead); } @@ -106,7 +108,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let locators = if imsg::has_flag(self.header, flag::L) { let locs: Vec = self.codec.read(&mut *reader)?; @@ -122,7 +124,7 @@ where has_extensions = more; } - Ok(Hello { + Ok(HelloProto { version, zid, whatami, diff --git a/commons/zenoh-codec/src/scouting/mod.rs b/commons/zenoh-codec/src/scouting/mod.rs index bbedce4282..d1f0b883a1 100644 --- a/commons/zenoh-codec/src/scouting/mod.rs +++ b/commons/zenoh-codec/src/scouting/mod.rs @@ -14,7 +14,6 @@ mod hello; mod scout; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ scouting::{id, ScoutingBody, ScoutingMessage}, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&ScoutingMessage, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/scouting/scout.rs b/commons/zenoh-codec/src/scouting/scout.rs index 02d5294047..f4863e69b8 100644 --- a/commons/zenoh-codec/src/scouting/scout.rs +++ b/commons/zenoh-codec/src/scouting/scout.rs @@ -11,21 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::{ common::{imsg, ZExtUnknown}, - core::{whatami::WhatAmIMatcher, ZenohId}, + core::{whatami::WhatAmIMatcher, ZenohIdProto}, scouting::{ id, scout::{flag, Scout}, }, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl WCodec<&Scout, &mut W> for Zenoh080 where W: Writer, @@ -91,7 +93,7 @@ where let zid = if imsg::has_flag(flags, flag::I) { let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; Some(zid) } else { None diff --git a/commons/zenoh-codec/src/transport/batch.rs b/commons/zenoh-codec/src/transport/batch.rs index d0774e0f30..bfdc21f618 100644 --- a/commons/zenoh-codec/src/transport/batch.rs +++ b/commons/zenoh-codec/src/transport/batch.rs @@ -11,17 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{RCodec, WCodec, Zenoh080}; use core::num::NonZeroUsize; -use zenoh_buffers::reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}; -use zenoh_buffers::writer::{BacktrackableWriter, DidntWrite, Writer}; -use zenoh_buffers::ZBufReader; -use zenoh_protocol::core::Reliability; -use 
zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::{ - Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + +use zenoh_buffers::{ + reader::{BacktrackableReader, DidntRead, Reader, SiphonableReader}, + writer::{BacktrackableWriter, DidntWrite, Writer}, + ZBufReader, +}; +use zenoh_protocol::{ + core::Reliability, + network::NetworkMessage, + transport::{ + Fragment, FragmentHeader, Frame, FrameHeader, TransportBody, TransportMessage, TransportSn, + }, }; +use crate::{RCodec, WCodec, Zenoh080}; + #[derive(Clone, Copy, Debug)] #[repr(u8)] pub enum CurrentFrame { diff --git a/commons/zenoh-codec/src/transport/close.rs b/commons/zenoh-codec/src/transport/close.rs index 9771b9e1e9..62d9e542b7 100644 --- a/commons/zenoh-codec/src/transport/close.rs +++ b/commons/zenoh-codec/src/transport/close.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Close, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b66f395df1..fc30abce9d 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // FragmentHeader impl WCodec<&FragmentHeader, &mut W> for Zenoh080 where @@ -48,7 +49,7 @@ where if *more { header |= flag::M; } - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -57,7 +58,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (*ext_qos, false))?; } @@ -97,7 +98,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index 8d39aabcdb..6db4e70652 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Reliability}; + // FrameHeader impl WCodec<&FrameHeader, &mut W> for Zenoh080 where @@ -46,7 +48,7 @@ where if let Reliability::Reliable = reliability { header |= flag::R; } - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { header |= flag::Z; } self.write(&mut *writer, 
header)?; @@ -55,7 +57,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { self.write(&mut *writer, (x.ext_qos, false))?; } @@ -94,7 +96,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/init.rs b/commons/zenoh-codec/src/transport/init.rs index d3a92165ea..c559fdbd51 100644 --- a/commons/zenoh-codec/src/transport/init.rs +++ b/commons/zenoh-codec/src/transport/init.rs @@ -11,9 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, -}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -21,7 +18,7 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{iext, imsg}, - core::{Resolution, WhatAmI, ZenohId}, + core::{Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, id, init::{ext, flag, InitAck, InitSyn}, @@ -29,6 +26,10 @@ use zenoh_protocol::{ }, }; +use crate::{ + common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header, Zenoh080Length, +}; + // InitSyn impl WCodec<&InitSyn, &mut W> for Zenoh080 where @@ -44,6 +45,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -57,11 +59,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -91,6 +98,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -152,7 +160,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = batch_size::UNICAST.to_le_bytes(); @@ -165,6 +173,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -181,6 +190,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -219,6 +229,7 @@ where resolution, batch_size, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -244,6 +255,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -257,11 +269,16 @@ where header |= flag::S; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -294,6 +311,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = 
"shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -355,7 +373,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = batch_size::UNICAST.to_le_bytes(); @@ -371,6 +389,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -387,6 +406,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -426,6 +446,7 @@ where batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index 80c1663413..3f70d2ec8b 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use alloc::boxed::Box; use core::time::Duration; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; use zenoh_protocol::{ common::{iext, imsg, ZExtZBufHeader}, - core::{Priority, Resolution, WhatAmI, ZenohId}, + core::{Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, id, join::{ext, flag, Join}, @@ -28,6 +28,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + impl LCodec<&PrioritySn> for Zenoh080 { fn w_len(self, p: &PrioritySn) -> usize { let PrioritySn { @@ -121,7 +123,7 @@ where let (_, more): (ZExtZBufHeader<{ ext::QoS::ID }>, bool) = self.read(&mut *reader)?; // Body - let mut ext_qos = Box::new([PrioritySn::default(); Priority::NUM]); + let mut ext_qos = Box::new([PrioritySn::DEFAULT; Priority::NUM]); for p in ext_qos.iter_mut() { *p = self.codec.read(&mut *reader)?; } @@ -240,7 +242,7 @@ where }; let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; let mut resolution = Resolution::default(); let mut batch_size = batch_size::MULTICAST.to_le_bytes(); diff --git a/commons/zenoh-codec/src/transport/keepalive.rs b/commons/zenoh-codec/src/transport/keepalive.rs index aa6726f50b..44ef4c676a 100644 --- a/commons/zenoh-codec/src/transport/keepalive.rs +++ b/commons/zenoh-codec/src/transport/keepalive.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,6 +23,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&KeepAlive, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/transport/mod.rs b/commons/zenoh-codec/src/transport/mod.rs index 559b5b5fda..3adae0fb72 100644 --- a/commons/zenoh-codec/src/transport/mod.rs +++ b/commons/zenoh-codec/src/transport/mod.rs @@ -21,7 +21,6 @@ mod keepalive; mod oam; mod open; -use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; use 
zenoh_buffers::{ reader::{BacktrackableReader, DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -32,6 +31,8 @@ use zenoh_protocol::{ transport::*, }; +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; + // TransportMessageLowLatency impl WCodec<&TransportMessageLowLatency, &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index e2f905abf8..156a0ce1ff 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +24,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Oam, &mut W> for Zenoh080 where W: Writer, @@ -47,7 +48,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = (ext_qos != &ext::QoSType::default()) as u8; + let mut n_exts = (ext_qos != &ext::QoSType::DEFAULT) as u8; if n_exts != 0 { header |= flag::Z; } @@ -57,7 +58,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::default() { + if ext_qos != &ext::QoSType::DEFAULT { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -105,7 +106,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::default(); + let mut ext_qos = ext::QoSType::DEFAULT; let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/open.rs b/commons/zenoh-codec/src/transport/open.rs index f895942ea1..712fe5ca95 100644 --- a/commons/zenoh-codec/src/transport/open.rs +++ b/commons/zenoh-codec/src/transport/open.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use core::time::Duration; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -27,6 +27,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + // OpenSyn impl WCodec<&OpenSyn, &mut W> for Zenoh080 where @@ -40,6 +42,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -53,11 +56,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -77,6 +85,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -138,6 +147,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -154,6 +164,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -190,6 +201,7 @@ where initial_sn, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -211,6 +223,7 @@ where lease, initial_sn, ext_qos, + 
#[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -226,11 +239,16 @@ where header |= flag::T; } let mut n_exts = (ext_qos.is_some() as u8) - + (ext_shm.is_some() as u8) + (ext_auth.is_some() as u8) + (ext_mlink.is_some() as u8) + (ext_lowlatency.is_some() as u8) + (ext_compression.is_some() as u8); + + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; + } + if n_exts != 0 { header |= flag::Z; } @@ -249,6 +267,7 @@ where n_exts -= 1; self.write(&mut *writer, (qos, n_exts != 0))?; } + #[cfg(feature = "shared-memory")] if let Some(shm) = ext_shm.as_ref() { n_exts -= 1; self.write(&mut *writer, (shm, n_exts != 0))?; @@ -309,6 +328,7 @@ where // Extensions let mut ext_qos = None; + #[cfg(feature = "shared-memory")] let mut ext_shm = None; let mut ext_auth = None; let mut ext_mlink = None; @@ -325,6 +345,7 @@ where ext_qos = Some(q); has_ext = ext; } + #[cfg(feature = "shared-memory")] ext::Shm::ID => { let (s, ext): (ext::Shm, bool) = eodec.read(&mut *reader)?; ext_shm = Some(s); @@ -360,6 +381,7 @@ where lease, initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, diff --git a/commons/zenoh-codec/src/zenoh/ack.rs b/commons/zenoh-codec/src/zenoh/ack.rs deleted file mode 100644 index 78cbca2987..0000000000 --- a/commons/zenoh-codec/src/zenoh/ack.rs +++ /dev/null @@ -1,129 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; -use zenoh_protocol::{ - common::{iext, imsg}, - zenoh::{ - ack::{ext, flag, Ack}, - id, - }, -}; - -impl WCodec<&Ack, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Ack) -> Self::Output { - let Ack { - timestamp, - ext_sinfo, - ext_unknown, - } = x; - - // Header - let mut header = id::ACK; - if timestamp.is_some() { - header |= flag::T; - } - let mut n_exts = ((ext_sinfo.is_some()) as u8) + (ext_unknown.len() as u8); - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - - // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::ACK { - return Err(DidntRead); - } - - // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - // Extensions - let mut ext_sinfo: 
Option = None; - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Ack", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } - } - - Ok(Ack { - timestamp, - ext_sinfo, - ext_unknown, - }) - } -} diff --git a/commons/zenoh-codec/src/zenoh/del.rs b/commons/zenoh-codec/src/zenoh/del.rs index 3d0a64f428..07df1affc7 100644 --- a/commons/zenoh-codec/src/zenoh/del.rs +++ b/commons/zenoh-codec/src/zenoh/del.rs @@ -11,8 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -25,6 +25,8 @@ use zenoh_protocol::{ }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Del, &mut W> for Zenoh080 where W: Writer, diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index 5cef1a6389..e19b11f70d 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,20 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, + ZBuf, }; use zenoh_protocol::{ common::{iext, imsg}, + core::Encoding, zenoh::{ err::{ext, flag, Err}, id, }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; + impl WCodec<&Err, &mut W> for Zenoh080 where W: Writer, @@ -33,33 +37,32 @@ where fn write(self, writer: &mut W, x: &Err) -> Self::Output { let Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, + payload, } = x; // Header let mut header = id::ERR; - if timestamp.is_some() { - header |= flag::T; + if encoding != &Encoding::empty() { + header |= flag::E; } - if *is_infrastructure { - header |= flag::I; + let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; } - let mut n_exts = - (ext_sinfo.is_some() as u8) + (ext_body.is_some() as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, code)?; - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; + if encoding != &Encoding::empty() { + self.write(&mut *writer, encoding)?; } // Extensions @@ -67,15 +70,20 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if let Some(body) = ext_body.as_ref() { + #[cfg(feature = "shared-memory")] + if let Some(eshm) = ext_shm.as_ref() { n_exts -= 1; - self.write(&mut *writer, (body, n_exts != 0))?; + self.write(&mut *writer, (eshm, n_exts != 0))?; } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } + // Payload + let bodec = Zenoh080Bounded::::new(); + bodec.write(&mut *writer, payload)?; + Ok(()) } } @@ -105,16 +113,15 @@ where } // Body - let code: u16 = self.codec.read(&mut *reader)?; - let is_infrastructure = 
imsg::has_flag(self.header, flag::I); - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); + let mut encoding = Encoding::empty(); + if imsg::has_flag(self.header, flag::E) { + encoding = self.codec.read(&mut *reader)?; } // Extensions let mut ext_sinfo: Option = None; - let mut ext_body: Option = None; + #[cfg(feature = "shared-memory")] + let mut ext_shm: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -127,9 +134,10 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::ErrBodyType::VID | ext::ErrBodyType::SID => { - let (s, ext): (ext::ErrBodyType, bool) = eodec.read(&mut *reader)?; - ext_body = Some(s); + #[cfg(feature = "shared-memory")] + ext::Shm::ID => { + let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; + ext_shm = Some(s); has_ext = ext; } _ => { @@ -140,13 +148,17 @@ where } } + // Payload + let bodec = Zenoh080Bounded::::new(); + let payload: ZBuf = bodec.read(&mut *reader)?; + Ok(Err { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, + payload, }) } } diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index 2e3ea48be7..3c8170adea 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -11,19 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod ack; pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -33,10 +26,16 @@ use zenoh_buffers::{ use zenoh_protocol::common::{iext, ZExtUnit}; use zenoh_protocol::{ common::{imsg, ZExtZBufHeader}, - core::{Encoding, ZenohId}, + core::{Encoding, EntityGlobalIdProto, EntityId, ZenohIdProto}, zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Header, Zenoh080Length}; + // Push impl WCodec<&PushBody, &mut W> for Zenoh080 where @@ -82,9 +81,6 @@ where fn write(self, writer: &mut W, x: &RequestBody) -> Self::Output { match x { RequestBody::Query(b) => self.write(&mut *writer, b), - RequestBody::Put(b) => self.write(&mut *writer, b), - RequestBody::Del(b) => self.write(&mut *writer, b), - RequestBody::Pull(b) => self.write(&mut *writer, b), } } } @@ -101,9 +97,6 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::QUERY => RequestBody::Query(codec.read(&mut *reader)?), - id::PUT => RequestBody::Put(codec.read(&mut *reader)?), - id::DEL => RequestBody::Del(codec.read(&mut *reader)?), - id::PULL => RequestBody::Pull(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -122,8 +115,6 @@ where match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), - ResponseBody::Ack(b) => self.write(&mut *writer, b), - ResponseBody::Put(b) => self.write(&mut *writer, b), } } } @@ -141,8 +132,6 @@ where let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), id::ERR => 
ResponseBody::Err(codec.read(&mut *reader)?), - id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), - id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -153,9 +142,9 @@ where // Extension: SourceInfo impl LCodec<&ext::SourceInfoType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::SourceInfoType<{ ID }>) -> usize { - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; - 1 + self.w_len(zid) + self.w_len(*eid) + self.w_len(*sn) + 1 + self.w_len(&id.zid) + self.w_len(id.eid) + self.w_len(*sn) } } @@ -167,18 +156,18 @@ where fn write(self, writer: &mut W, x: (&ext::SourceInfoType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; - let ext::SourceInfoType { zid, eid, sn } = x; + let ext::SourceInfoType { id, sn } = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; - let flags: u8 = (zid.size() as u8 - 1) << 4; + let flags: u8 = (id.zid.size() as u8 - 1) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(zid.size()); - lodec.write(&mut *writer, zid)?; + let lodec = Zenoh080Length::new(id.zid.size()); + lodec.write(&mut *writer, &id.zid)?; - self.write(&mut *writer, eid)?; + self.write(&mut *writer, id.eid)?; self.write(&mut *writer, sn)?; Ok(()) } @@ -197,12 +186,18 @@ where let length = 1 + ((flags >> 4) as usize); let lodec = Zenoh080Length::new(length); - let zid: ZenohId = lodec.read(&mut *reader)?; + let zid: ZenohIdProto = lodec.read(&mut *reader)?; - let eid: u32 = self.codec.read(&mut *reader)?; + let eid: EntityId = self.codec.read(&mut *reader)?; let sn: u32 = self.codec.read(&mut *reader)?; - Ok((ext::SourceInfoType { zid, eid, sn }, more)) + Ok(( + ext::SourceInfoType { + id: EntityGlobalIdProto { zid, eid }, + sn, + }, + more, + )) } } diff --git a/commons/zenoh-codec/src/zenoh/pull.rs b/commons/zenoh-codec/src/zenoh/pull.rs deleted file mode 100644 index dc71901d58..0000000000 --- a/commons/zenoh-codec/src/zenoh/pull.rs +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
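// For context on this file-level deletion: with `Pull` removed here and `Ack` removed
// above, `RequestBody` shrinks to its `Query` variant and `ResponseBody` to `Reply`
// and `Err`, as the rewritten match arms in commons/zenoh-codec/src/zenoh/mod.rs show.
// A sketch of what exhaustive dispatch looks like after the removal (the enum shapes
// are inferred from those match arms; `classify` is a hypothetical helper):

use zenoh_protocol::zenoh::{RequestBody, ResponseBody};

fn classify(request: &RequestBody, response: &ResponseBody) -> (&'static str, &'static str) {
    // The compiler now guarantees these are the only cases left to handle.
    let req = match request {
        RequestBody::Query(_) => "query",
    };
    let resp = match response {
        ResponseBody::Reply(_) => "reply",
        ResponseBody::Err(_) => "err",
    };
    (req, resp)
}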
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; -use alloc::vec::Vec; -use zenoh_buffers::{ - reader::{DidntRead, Reader}, - writer::{DidntWrite, Writer}, -}; - -use zenoh_protocol::{ - common::imsg, - zenoh::{ - id, - pull::{flag, Pull}, - }, -}; - -impl WCodec<&Pull, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &Pull) -> Self::Output { - let Pull { ext_unknown } = x; - - // Header - let mut header = id::PULL; - let mut n_exts = ext_unknown.len() as u8; - if n_exts != 0 { - header |= flag::Z; - } - self.write(&mut *writer, header)?; - - // Extensions - for u in ext_unknown.iter() { - n_exts -= 1; - self.write(&mut *writer, (u, n_exts != 0))?; - } - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != id::PULL { - return Err(DidntRead); - } - - // Extensions - let mut ext_unknown = Vec::new(); - - let mut has_ext = imsg::has_flag(self.header, flag::Z); - while has_ext { - let ext: u8 = self.codec.read(&mut *reader)?; - let (u, ext) = extension::read(reader, "Pull", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - - Ok(Pull { ext_unknown }) - } -} diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index ebc364cf9b..c10a98f6d8 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -31,6 +27,12 @@ use zenoh_protocol::{ }, }; +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Put, &mut W> for Zenoh080 where W: Writer, @@ -54,7 +56,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::default() { + if encoding != &Encoding::empty() { header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +75,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != &Encoding::default() { + if encoding != &Encoding::empty() { self.write(&mut *writer, encoding)?; } @@ -143,7 +145,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::default(); + let mut encoding = Encoding::empty(); if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index 09b01b2266..c9b1cc196e 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -11,59 +11,56 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, 
Zenoh080, Zenoh080Header}; use alloc::{string::String, vec::Vec}; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, }; - use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ id, - query::{ext, flag, Query}, + query::{ext, flag, Consolidation, Query}, }, }; -// Extension Consolidation -impl WCodec<(ext::ConsolidationType, bool), &mut W> for Zenoh080 +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + +// Consolidation +impl WCodec for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (ext::ConsolidationType, bool)) -> Self::Output { - let (x, more) = x; + fn write(self, writer: &mut W, x: Consolidation) -> Self::Output { let v: u64 = match x { - ext::ConsolidationType::Auto => 0, - ext::ConsolidationType::None => 1, - ext::ConsolidationType::Monotonic => 2, - ext::ConsolidationType::Latest => 3, - ext::ConsolidationType::Unique => 4, + Consolidation::Auto => 0, + Consolidation::None => 1, + Consolidation::Monotonic => 2, + Consolidation::Latest => 3, }; - let v = ext::Consolidation::new(v); - self.write(&mut *writer, (&v, more)) + self.write(&mut *writer, v) } } -impl RCodec<(ext::ConsolidationType, bool), &mut R> for Zenoh080Header +impl RCodec for Zenoh080 where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::ConsolidationType, bool), Self::Error> { - let (ext, more): (ext::Consolidation, bool) = self.read(&mut *reader)?; - let c = match ext.value { - 0 => ext::ConsolidationType::Auto, - 1 => ext::ConsolidationType::None, - 2 => ext::ConsolidationType::Monotonic, - 3 => ext::ConsolidationType::Latest, - 4 => ext::ConsolidationType::Unique, - _ => return Err(DidntRead), + fn read(self, reader: &mut R) -> Result { + let v: u64 = self.read(&mut *reader)?; + let c = match v { + 0 => Consolidation::Auto, + 1 => Consolidation::None, + 2 => Consolidation::Monotonic, + 3 => Consolidation::Latest, + _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown }; - Ok((c, more)) + Ok(c) } } @@ -75,9 +72,9 @@ where fn write(self, writer: &mut W, x: &Query) -> Self::Output { let Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, @@ -85,11 +82,13 @@ where // Header let mut header = id::QUERY; + if consolidation != &Consolidation::DEFAULT { + header |= flag::C; + } if !parameters.is_empty() { header |= flag::P; } let mut n_exts = (ext_sinfo.is_some() as u8) - + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + (ext_body.is_some() as u8) + (ext_attachment.is_some() as u8) + (ext_unknown.len() as u8); @@ -99,6 +98,9 @@ where self.write(&mut *writer, header)?; // Body + if consolidation != &Consolidation::DEFAULT { + self.write(&mut *writer, *consolidation)?; + } if !parameters.is_empty() { self.write(&mut *writer, parameters)?; } @@ -108,10 +110,6 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; @@ -154,6 +152,11 @@ where } // Body + let mut consolidation = Consolidation::DEFAULT; + if imsg::has_flag(self.header, flag::C) { + consolidation = self.codec.read(&mut *reader)?; + } + let mut parameters = String::new(); if imsg::has_flag(self.header, flag::P) { parameters = self.codec.read(&mut *reader)?; @@ -161,7 
+164,6 @@ where // Extensions let mut ext_sinfo: Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); let mut ext_body: Option = None; let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); @@ -176,11 +178,6 @@ where ext_sinfo = Some(s); has_ext = ext; } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; - ext_consolidation = c; - has_ext = ext; - } ext::QueryBodyType::SID | ext::QueryBodyType::VID => { let (s, ext): (ext::QueryBodyType, bool) = eodec.read(&mut *reader)?; ext_body = Some(s); @@ -200,9 +197,9 @@ where } Ok(Query { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index d98c72b341..a8d6a2afdc 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,26 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(not(feature = "shared-memory"))] -use crate::Zenoh080Bounded; -#[cfg(feature = "shared-memory")] -use crate::Zenoh080Sliced; -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - ZBuf, }; use zenoh_protocol::{ - common::{iext, imsg}, - core::Encoding, + common::imsg, zenoh::{ id, - reply::{ext, flag, Reply}, + query::Consolidation, + reply::{flag, Reply, ReplyBody}, }, }; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; + impl WCodec<&Reply, &mut W> for Zenoh080 where W: Writer, @@ -39,81 +36,35 @@ where fn write(self, writer: &mut W, x: &Reply) -> Self::Output { let Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } = x; // Header let mut header = id::REPLY; - if timestamp.is_some() { - header |= flag::T; - } - if encoding != &Encoding::default() { - header |= flag::E; - } - let mut n_exts = (ext_sinfo.is_some()) as u8 - + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) - + (ext_attachment.is_some()) as u8 - + (ext_unknown.len() as u8); - #[cfg(feature = "shared-memory")] - { - n_exts += ext_shm.is_some() as u8; + if consolidation != &Consolidation::DEFAULT { + header |= flag::C; } + let mut n_exts = ext_unknown.len() as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if let Some(ts) = timestamp.as_ref() { - self.write(&mut *writer, ts)?; - } - if encoding != &Encoding::default() { - self.write(&mut *writer, encoding)?; + if consolidation != &Consolidation::DEFAULT { + self.write(&mut *writer, *consolidation)?; } // Extensions - if let Some(sinfo) = ext_sinfo.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (sinfo, n_exts != 0))?; - } - if ext_consolidation != &ext::ConsolidationType::default() { - n_exts -= 1; - self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; - } - #[cfg(feature = "shared-memory")] - if let Some(eshm) = ext_shm.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (eshm, n_exts != 0))?; - } - if let Some(att) = ext_attachment.as_ref() { - n_exts -= 1; - self.write(&mut *writer, (att, n_exts != 0))?; - } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } // Payload - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.write(&mut 
*writer, payload)?; - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, payload)?; - } + self.write(&mut *writer, payload)?; Ok(()) } @@ -144,81 +95,27 @@ where } // Body - let mut timestamp: Option = None; - if imsg::has_flag(self.header, flag::T) { - timestamp = Some(self.codec.read(&mut *reader)?); - } - - let mut encoding = Encoding::default(); - if imsg::has_flag(self.header, flag::E) { - encoding = self.codec.read(&mut *reader)?; + let mut consolidation = Consolidation::DEFAULT; + if imsg::has_flag(self.header, flag::C) { + consolidation = self.codec.read(&mut *reader)?; } // Extensions - let mut ext_sinfo: Option = None; - let mut ext_consolidation = ext::ConsolidationType::default(); - #[cfg(feature = "shared-memory")] - let mut ext_shm: Option = None; - let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; - let eodec = Zenoh080Header::new(ext); - match iext::eid(ext) { - ext::SourceInfo::ID => { - let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; - ext_sinfo = Some(s); - has_ext = ext; - } - ext::Consolidation::ID => { - let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; - ext_consolidation = c; - has_ext = ext; - } - #[cfg(feature = "shared-memory")] - ext::Shm::ID => { - let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; - ext_shm = Some(s); - has_ext = ext; - } - ext::Attachment::ID => { - let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; - ext_attachment = Some(a); - has_ext = ext; - } - _ => { - let (u, ext) = extension::read(reader, "Reply", ext)?; - ext_unknown.push(u); - has_ext = ext; - } - } + let (u, ext) = extension::read(reader, "Reply", ext)?; + ext_unknown.push(u); + has_ext = ext; } // Payload - let payload: ZBuf = { - #[cfg(feature = "shared-memory")] - { - let codec = Zenoh080Sliced::::new(ext_shm.is_some()); - codec.read(&mut *reader)? - } - - #[cfg(not(feature = "shared-memory"))] - { - let bodec = Zenoh080Bounded::::new(); - bodec.read(&mut *reader)? - } - }; + let payload: ReplyBody = self.codec.read(&mut *reader)?; Ok(Reply { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index 3c1a6a821b..46fabe5c51 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::convert::TryFrom; + use rand::{ distributions::{Alphanumeric, DistString}, *, }; -use std::convert::TryFrom; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -31,6 +32,22 @@ use zenoh_protocol::{ zenoh, zextunit, zextz64, zextzbuf, }; +#[test] +fn zbuf_test() { + let mut buffer = vec![0u8; 64]; + + let zbuf = ZBuf::empty(); + let mut writer = buffer.writer(); + + let codec = Zenoh080::new(); + codec.write(&mut writer, &zbuf).unwrap(); + println!("Buffer: {:?}", buffer); + + let mut reader = buffer.reader(); + let ret: ZBuf = codec.read(&mut reader).unwrap(); + assert_eq!(ret, zbuf); +} + const NUM_ITER: usize = 100; const MAX_PAYLOAD_SIZE: usize = 256; @@ -121,10 +138,28 @@ macro_rules! 
run { // Core #[test] fn codec_zint() { + run!(u8, { u8::MIN }); + run!(u8, { u8::MAX }); run!(u8, { thread_rng().gen::() }); + + run!(u16, { u16::MIN }); + run!(u16, { u16::MAX }); run!(u16, { thread_rng().gen::() }); + + run!(u32, { u32::MIN }); + run!(u32, { u32::MAX }); run!(u32, { thread_rng().gen::() }); + + run!(u64, { u64::MIN }); + run!(u64, { u64::MAX }); + let codec = Zenoh080::new(); + for i in 1..=codec.w_len(u64::MAX) { + run!(u64, { 1 << (7 * i) }); + } run!(u64, { thread_rng().gen::() }); + + run!(usize, { usize::MIN }); + run!(usize, { usize::MAX }); run!(usize, thread_rng().gen::()); } @@ -138,11 +173,12 @@ fn codec_zint_len() { codec.write(&mut writer, n).unwrap(); assert_eq!(codec.w_len(n), buff.len()); - for i in 1..=9 { + for i in 1..=codec.w_len(u64::MAX) { let mut buff = vec![]; let mut writer = buff.writer(); let n: u64 = 1 << (7 * i); codec.write(&mut writer, n).unwrap(); + println!("ZInt len: {} {:02x?}", n, buff); assert_eq!(codec.w_len(n), buff.len()); } @@ -236,7 +272,7 @@ fn codec_string_bounded() { #[test] fn codec_zid() { - run!(ZenohId, ZenohId::default()); + run!(ZenohIdProto, ZenohIdProto::default()); } #[test] @@ -312,7 +348,7 @@ fn codec_locator() { fn codec_timestamp() { run!(Timestamp, { let time = uhlc::NTP64(thread_rng().gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); Timestamp::new(time, id) }); } @@ -325,15 +361,25 @@ fn codec_encoding() { #[cfg(feature = "shared-memory")] #[test] fn codec_shm_info() { - use zenoh_shm::SharedMemoryBufInfo; + use zenoh_shm::{ + api::provider::chunk::ChunkDescriptor, header::descriptor::HeaderDescriptor, + watchdog::descriptor::Descriptor, ShmBufInfo, + }; - run!(SharedMemoryBufInfo, { + run!(ShmBufInfo, { let mut rng = rand::thread_rng(); - let len = rng.gen_range(0..16); - SharedMemoryBufInfo::new( + ShmBufInfo::new( + ChunkDescriptor::new(rng.gen(), rng.gen(), rng.gen()), rng.gen(), rng.gen(), - Alphanumeric.sample_string(&mut rng, len), + Descriptor { + id: rng.gen(), + index_and_bitpos: rng.gen(), + }, + HeaderDescriptor { + id: rng.gen(), + index: rng.gen(), + }, rng.gen(), ) }); @@ -401,7 +447,7 @@ fn codec_scout() { #[test] fn codec_hello() { - run!(Hello, Hello::rand()); + run!(HelloProto, HelloProto::rand()); } #[test] @@ -486,6 +532,11 @@ fn codec_declare_body() { run!(DeclareBody, DeclareBody::rand()); } +#[test] +fn codec_interest() { + run!(Interest, Interest::rand()); +} + #[test] fn codec_declare_keyexpr() { run!(DeclareKeyExpr, DeclareKeyExpr::rand()); @@ -556,7 +607,7 @@ fn codec_network() { run!(NetworkMessage, NetworkMessage::rand()); } -// Zenoh new +// Zenoh #[test] fn codec_put() { run!(zenoh::Put, zenoh::Put::rand()); @@ -581,13 +632,3 @@ fn codec_reply() { fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } - -#[test] -fn codec_ack() { - run!(zenoh::Ack, zenoh::Ack::rand()); -} - -#[test] -fn codec_pull() { - run!(zenoh::Pull, zenoh::Pull::rand()); -} diff --git a/commons/zenoh-collections/Cargo.toml b/commons/zenoh-collections/Cargo.toml index ca01d7460e..27787e8c6a 100644 --- a/commons/zenoh-collections/Cargo.toml +++ b/commons/zenoh-collections/Cargo.toml @@ -31,5 +31,7 @@ description = "Internal crate for zenoh." 
[features] default = ["std"] std = [] +test = ["rand"] [dependencies] +rand = { workspace = true, optional = true } diff --git a/commons/zenoh-collections/src/lib.rs b/commons/zenoh-collections/src/lib.rs index ea9a9209e6..6549594de2 100644 --- a/commons/zenoh-collections/src/lib.rs +++ b/commons/zenoh-collections/src/lib.rs @@ -32,8 +32,3 @@ pub use ring_buffer::*; pub mod stack_buffer; #[cfg(feature = "std")] pub use stack_buffer::*; - -#[cfg(feature = "std")] -pub mod properties; -#[cfg(feature = "std")] -pub use properties::*; diff --git a/commons/zenoh-collections/src/properties.rs b/commons/zenoh-collections/src/properties.rs deleted file mode 100644 index 6a3c96241c..0000000000 --- a/commons/zenoh-collections/src/properties.rs +++ /dev/null @@ -1,174 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::{ - collections::HashMap, - fmt, - ops::{Deref, DerefMut}, -}; - -const PROP_SEPS: &[&str] = &["\r\n", "\n", ";"]; -const DEFAULT_PROP_SEP: char = ';'; -const KV_SEP: char = '='; -const COMMENT_PREFIX: char = '#'; - -/// A map of key/value (String,String) properties. -/// It can be parsed from a String, using `;` or `` as separator between each properties -/// and `=` as separator between a key and its value. Keys and values are trimmed. -#[non_exhaustive] -#[derive(Clone, PartialEq, Eq, Default)] -pub struct Properties(HashMap); - -impl Deref for Properties { - type Target = HashMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Properties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl fmt::Display for Properties { - /// Format the Properties as a string, using `'='` for key/value separator - /// and `';'` for separator between each keys/values. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut it = self.0.iter(); - if let Some((k, v)) = it.next() { - if v.is_empty() { - write!(f, "{k}")? - } else { - write!(f, "{k}{KV_SEP}{v}")? - } - for (k, v) in it { - if v.is_empty() { - write!(f, "{DEFAULT_PROP_SEP}{k}")? - } else { - write!(f, "{DEFAULT_PROP_SEP}{k}{KV_SEP}{v}")? 
- } - } - } - Ok(()) - } -} - -impl fmt::Debug for Properties { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{self}") - } -} - -impl From<&str> for Properties { - fn from(s: &str) -> Self { - let mut props = vec![s]; - for sep in PROP_SEPS { - props = props - .into_iter() - .flat_map(|s| s.split(sep)) - .collect::>(); - } - props = props.into_iter().map(str::trim).collect::>(); - let inner = props - .iter() - .filter_map(|prop| { - if prop.is_empty() || prop.starts_with(COMMENT_PREFIX) { - None - } else { - let mut it = prop.splitn(2, KV_SEP); - Some(( - it.next().unwrap().trim().to_string(), - it.next().unwrap_or("").trim().to_string(), - )) - } - }) - .collect(); - Self(inner) - } -} - -impl From for Properties { - fn from(s: String) -> Self { - Self::from(s.as_str()) - } -} - -impl From> for Properties { - fn from(map: HashMap) -> Self { - Self(map) - } -} - -impl From<&[(&str, &str)]> for Properties { - fn from(kvs: &[(&str, &str)]) -> Self { - let inner = kvs - .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) - .collect(); - Self(inner) - } -} - -impl TryFrom<&std::path::Path> for Properties { - type Error = std::io::Error; - - fn try_from(p: &std::path::Path) -> Result { - Ok(Self::from(std::fs::read_to_string(p)?)) - } -} - -impl From for HashMap { - fn from(props: Properties) -> Self { - props.0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_properties() { - assert!(Properties::from("").0.is_empty()); - - assert_eq!(Properties::from("p1"), Properties::from(&[("p1", "")][..])); - - assert_eq!( - Properties::from("p1=v1"), - Properties::from(&[("p1", "v1")][..]) - ); - - assert_eq!( - Properties::from("p1=v1;p2=v2;"), - Properties::from(&[("p1", "v1"), ("p2", "v2")][..]) - ); - - assert_eq!( - Properties::from("p1=v1;p2;p3=v3"), - Properties::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) - ); - - assert_eq!( - Properties::from("p1=v 1;p 2=v2"), - Properties::from(&[("p1", "v 1"), ("p 2", "v2")][..]) - ); - - assert_eq!( - Properties::from("p1=x=y;p2=a==b"), - Properties::from(&[("p1", "x=y"), ("p2", "a==b")][..]) - ); - } -} diff --git a/commons/zenoh-collections/src/ring_buffer.rs b/commons/zenoh-collections/src/ring_buffer.rs index fd60030ebc..e9f7909d5f 100644 --- a/commons/zenoh-collections/src/ring_buffer.rs +++ b/commons/zenoh-collections/src/ring_buffer.rs @@ -40,6 +40,15 @@ impl RingBuffer { Some(elem) } + #[inline] + pub fn push_force(&mut self, elem: T) -> Option { + self.push(elem).and_then(|elem| { + let ret = self.buffer.pop_front(); + self.buffer.push_back(elem); + ret + }) + } + #[inline] pub fn pull(&mut self) -> Option { let x = self.buffer.pop_front(); diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index c68ac6d8ff..7b2391197d 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -13,6 +13,8 @@ // use alloc::vec; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; use core::{ cmp::PartialEq, fmt, iter, @@ -20,9 +22,6 @@ use core::{ ptr, slice, }; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - #[derive(Clone, Eq)] enum SingleOrVecInner { Single(T), @@ -30,6 +29,10 @@ enum SingleOrVecInner { } impl SingleOrVecInner { + const fn empty() -> Self { + SingleOrVecInner::Vec(Vec::new()) + } + fn push(&mut self, value: T) { match self { SingleOrVecInner::Vec(vec) if vec.capacity() == 0 => *self = Self::Single(value), @@ -53,7 +56,7 @@ where impl Default for SingleOrVecInner { 
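// For context: `RingBuffer::push_force`, added in the ring_buffer.rs hunk above,
// complements `push`. When the buffer is full, `push` refuses the element and hands
// it back, while `push_force` evicts the oldest element and returns that instead.
// A usage sketch (the capacity constructor `RingBuffer::new(1)` is assumed here,
// since it is not part of this hunk):

use zenoh_collections::RingBuffer;

fn push_force_demo() {
    let mut rb: RingBuffer<u32> = RingBuffer::new(1); // assumed: room for one element

    assert_eq!(rb.push(1), None); // accepted, buffer is now [1]
    assert_eq!(rb.push(2), Some(2)); // full: `push` hands 2 back, buffer still [1]
    assert_eq!(rb.push_force(2), Some(1)); // full: 1 is evicted and returned, buffer is [2]
    assert_eq!(rb.pull(), Some(2));
}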
fn default() -> Self { - SingleOrVecInner::Vec(Vec::new()) + Self::empty() } } @@ -88,6 +91,10 @@ where pub struct SingleOrVec(SingleOrVecInner); impl SingleOrVec { + pub const fn empty() -> Self { + Self(SingleOrVecInner::empty()) + } + pub fn push(&mut self, value: T) { self.0.push(value); } @@ -174,14 +181,17 @@ impl SingleOrVec { self.vectorize().insert(at, value); } } + enum DrainInner<'a, T> { Vec(alloc::vec::Drain<'a, T>), Single(&'a mut SingleOrVecInner), Done, } + pub struct Drain<'a, T> { inner: DrainInner<'a, T>, } + impl<'a, T> Iterator for Drain<'a, T> { type Item = T; diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index cb502881d9..49c9d722f1 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -23,8 +23,11 @@ license = { workspace = true } categories = { workspace = true } description = "Internal crate for zenoh." +[features] +internal = [] + [dependencies] -tracing = {workspace = true} +tracing = { workspace = true } flume = { workspace = true } json5 = { workspace = true } num_cpus = { workspace = true } @@ -36,4 +39,6 @@ zenoh-core = { workspace = true } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } +zenoh-macros = { workspace = true } secrecy = { workspace = true } +uhlc = { workspace = true } diff --git a/commons/zenoh-config/src/connection_retry.rs b/commons/zenoh-config/src/connection_retry.rs index e1d383749c..55234dcc91 100644 --- a/commons/zenoh-config/src/connection_retry.rs +++ b/commons/zenoh-config/src/connection_retry.rs @@ -12,18 +12,18 @@ // ZettaScale Zenoh Team, // +use serde::{Deserialize, Serialize}; +use zenoh_core::zparse_default; +use zenoh_protocol::core::{EndPoint, WhatAmI}; + use crate::{ defaults::{ self, DEFAULT_CONNECT_EXIT_ON_FAIL, DEFAULT_CONNECT_TIMEOUT_MS, DEFAULT_LISTEN_EXIT_ON_FAIL, DEFAULT_LISTEN_TIMEOUT_MS, }, + mode_dependent::*, Config, }; -use serde::{Deserialize, Serialize}; -use zenoh_core::zparse_default; -use zenoh_protocol::core::WhatAmI; - -use crate::mode_dependent::*; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct ConnectionRetryModeDependentConf { diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 7a2159559a..cc6bf5854a 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -36,7 +36,7 @@ pub const mode: WhatAmI = WhatAmI::Peer; #[allow(dead_code)] pub mod scouting { pub const timeout: u64 = 3000; - pub const delay: u64 = 200; + pub const delay: u64 = 500; pub mod multicast { pub const enabled: bool = true; pub const address: ([u8; 4], u16) = ([224, 0, 0, 224], 7446); @@ -100,6 +100,34 @@ pub mod routing { } } +impl Default for ListenConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Dependent(ModeValues { + router: Some(vec!["tcp/[::]:7447".parse().unwrap()]), + peer: Some(vec!["tcp/[::]:0".parse().unwrap()]), + client: None, + }), + exit_on_failure: None, + retry: None, + } + } +} + +impl Default for ConnectConfig { + #[allow(clippy::unnecessary_cast)] + fn default() -> Self { + Self { + timeout_ms: None, + endpoints: ModeDependentValue::Unique(vec![]), + exit_on_failure: None, + retry: None, + } + } +} + impl Default for TransportUnicastConf { fn default() -> Self { Self { @@ -167,16 +195,6 @@ impl Default for LinkTxConf { } } -impl Default for QueueConf { - fn default() -> Self { - Self { - size: QueueSizeConf::default(), 
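// For context: the `ListenConfig` default introduced above is mode-dependent for the
// first time: routers bind a fixed port, peers an ephemeral one, and clients nothing.
// A sketch of building the same value by hand; `ModeDependentValue`, `ModeValues`,
// and the `EndPoint` re-export come from zenoh-config's `mode_dependent` module and
// `pub use` lines shown in this patch, and the endpoint strings are copied verbatim
// from the derived default:

use zenoh_config::{EndPoint, ModeDependentValue, ModeValues};

fn default_listen_endpoints() -> ModeDependentValue<Vec<EndPoint>> {
    ModeDependentValue::Dependent(ModeValues {
        router: Some(vec!["tcp/[::]:7447".parse().unwrap()]), // fixed, routable port
        peer: Some(vec!["tcp/[::]:0".parse().unwrap()]),      // OS-assigned port
        client: None,                                         // clients do not listen
    })
}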
- congestion_control: CongestionControlConf::default(), - backoff: 100, - } - } -} - impl QueueSizeConf { pub const MIN: usize = 1; pub const MAX: usize = 16; @@ -205,6 +223,15 @@ impl Default for CongestionControlConf { } } +impl Default for BatchingConf { + fn default() -> Self { + BatchingConf { + enabled: true, + time_limit: 1, + } + } +} + impl Default for LinkRxConf { fn default() -> Self { Self { @@ -216,9 +243,9 @@ impl Default for LinkRxConf { // Make explicit the value and ignore clippy warning #[allow(clippy::derivable_impls)] -impl Default for SharedMemoryConf { +impl Default for ShmConf { fn default() -> Self { - Self { enabled: false } + Self { enabled: true } } } @@ -228,6 +255,8 @@ impl Default for AclConfig { enabled: false, default_permission: Permission::Deny, rules: None, + subjects: None, + policies: None, } } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index a6aedaa59b..2d9ba0beee 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -15,11 +15,8 @@ //! Configuration to pass to `zenoh::open()` and `zenoh::scout()` functions and associated constants. pub mod defaults; mod include; +pub mod wrappers; -use include::recursive_include; -use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; -use serde::{Deserialize, Serialize}; -use serde_json::{Map, Value}; #[allow(unused_imports)] use std::convert::TryFrom; // This is a false positive from the rust analyser use std::{ @@ -31,18 +28,24 @@ use std::{ path::Path, sync::{Arc, Mutex, MutexGuard, Weak}, }; + +use include::recursive_include; +use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; +use serde::{Deserialize, Serialize}; +use serde_json::{Map, Value}; use validated_struct::ValidatedMapAssociatedTypes; pub use validated_struct::{GetError, ValidatedMap}; +pub use wrappers::ZenohId; use zenoh_core::zlock; pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, + whatami, EndPoint, Locator, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, }; use zenoh_protocol::{ core::{key_expr::OwnedKeyExpr, Bits}, transport::{BatchSize, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; -use zenoh_util::LibLoader; +use zenoh_util::{LibLoader, LibSearchDirs}; pub mod mode_dependent; pub use mode_dependent::*; @@ -101,37 +104,73 @@ pub struct DownsamplingItemConf { } #[derive(Serialize, Debug, Deserialize, Clone)] -pub struct AclConfigRules { - pub interfaces: Option>, +pub struct AclConfigRule { + pub id: String, pub key_exprs: Vec, - pub actions: Vec, + pub messages: Vec, pub flows: Option>, pub permission: Permission, } +#[derive(Serialize, Debug, Deserialize, Clone)] +pub struct AclConfigSubjects { + pub id: String, + pub interfaces: Option>, + pub cert_common_names: Option>, + pub usernames: Option>, +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct Interface(pub String); + +impl std::fmt::Display for Interface { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Interface({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct CertCommonName(pub String); + +impl std::fmt::Display for CertCommonName { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "CertCommonName({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct 
Username(pub String); + +impl std::fmt::Display for Username { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Username({})", self.0) + } +} + +#[derive(Serialize, Debug, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct AclConfigPolicyEntry { + pub rules: Vec, + pub subjects: Vec, +} + #[derive(Clone, Serialize, Debug, Deserialize)] pub struct PolicyRule { - pub subject: Subject, + pub subject_id: usize, pub key_expr: String, - pub action: Action, + pub message: AclMessage, pub permission: Permission, pub flow: InterceptorFlow, } -#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -#[serde(untagged)] -#[serde(rename_all = "snake_case")] -pub enum Subject { - Interface(String), -} - #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "snake_case")] -pub enum Action { +pub enum AclMessage { Put, + Delete, DeclareSubscriber, - Get, + Query, DeclareQueryable, + Reply, } #[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] @@ -178,10 +217,8 @@ pub fn peer() -> Config { pub fn client, T: Into>(peers: I) -> Config { let mut config = Config::default(); config.set_mode(Some(WhatAmI::Client)).unwrap(); - config - .connect - .endpoints - .extend(peers.into_iter().map(|t| t.into())); + config.connect.endpoints = + ModeDependentValue::Unique(peers.into_iter().map(|t| t.into()).collect()); config } @@ -192,15 +229,6 @@ fn config_keys() { dbg!(c.keys()); } -fn treat_error_as_none<'a, T, D>(deserializer: D) -> Result, D::Error> -where - T: serde::de::Deserialize<'a>, - D: serde::de::Deserializer<'a>, -{ - let value: Value = serde::de::Deserialize::deserialize(deserializer)?; - Ok(T::deserialize(value).ok()) -} - validated_struct::validator! { /// The main configuration structure for Zenoh. /// @@ -220,21 +248,23 @@ validated_struct::validator! { /// The node's mode ("router" (default value in `zenohd`), "peer" or "client"). mode: Option, /// Which zenoh nodes to connect to. - pub connect: #[derive(Default)] + pub connect: ConnectConfig { /// global timeout for full connect cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to connect to + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, }, - /// Which endpoints to listen on. `zenohd` will add `tcp/[::]:7447` to these locators if left empty. - pub listen: #[derive(Default)] + /// Which endpoints to listen on. + pub listen: ListenConfig { /// global timeout for full listen cycle pub timeout_ms: Option>, - pub endpoints: Vec, + /// The list of endpoints to listen on + pub endpoints: ModeDependentValue>, /// if connection timeout exceed, exit from application pub exit_on_failure: Option>, pub retry: Option, @@ -257,7 +287,6 @@ validated_struct::validator! { /// The time-to-live on multicast scouting packets. (default: 1) pub ttl: Option, /// Which type of Zenoh instances to automatically establish sessions with upon discovery through UDP multicast. - #[serde(deserialize_with = "treat_error_as_none")] autoconnect: Option>, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: Option>, @@ -274,7 +303,6 @@ validated_struct::validator! { /// direct connectivity with each other. multihop: Option, /// Which type of Zenoh instances to automatically establish sessions with upon discovery through gossip. 
- #[serde(deserialize_with = "treat_error_as_none")] autoconnect: Option<ModeDependentValue<WhatAmIMatcher>>, }, }, @@ -377,9 +405,10 @@ validated_struct::validator! { lease: u64, /// Number of keep-alive messages in a link lease duration (default: 4) keep_alive: usize, - /// Zenoh's MTU equivalent (default: 2^16-1) + /// Zenoh's MTU equivalent (default: 2^16-1) (max: 2^16-1) batch_size: BatchSize, - pub queue: QueueConf { + pub queue: #[derive(Default)] + QueueConf { /// The size of each priority queue indicates the number of batches a given queue can contain. /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, @@ -402,9 +431,15 @@ validated_struct::validator! { /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. pub wait_before_drop: u64, }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. - backoff: u64, + pub batching: BatchingConf { + /// Perform adaptive batching of messages if they are smaller than the batch_size. + /// When the network is detected to not be fast enough to transmit every message individually, many small messages may be + /// batched together and sent all at once on the wire, reducing the overall network overhead. This is typical of a high-throughput + /// scenario mainly composed of small messages. In other words, batching is activated by the network back-pressure. + enabled: bool, + /// The maximum time limit (in ms) a message should be retained for batching when back-pressure happens. + time_limit: u64, + }, }, // Number of threads used for TX threads: usize, @@ -447,7 +482,7 @@ validated_struct::validator! { }, }, pub shared_memory: - SharedMemoryConf { + ShmConf { /// Whether shared memory is enabled or not. /// If set to `true`, the SHM buffer optimization support will be announced to other parties. (default `false`). /// This option doesn't make SHM buffer optimization mandatory, the real support depends on other party setting @@ -507,7 +542,9 @@ validated_struct::validator! { pub access_control: AclConfig { pub enabled: bool, pub default_permission: Permission, - pub rules: Option<Vec<AclConfigRules>> + pub rules: Option<Vec<AclConfigRule>>, + pub subjects: Option<Vec<AclConfigSubjects>>, + pub policies: Option<Vec<AclConfigPolicyEntry>>, }, /// A list of directories where plugins may be searched for if no `__path__` was specified for them. @@ -515,7 +552,7 @@ validated_struct::validator! { pub plugins_loading: #[derive(Default)] PluginsLoading { pub enabled: bool, - pub search_dirs: Option<Vec<String>>, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) + pub search_dirs: LibSearchDirs, }, #[validated(recursive_accessors)] /// The configuration for plugins.
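To make the new id-based access control concrete, here is a minimal sketch of how rules, subjects and policies compose. It assumes the elided generic parameters are `Vec<String>`/`Option<Vec<...>>` as the field names suggest, and that `Permission` is exported by `zenoh-config` with an `Allow` variant alongside the `Deny` used as the default above:

    use zenoh_config::{
        AclConfigPolicyEntry, AclConfigRule, AclConfigSubjects, AclMessage, Interface, Permission,
    };

    fn main() {
        // A rule: which messages are permitted on which key expressions.
        let rule = AclConfigRule {
            id: "allow-demo".to_string(),
            key_exprs: vec!["demo/**".to_string()],
            messages: vec![AclMessage::Put, AclMessage::DeclareSubscriber],
            flows: None, // no flow restriction in this sketch
            permission: Permission::Allow,
        };
        // A subject group: who the rule can apply to.
        let subjects = AclConfigSubjects {
            id: "lan-peers".to_string(),
            interfaces: Some(vec![Interface("eth0".to_string())]),
            cert_common_names: None,
            usernames: None,
        };
        // A policy binds rules to subjects by their ids.
        let policy = AclConfigPolicyEntry {
            rules: vec![rule.id.clone()],
            subjects: vec![subjects.id.clone()],
        };
        println!("{policy:?}");
    }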
@@ -541,19 +578,6 @@ fn set_false() -> bool { false } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PluginSearchDirs(Vec); -impl Default for PluginSearchDirs { - fn default() -> Self { - Self( - (*zenoh_util::LIB_DEFAULT_SEARCH_PATHS) - .split(':') - .map(|c| c.to_string()) - .collect(), - ) - } -} - #[test] fn config_deser() { let config = Config::from_deserializer( @@ -562,7 +586,7 @@ fn config_deser() { scouting: { multicast: { enabled: false, - autoconnect: "peer|router" + autoconnect: ["peer", "router"] } } }"#, @@ -589,7 +613,7 @@ fn config_deser() { scouting: { multicast: { enabled: false, - autoconnect: {router: "", peer: "peer|router"} + autoconnect: {router: [], peer: ["peer", "router"]} } } }"#, @@ -731,10 +755,7 @@ impl Config { pub fn libloader(&self) -> LibLoader { if self.plugins_loading.enabled { - match self.plugins_loading.search_dirs() { - Some(dirs) => LibLoader::new(dirs, true), - None => LibLoader::default(), - } + LibLoader::new(self.plugins_loading.search_dirs().clone()) } else { LibLoader::empty() } diff --git a/commons/zenoh-config/src/mode_dependent.rs b/commons/zenoh-config/src/mode_dependent.rs index 91e366f452..6576161473 100644 --- a/commons/zenoh-config/src/mode_dependent.rs +++ b/commons/zenoh-config/src/mode_dependent.rs @@ -12,15 +12,13 @@ // ZettaScale Zenoh Team, // +use std::{fmt, marker::PhantomData}; + use serde::{ de::{self, MapAccess, Visitor}, Deserialize, Serialize, }; -use std::fmt; -use std::marker::PhantomData; -pub use zenoh_protocol::core::{ - whatami, EndPoint, Locator, Priority, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor, ZenohId, -}; +use zenoh_protocol::core::{EndPoint, WhatAmI, WhatAmIMatcher, WhatAmIMatcherVisitor}; pub trait ModeDependent { fn router(&self) -> Option<&T>; @@ -34,6 +32,7 @@ pub trait ModeDependent { WhatAmI::Client => self.client(), } } + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T>; } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -61,6 +60,15 @@ impl ModeDependent for ModeValues { fn client(&self) -> Option<&T> { self.client.as_ref() } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match whatami { + WhatAmI::Router => self.router.as_mut(), + WhatAmI::Peer => self.peer.as_mut(), + WhatAmI::Client => self.client.as_mut(), + } + } } #[derive(Clone, Debug)] @@ -69,6 +77,15 @@ pub enum ModeDependentValue { Dependent(ModeValues), } +impl ModeDependentValue { + #[inline] + pub fn set(&mut self, value: T) -> Result, ModeDependentValue> { + let mut value = ModeDependentValue::Unique(value); + std::mem::swap(self, &mut value); + Ok(value) + } +} + impl ModeDependent for ModeDependentValue { #[inline] fn router(&self) -> Option<&T> { @@ -93,6 +110,14 @@ impl ModeDependent for ModeDependentValue { Self::Dependent(o) => o.client(), } } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + match self { + Self::Unique(v) => Some(v), + Self::Dependent(o) => o.get_mut(whatami), + } + } } impl serde::Serialize for ModeDependentValue @@ -230,12 +255,12 @@ impl<'a> serde::Deserialize<'a> for ModeDependentValue { formatter.write_str("WhatAmIMatcher or mode dependent WhatAmIMatcher") } - fn visit_str(self, value: &str) -> Result + fn visit_seq(self, seq: A) -> Result where - E: de::Error, + A: de::SeqAccess<'de>, { WhatAmIMatcherVisitor {} - .visit_str(value) + .visit_seq(seq) .map(ModeDependentValue::Unique) } @@ -251,31 +276,62 @@ impl<'a> serde::Deserialize<'a> for ModeDependentValue { } } +impl<'a> serde::Deserialize<'a> for ModeDependentValue> { + fn 
deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'a>, + { + struct UniqueOrDependent<U>(PhantomData<fn() -> U>); + + impl<'de> Visitor<'de> for UniqueOrDependent<ModeDependentValue<Vec<EndPoint>>> { + type Value = ModeDependentValue<Vec<EndPoint>>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("list of endpoints or mode dependent list of endpoints") + } + + fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> + where + A: de::SeqAccess<'de>, + { + let mut v = seq.size_hint().map_or_else(Vec::new, Vec::with_capacity); + + while let Some(s) = seq.next_element()? { + v.push(s); + } + Ok(ModeDependentValue::Unique(v)) + } + + fn visit_map<M>(self, map: M) -> Result<Self::Value, M::Error> + where + M: MapAccess<'de>, + { + ModeValues::deserialize(de::value::MapAccessDeserializer::new(map)) + .map(ModeDependentValue::Dependent) + } + } + deserializer.deserialize_any(UniqueOrDependent(PhantomData)) + } +} + impl<T> ModeDependent<T> for Option<ModeDependentValue<T>> { #[inline] fn router(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.router(), - None => None, - } + self.as_ref().and_then(|m| m.router()) } #[inline] fn peer(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.peer(), - None => None, - } + self.as_ref().and_then(|m| m.peer()) } #[inline] fn client(&self) -> Option<&T> { - match self { - Some(ModeDependentValue::Unique(v)) => Some(v), - Some(ModeDependentValue::Dependent(o)) => o.client(), - None => None, - } + self.as_ref().and_then(|m| m.client()) + } + + #[inline] + fn get_mut(&mut self, whatami: WhatAmI) -> Option<&mut T> { + self.as_mut().and_then(|m| m.get_mut(whatami)) } } diff --git a/commons/zenoh-config/src/wrappers.rs b/commons/zenoh-config/src/wrappers.rs new file mode 100644 index 0000000000..fd6d2ef50b --- /dev/null +++ b/commons/zenoh-config/src/wrappers.rs @@ -0,0 +1,165 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Wrappers around types reexported by `zenoh` from subcrates. +//! These wrappers are used to avoid exposing the API necessary only for zenoh internals into the public API. + +use core::fmt; +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; +use zenoh_protocol::{ + core::{key_expr::OwnedKeyExpr, EntityGlobalIdProto, EntityId, Locator, WhatAmI, ZenohIdProto}, + scouting::HelloProto, +}; + +/// The global unique id of a zenoh peer.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default)] +#[repr(transparent)] +pub struct ZenohId(ZenohIdProto); + +impl ZenohId { + /// Used by plugins for creating the adminspace path + #[zenoh_macros::internal] + pub fn into_keyexpr(self) -> OwnedKeyExpr { + self.into() + } +} + +impl fmt::Debug for ZenohId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} +impl fmt::Display for ZenohId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From<ZenohIdProto> for ZenohId { + fn from(id: ZenohIdProto) -> Self { + Self(id) + } +} + +impl From<ZenohId> for ZenohIdProto { + fn from(id: ZenohId) -> Self { + id.0 + } +} + +impl From<ZenohId> for uhlc::ID { + fn from(zid: ZenohId) -> Self { + zid.0.into() + } +} + +impl From<ZenohId> for OwnedKeyExpr { + fn from(zid: ZenohId) -> Self { + zid.0.into() + } +} + +impl From<&ZenohId> for OwnedKeyExpr { + fn from(zid: &ZenohId) -> Self { + (*zid).into() + } +} + +impl FromStr for ZenohId { + type Err = zenoh_result::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + ZenohIdProto::from_str(s).map(|zid| zid.into()) + } +} + +/// A zenoh Hello message. +#[repr(transparent)] +pub struct Hello(HelloProto); + +impl Hello { + /// Get the locators of this Hello message. + pub fn locators(&self) -> &[Locator] { + &self.0.locators + } + + /// Get the zenoh id of this Hello message. + pub fn zid(&self) -> ZenohId { + self.0.zid.into() + } + + /// Get the whatami of this Hello message. + pub fn whatami(&self) -> WhatAmI { + self.0.whatami + } +} + +impl From<HelloProto> for Hello { + fn from(inner: HelloProto) -> Self { + Hello(inner) + } +} + +impl fmt::Debug for Hello { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } +} + +impl fmt::Display for Hello { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Hello") + .field("zid", &self.zid()) + .field("whatami", &self.whatami()) + .field("locators", &self.locators()) + .finish() + } +} + +#[derive(Default, Copy, Clone, Eq, Hash, PartialEq)] +#[repr(transparent)] +pub struct EntityGlobalId(EntityGlobalIdProto); + +impl EntityGlobalId { + pub fn zid(&self) -> ZenohId { + self.0.zid.into() + } + + pub fn eid(&self) -> EntityId { + self.0.eid + } +} + +impl fmt::Debug for EntityGlobalId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EntityGlobalId") + .field("zid", &self.zid()) + .field("eid", &self.eid()) + .finish() + } +} + +impl From<EntityGlobalIdProto> for EntityGlobalId { + fn from(id: EntityGlobalIdProto) -> Self { + Self(id) + } +} + +impl From<EntityGlobalId> for EntityGlobalIdProto { + fn from(value: EntityGlobalId) -> Self { + value.0 + } +} diff --git a/commons/zenoh-core/src/lib.rs b/commons/zenoh-core/src/lib.rs index e15ff1d3bf..37102d619d 100644 --- a/commons/zenoh-core/src/lib.rs +++ b/commons/zenoh-core/src/lib.rs @@ -20,25 +20,50 @@ pub use lazy_static::lazy_static; pub mod macros; -use std::future::{Future, Ready}; +use std::future::{Future, IntoFuture, Ready}; // Re-exports after moving ZError/ZResult to zenoh-result pub use zenoh_result::{bail, to_zerror, zerror}; pub mod zresult { pub use zenoh_result::*; } -pub use zresult::Error; -pub use zresult::ZResult as Result; +pub use zresult::{Error, ZResult as Result}; +/// A resolvable execution, either sync or async pub trait Resolvable { type To: Sized + Send; } +/// Trick used to mark `<Self as IntoFuture>::IntoFuture` bound as Send +#[doc(hidden)] +pub trait IntoSendFuture: Resolvable { + type IntoFuture: Future<Output = Self::To> + Send; +} + +impl<T> IntoSendFuture for T
+where + T: Resolvable + IntoFuture<Output = Self::To>, + T::IntoFuture: Send, +{ + type IntoFuture = T::IntoFuture; +} + +/// Synchronous execution of a resolvable +pub trait Wait: Resolvable { + /// Synchronously execute and wait + fn wait(self) -> Self::To; +} + +#[deprecated(since = "1.0.0", note = "use `.await` directly instead")] pub trait AsyncResolve: Resolvable { - type Future: Future<Output = <Self as Resolvable>::To> + Send; + type Future: Future<Output = Self::To> + Send; + #[allow(deprecated)] + #[deprecated(since = "1.0.0", note = "use `.await` directly instead")] fn res_async(self) -> Self::Future; + #[allow(deprecated)] + #[deprecated(since = "1.0.0", note = "use `.wait()` instead")] fn res(self) -> Self::Future where Self: Sized, @@ -47,10 +72,27 @@ pub trait AsyncResolve: Resolvable { } } +#[allow(deprecated)] +impl<T> AsyncResolve for T +where + T: Resolvable + IntoFuture<Output = Self::To>, + T::IntoFuture: Send, +{ + type Future = T::IntoFuture; + + fn res_async(self) -> Self::Future { + self.into_future() + } +} + +#[deprecated(since = "1.0.0", note = "use `.wait()` instead")] pub trait SyncResolve: Resolvable { - fn res_sync(self) -> <Self as Resolvable>::To; + #[deprecated(since = "1.0.0", note = "use `.wait()` instead")] + fn res_sync(self) -> Self::To; - fn res(self) -> <Self as Resolvable>::To + #[allow(deprecated)] + #[deprecated(since = "1.0.0", note = "use `.wait()` instead")] + fn res(self) -> Self::To where Self: Sized, { @@ -58,23 +100,42 @@ pub trait SyncResolve: Resolvable { } } +#[allow(deprecated)] +impl<T> SyncResolve for T +where + T: Wait, +{ + fn res_sync(self) -> Self::To { + self.wait() + } +} + /// Zenoh's trait for resolving builder patterns. /// -/// Builder patterns in Zenoh can be resolved with [`AsyncResolve`] in async context and [`SyncResolve`] in sync context. -/// In both async and sync context calling `.res()` resolves the builder. -/// `.res()` maps to `.res_async()` in async context. -/// `.res()` maps to `.res_sync()` in sync context. -/// We advise to prefer the usage of [`AsyncResolve`] and to use [`SyncResolve`] with caution. -#[must_use = "Resolvables do nothing unless you resolve them using `.res()`."] -pub trait Resolve<Output>: Resolvable<To = Output> + SyncResolve + AsyncResolve + Send {} +/// Builder patterns in Zenoh can be resolved by awaiting them in an async context, +/// and with [`Wait::wait`] in a sync context. +/// We advise preferring asynchronous execution, and using the synchronous one with caution. +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or the synchronous `.wait()` method"] +pub trait Resolve<Output>: + Resolvable<To = Output> + + Wait + + IntoSendFuture + + IntoFuture<IntoFuture = <Self as IntoSendFuture>::IntoFuture, Output = Output> + + Send +{ +} impl<T, Output> Resolve<Output> for T where - T: Resolvable<To = Output> + SyncResolve + AsyncResolve + Send + T: Resolvable<To = Output> + + Wait + + IntoSendFuture + + IntoFuture<IntoFuture = <Self as IntoSendFuture>::IntoFuture, Output = Output> + + Send { } // Closure to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[must_use = "Resolvables do nothing unless you resolve them using `.await` or the synchronous `.wait()` method"] pub struct ResolveClosure<C, To>(C) where To: Sized + Send, C: FnOnce() -> To + Send, @@ -98,30 +159,31 @@ where type To = To; } -impl<C, To> AsyncResolve for ResolveClosure<C, To> +impl<C, To> IntoFuture for ResolveClosure<C, To> where To: Sized + Send, C: FnOnce() -> To + Send, { - type Future = Ready<<Self as Resolvable>::To>; + type Output = <Self as Resolvable>::To; + type IntoFuture = Ready<<Self as Resolvable>::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -impl<C, To> SyncResolve for ResolveClosure<C, To> +impl<C, To> Wait for ResolveClosure<C, To> where To: Sized + Send, C: FnOnce() -> To + Send, { - fn res_sync(self) -> <Self as Resolvable>::To { + fn wait(self) -> <Self as Resolvable>::To { self.0() } } // Future to wait -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct ResolveFuture<F, To>(F) where To: Sized + Send, @@ -145,24 +207,25 @@ where type To = To; } -impl<F, To> AsyncResolve for ResolveFuture<F, To> +impl<F, To> IntoFuture for ResolveFuture<F, To> where To: Sized + Send, F: Future<Output = To> + Send, { - type Future = F; + type Output = To; + type IntoFuture = F; - fn res_async(self) -> Self::Future { + fn into_future(self) -> Self::IntoFuture { self.0 } } -impl<F, To> SyncResolve for ResolveFuture<F, To> +impl<F, To> Wait for ResolveFuture<F, To> where To: Sized + Send, F: Future<Output = To> + Send, { - fn res_sync(self) -> <Self as Resolvable>::To { + fn wait(self) -> <Self as Resolvable>::To { zenoh_runtime::ZRuntime::Application.block_in_place(self.0) } } diff --git a/commons/zenoh-core/src/macros.rs b/commons/zenoh-core/src/macros.rs index d8f2f1fdc3..f20f22f41a 100644 --- a/commons/zenoh-core/src/macros.rs +++ b/commons/zenoh-core/src/macros.rs @@ -233,6 +233,8 @@ macro_rules! zcondfeat { #[macro_export] macro_rules!
ztimeout { ($f:expr) => { - tokio::time::timeout(TIMEOUT, $f).await.unwrap() + tokio::time::timeout(TIMEOUT, ::core::future::IntoFuture::into_future($f)) + .await + .unwrap() }; } diff --git a/commons/zenoh-crypto/src/cipher.rs b/commons/zenoh-crypto/src/cipher.rs index 3d12712e56..aa78b97b46 100644 --- a/commons/zenoh-crypto/src/cipher.rs +++ b/commons/zenoh-crypto/src/cipher.rs @@ -11,12 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::PseudoRng; -use aes::cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}; -use aes::Aes128; +use aes::{ + cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt, KeyInit}, + Aes128, +}; use rand::Rng; use zenoh_result::{bail, ZResult}; +use super::PseudoRng; + pub struct BlockCipher { inner: Aes128, } @@ -68,9 +71,10 @@ impl BlockCipher { mod tests { #[test] fn cipher() { - use super::{BlockCipher, PseudoRng}; use rand::{RngCore, SeedableRng}; + use super::{BlockCipher, PseudoRng}; + fn encrypt_decrypt(cipher: &BlockCipher, prng: &mut PseudoRng) { println!("\n[1]"); let t1 = "A".as_bytes().to_vec(); diff --git a/commons/zenoh-keyexpr/Cargo.toml b/commons/zenoh-keyexpr/Cargo.toml index 41456af1ec..2f90386e4a 100644 --- a/commons/zenoh-keyexpr/Cargo.toml +++ b/commons/zenoh-keyexpr/Cargo.toml @@ -26,6 +26,8 @@ description = "Internal crate for zenoh." [features] default = ["std"] std = ["zenoh-result/std", "dep:schemars"] +internal = [] +unstable = [] [dependencies] keyed-set = { workspace = true } diff --git a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs index 4047e3cf5c..7048521eda 100644 --- a/commons/zenoh-keyexpr/benches/keyexpr_tree.rs +++ b/commons/zenoh-keyexpr/benches/keyexpr_tree.rs @@ -18,12 +18,15 @@ use std::{ }; use rand::SeedableRng; -use zenoh_keyexpr::keyexpr_tree::{ - impls::{HashMapProvider, VecSetProvider}, - traits::*, - KeArcTree, KeBoxTree, +use zenoh_keyexpr::{ + fuzzer::KeyExprFuzzer, + keyexpr_tree::{ + impls::{HashMapProvider, VecSetProvider}, + traits::*, + KeArcTree, KeBoxTree, + }, + OwnedKeyExpr, }; -use zenoh_keyexpr::{fuzzer::KeyExprFuzzer, OwnedKeyExpr}; #[derive(Clone, Copy, Debug, Default)] pub struct Averager { diff --git a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs index 53d30d625e..e2afa9712f 100644 --- a/commons/zenoh-keyexpr/src/key_expr/borrowed.rs +++ b/commons/zenoh-keyexpr/src/key_expr/borrowed.rs @@ -12,20 +12,23 @@ // ZettaScale Zenoh Team, // -use super::{canon::Canonizable, OwnedKeyExpr, FORBIDDEN_CHARS}; +#[cfg(feature = "internal")] +use alloc::vec::Vec; use alloc::{ borrow::{Borrow, ToOwned}, format, string::String, - vec::Vec, }; use core::{ convert::{TryFrom, TryInto}, fmt, ops::{Deref, Div}, }; + use zenoh_result::{bail, Error as ZError, ZResult}; +use super::{canon::Canonize, OwnedKeyExpr, FORBIDDEN_CHARS}; + /// A [`str`] newtype that is statically known to be a valid key expression. /// /// The exact key expression specification can be found [here](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md). 
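As a quick illustration of the `Canonizable` -> `Canonize` rename imported above, a sketch using `OwnedKeyExpr::autocanonize`; the expected values mirror the `canonizer` tests further below, and it assumes `OwnedKeyExpr: TryFrom<String>` as provided by this crate:

    use zenoh_keyexpr::OwnedKeyExpr;

    fn main() {
        // `autocanonize` first canonizes the input in place (via the `Canonize`
        // bound), then validates it as a key expression.
        let ke = OwnedKeyExpr::autocanonize(String::from("hello/**/**/bye")).unwrap();
        assert_eq!(ke.as_str(), "hello/**/bye");
    }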
@@ -69,7 +72,7 @@ impl keyexpr { pub fn autocanonize<'a, T, E>(t: &'a mut T) -> Result<&'a Self, E> where &'a Self: TryFrom<&'a T, Error = E>, - T: Canonizable + ?Sized, + T: Canonize + ?Sized, { t.canonize(); Self::new(t) @@ -90,6 +93,7 @@ impl keyexpr { /// Returns the relation between `self` and `other` from `self`'s point of view ([`SetIntersectionLevel::Includes`] signifies that `self` includes `other`). /// /// Note that this is slower than [`keyexpr::intersects`] and [`keyexpr::includes`], so you should favor these methods for most applications. + #[cfg(feature = "unstable")] pub fn relation_to(&self, other: &Self) -> SetIntersectionLevel { use SetIntersectionLevel::*; if self.intersects(other) { @@ -124,7 +128,12 @@ impl keyexpr { } /// Returns `true` if `self` contains any wildcard character (`**` or `$*`). + #[cfg(feature = "internal")] + #[doc(hidden)] pub fn is_wild(&self) -> bool { + self.is_wild_impl() + } + pub(crate) fn is_wild_impl(&self) -> bool { self.0.contains(super::SINGLE_WILD as char) } @@ -161,6 +170,8 @@ impl keyexpr { /// None, /// keyexpr::new("dem$*").unwrap().get_nonwild_prefix()); /// ``` + #[cfg(feature = "internal")] + #[doc(hidden)] pub fn get_nonwild_prefix(&self) -> Option<&keyexpr> { match self.0.find('*') { Some(i) => match self.0[..i].rfind('/') { @@ -226,6 +237,8 @@ impl keyexpr { /// keyexpr::new("demo/example/test/**").unwrap().strip_prefix(keyexpr::new("not/a/prefix").unwrap()).is_empty() /// ); /// ``` + #[cfg(feature = "internal")] + #[doc(hidden)] pub fn strip_prefix(&self, prefix: &Self) -> Vec<&keyexpr> { let mut result = alloc::vec![]; 'chunks: for i in (0..=self.len()).rev() { @@ -291,7 +304,13 @@ impl keyexpr { pub unsafe fn from_slice_unchecked(s: &[u8]) -> &Self { core::mem::transmute(s) } + + #[cfg(feature = "internal")] + #[doc(hidden)] pub const fn chunks(&self) -> Chunks { + self.chunks_impl() + } + pub(crate) const fn chunks_impl(&self) -> Chunks { Chunks { inner: self.as_str(), } @@ -550,6 +569,7 @@ impl Div for &keyexpr { /// /// You can check for intersection with `level >= SetIntersectionLevel::Intersection` and for inclusion with `level >= SetIntersectionLevel::Includes`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg(feature = "unstable")] pub enum SetIntersectionLevel { Disjoint, Intersects, diff --git a/commons/zenoh-keyexpr/src/key_expr/canon.rs b/commons/zenoh-keyexpr/src/key_expr/canon.rs index 00e79b0c08..a8950b6d0c 100644 --- a/commons/zenoh-keyexpr/src/key_expr/canon.rs +++ b/commons/zenoh-keyexpr/src/key_expr/canon.rs @@ -11,114 +11,104 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::key_expr::{ - utils::{Split, Writer}, - DELIMITER, DOUBLE_WILD, SINGLE_WILD, -}; use alloc::string::String; -use core::{slice, str}; -pub trait Canonizable { +pub trait Canonize { fn canonize(&mut self); } -const DOLLAR_STAR: &[u8; 2] = b"$*"; - -impl Canonizable for &mut str { - fn canonize(&mut self) { - let mut writer = Writer { - ptr: self.as_mut_ptr(), - len: 0, - }; - if let Some(position) = self.find("$*$*") { - writer.len = position; - let mut need_final_write = true; - for between_dollarstar in self.as_bytes()[(position + 4)..].splitter(DOLLAR_STAR) { - need_final_write = between_dollarstar.is_empty(); - if !need_final_write { - writer.write(DOLLAR_STAR.as_ref()); - writer.write(between_dollarstar); - } +// Return the length of the canonized string +fn canonize(bytes: &mut [u8]) -> usize { + let mut index = 0; + let mut written = 0; + let mut double_wild = false; + loop { + match &bytes[index..] { + [b'*', b'*'] => { + bytes[written..written + 2].copy_from_slice(b"**"); + written += 2; + return written; } - if need_final_write { - writer.write(DOLLAR_STAR.as_ref()) + [b'*', b'*', b'/', ..] => { + double_wild = true; + index += 3; } - *self = unsafe { - str::from_utf8_unchecked_mut(slice::from_raw_parts_mut(writer.ptr, writer.len)) - } - } - writer.len = 0; - let mut ke = self.as_bytes().splitter(&b'/'); - let mut in_big_wild = false; - - for chunk in ke.by_ref() { - if chunk.is_empty() { - break; - } - if in_big_wild { - match chunk { - [SINGLE_WILD] | b"$*" => { - writer.write_byte(b'*'); - break; - } - DOUBLE_WILD => continue, - _ => { - writer.write(b"**/"); - writer.write(chunk); - in_big_wild = false; - break; + [b'*', r @ ..] | [b'$', b'*', r @ ..] if r.is_empty() || r.starts_with(b"/") => { + let (end, len) = (!r.starts_with(b"/"), r.len()); + bytes[written] = b'*'; + written += 1; + if end { + if double_wild { + bytes[written..written + 3].copy_from_slice(b"/**"); + written += 3; } + return written; } - } else if chunk == DOUBLE_WILD { - in_big_wild = true; - continue; - } else { - writer.write(if chunk == b"$*" { b"*" } else { chunk }); - break; + bytes[written] = b'/'; + written += 1; + index = bytes.len() - len + 1; } - } - for chunk in ke { - if chunk.is_empty() { - writer.write_byte(b'/'); - continue; + // Handle chunks with only repeated "$*" + [b'$', b'*', b'$', b'*', ..] => { + index += 2; } - if in_big_wild { - match chunk { - [SINGLE_WILD] | b"$*" => { - writer.write(b"/*"); - } - DOUBLE_WILD => {} - _ => { - writer.write(b"/**/"); - writer.write(chunk); - in_big_wild = false; + _ => { + if double_wild && &bytes[index..] 
!= b"**" { + bytes[written..written + 3].copy_from_slice(b"**/"); + written += 3; + double_wild = false; + } + let mut write_start = index; + loop { + match bytes.get(index) { + Some(b'/') => { + index += 1; + bytes.copy_within(write_start..index, written); + written += index - write_start; + break; + } + Some(b'$') if matches!(bytes.get(index + 1..index + 4), Some(b"*$*")) => { + index += 2; + bytes.copy_within(write_start..index, written); + written += index - write_start; + let skip = bytes[index + 4..] + .windows(2) + .take_while(|s| s == b"$*") + .count(); + index += (1 + skip) * 2; + write_start = index; + } + Some(_) => index += 1, + None => { + bytes.copy_within(write_start..index, written); + written += index - write_start; + return written; + } } } - } else if chunk == DOUBLE_WILD { - in_big_wild = true; - } else { - writer.write_byte(DELIMITER); - writer.write(if chunk == b"$*" { b"*" } else { chunk }); } } - if in_big_wild { - if writer.len != 0 { - writer.write_byte(DELIMITER); - } - writer.write(DOUBLE_WILD) - } - *self = unsafe { - str::from_utf8_unchecked_mut(slice::from_raw_parts_mut(writer.ptr, writer.len)) - } } } -impl Canonizable for String { +impl Canonize for &mut str { + fn canonize(&mut self) { + // SAFETY: canonize leave an UTF8 string within the returned length, + // and remaining garbage bytes are zeroed + let bytes = unsafe { self.as_bytes_mut() }; + let length = canonize(bytes); + bytes[length..].fill(b'\0'); + *self = &mut core::mem::take(self)[..length]; + } +} + +impl Canonize for String { fn canonize(&mut self) { - let mut s = self.as_mut(); - s.canonize(); - let len = s.len(); - self.truncate(len); + // SAFETY: canonize leave an UTF8 string within the returned length, + // and remaining garbage bytes are truncated + let bytes = unsafe { self.as_mut_vec() }; + let length = canonize(bytes); + bytes.truncate(length); } } @@ -149,6 +139,9 @@ fn canonizer() { let mut s = String::from("hello/**/**/bye"); s.canonize(); assert_eq!(s, "hello/**/bye"); + let mut s = String::from("hello/**/**"); + s.canonize(); + assert_eq!(s, "hello/**"); // Any $* chunk is replaced by a * chunk let mut s = String::from("hello/$*/bye"); @@ -171,4 +164,11 @@ fn canonizer() { let mut s = String::from("hello/**/*"); s.canonize(); assert_eq!(s, "hello/*/**"); + + // &mut str remaining part is zeroed + let mut s = String::from("$*$*$*/hello/$*$*/bye/$*$*"); + let mut s_mut = s.as_mut_str(); + s_mut.canonize(); + assert_eq!(s_mut, "*/hello/*/bye/*"); + assert_eq!(s, "*/hello/*/bye/*\0\0\0\0\0\0\0\0\0\0\0"); } diff --git a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs index bf5536ec63..d4eccd6d41 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/mod.rs @@ -17,8 +17,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +//! 
[`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. //! //! ## The format syntax //! KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formats support "specification" chunks. @@ -67,8 +67,8 @@ use support::{IterativeConstructor, Spec}; /// The same issue arises naturally when designing a KE space, and [`KeFormat`] was designed to help you with this, /// both in constructing and in parsing KEs that fit the formats you've defined. /// -/// [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -/// as the [`zenoh::keformat`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.keformat.html) and [`zenoh::kewrite`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. +/// [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, +/// as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist. /// /// ## The format syntax /// KE formats are defined following a syntax that extends the [`keyexpr`] syntax. In addition to existing chunk types, KE formats support "specification" chunks. @@ -120,7 +120,7 @@ impl<'s> KeFormat<'s, Vec<Spec<'s>>> { /// /// `N` is simply the number of specifications in `value`. If this number of specs isn't known at compile-time, use [`KeFormat::new`] instead. /// - /// If you know `value` at compile time, using [`zenoh::kedefine`](https://docs.rs/zenoh/0.10.1-rc/zenoh/macro.kedefine.html) instead is advised, + /// If you know `value` at compile time, using [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) instead is advised, /// as it will provide more features and construct higher performance formats than this constructor.
pub fn noalloc_new(value: &'s str) -> ZResult; N]>> { value.try_into() diff --git a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs index 52f01c5b6a..a6329cdf73 100644 --- a/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs +++ b/commons/zenoh-keyexpr/src/key_expr/format/parsing.rs @@ -230,8 +230,9 @@ fn do_parse<'a>( #[test] fn parsing() { - use crate::key_expr::OwnedKeyExpr; use core::convert::TryFrom; + + use crate::key_expr::OwnedKeyExpr; for a_spec in ["${a:*}", "a/${a:*}"] { for b_spec in ["b/${b:**}", "${b:**}"] { let specs = [a_spec, b_spec, "c"]; diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs index cc28ef2c4c..aea554a2f1 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/classical.rs @@ -119,8 +119,7 @@ pub fn intersect(s1: &[u8], s2: &[u8]) -> bool { it_intersect::(s1, s2) } -use super::restiction::NoSubWilds; -use super::{Intersector, MayHaveVerbatim}; +use super::{restiction::NoSubWilds, Intersector, MayHaveVerbatim}; pub struct ClassicIntersector; impl Intersector, NoSubWilds<&[u8]>> for ClassicIntersector { diff --git a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs index f5d7735d9e..06b990ee72 100644 --- a/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs +++ b/commons/zenoh-keyexpr/src/key_expr/intersect/mod.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use crate::DELIMITER; - use super::keyexpr; +use crate::DELIMITER; mod classical; pub use classical::ClassicIntersector; diff --git a/commons/zenoh-keyexpr/src/key_expr/owned.rs b/commons/zenoh-keyexpr/src/key_expr/owned.rs index 5164e4762c..517de97658 100644 --- a/commons/zenoh-keyexpr/src/key_expr/owned.rs +++ b/commons/zenoh-keyexpr/src/key_expr/owned.rs @@ -13,7 +13,6 @@ // extern crate alloc; -use super::{canon::Canonizable, keyexpr}; // use crate::core::WireExpr; use alloc::{borrow::ToOwned, boxed::Box, string::String, sync::Arc}; use core::{ @@ -23,6 +22,8 @@ use core::{ str::FromStr, }; +use super::{canon::Canonize, keyexpr}; + /// A [`Arc`] newtype that is statically known to be a valid key expression. /// /// See [`keyexpr`](super::borrowed::keyexpr). @@ -59,7 +60,7 @@ impl OwnedKeyExpr { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) @@ -70,13 +71,13 @@ impl OwnedKeyExpr { /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. pub unsafe fn from_string_unchecked(s: String) -> Self { - Self::from_boxed_string_unchecked(s.into_boxed_str()) + Self::from_boxed_str_unchecked(s.into_boxed_str()) } /// Constructs an OwnedKeyExpr without checking [`keyexpr`]'s invariants /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. 
- pub unsafe fn from_boxed_string_unchecked(s: Box) -> Self { + pub unsafe fn from_boxed_str_unchecked(s: Box) -> Self { OwnedKeyExpr(s.into()) } } diff --git a/commons/zenoh-keyexpr/src/key_expr/tests.rs b/commons/zenoh-keyexpr/src/key_expr/tests.rs index 6d9e64896e..c004666776 100644 --- a/commons/zenoh-keyexpr/src/key_expr/tests.rs +++ b/commons/zenoh-keyexpr/src/key_expr/tests.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::key_expr::{fuzzer, intersect::*, keyexpr}; use std::{convert::TryInto, fmt::Debug}; +use crate::key_expr::{fuzzer, intersect::*, keyexpr}; + type BoxedIntersectors = Vec Intersector<&'a keyexpr, &'a keyexpr> + Send + Sync>>; lazy_static::lazy_static! { diff --git a/commons/zenoh-keyexpr/src/key_expr/utils.rs b/commons/zenoh-keyexpr/src/key_expr/utils.rs index 628477174a..63f4b4c088 100644 --- a/commons/zenoh-keyexpr/src/key_expr/utils.rs +++ b/commons/zenoh-keyexpr/src/key_expr/utils.rs @@ -11,25 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use core::ptr; - -pub(crate) struct Writer { - pub ptr: *mut u8, - pub len: usize, -} - -impl Writer { - pub(crate) fn write(&mut self, slice: &[u8]) { - let len = slice.len(); - unsafe { ptr::copy(slice.as_ptr(), self.ptr.add(self.len), len) }; - self.len += len - } - pub(crate) fn write_byte(&mut self, byte: u8) { - unsafe { *self.ptr.add(self.len) = byte }; - self.len += 1 - } -} - #[derive(Debug)] pub struct Splitter<'a, S: ?Sized, D: ?Sized> { s: Option<&'a S>, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs index fe2640c604..35197a26da 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/arc_tree.rs @@ -20,10 +20,11 @@ use core::fmt::Debug; use token_cell::prelude::*; -use super::box_tree::PruneResult; -use super::support::IterOrOption; -use crate::keyexpr; -use crate::keyexpr_tree::{support::IWildness, *}; +use super::{box_tree::PruneResult, support::IterOrOption}; +use crate::{ + keyexpr, + keyexpr_tree::{support::IWildness, *}, +}; pub struct KeArcTreeInner< Weight, @@ -147,7 +148,7 @@ where // tags{ketree.arc.node} fn node(&'a self, token: &'a Token, at: &keyexpr) -> Option { let inner = ketree_borrow(&self.inner, token); - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = inner.children.child_at(chunks.next().unwrap())?; for chunk in chunks { let as_node: &Arc< @@ -168,7 +169,7 @@ where // tags{ketree.arc.node.or_create} fn node_or_create(&'a self, token: &'a mut Token, at: &keyexpr) -> Self::NodeMut { let inner = ketree_borrow_mut(&self.inner, token); - if at.is_wild() { + if at.is_wild_impl() { inner.wildness.set(true); } let inner: &mut KeArcTreeInner = @@ -184,7 +185,7 @@ where token, )) }; - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = inner .children .entry(chunks.next().unwrap()) @@ -262,7 +263,7 @@ where // tags{ketree.arc.intersecting} fn intersecting_nodes(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Intersection { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Intersection::new(&inner.children, key), token, @@ -291,7 +292,7 @@ where key: &'a keyexpr, ) -> Self::IntersectionMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { 
iter: Intersection::new( unsafe { @@ -322,7 +323,7 @@ where // tags{ketree.arc.included} fn included_nodes(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Inclusion { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Inclusion::new(&inner.children, key), token, @@ -347,7 +348,7 @@ where // tags{ketree.arc.included.mut} fn included_nodes_mut(&'a self, token: &'a mut Token, key: &'a keyexpr) -> Self::InclusionMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { iter: Inclusion::new( @@ -378,7 +379,7 @@ where // tags{ketree.arc.including} fn nodes_including(&'a self, token: &'a Token, key: &'a keyexpr) -> Self::Includer { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { IterOrOption::Iter(TokenPacker { iter: Includer::new(&inner.children, key), token, @@ -403,7 +404,7 @@ where // tags{ketree.arc.including.mut} fn nodes_including_mut(&'a self, token: &'a mut Token, key: &'a keyexpr) -> Self::IncluderMut { let inner = ketree_borrow(&self.inner, token); - if inner.wildness.get() || key.is_wild() { + if inner.wildness.get() || key.is_wild_impl() { unsafe { IterOrOption::Iter(TokenPacker { iter: Includer::new( @@ -444,6 +445,7 @@ where pub(crate) mod sealed { use alloc::sync::Arc; use core::ops::{Deref, DerefMut}; + use token_cell::prelude::{TokenCell, TokenTrait}; pub struct Tokenized(pub A, pub(crate) B); @@ -595,7 +597,7 @@ where }); if predicate(self) && self.children.is_empty() { result = PruneResult::Delete - } else if self.chunk.is_wild() { + } else if self.chunk.is_wild_impl() { result = PruneResult::Wild } result diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs index aed873f51a..69607e9608 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/box_tree.rs @@ -17,10 +17,11 @@ use alloc::boxed::Box; use alloc::string::String; use core::ptr::NonNull; -use crate::keyexpr; -use crate::keyexpr_tree::{support::IWildness, *}; - use super::support::IterOrOption; +use crate::{ + keyexpr, + keyexpr_tree::{support::IWildness, *}, +}; /// A fully owned KeTree. 
/// @@ -74,7 +75,7 @@ where { type Node = KeyExprTreeNode; fn node(&'a self, at: &keyexpr) -> Option<&'a Self::Node> { - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self.children.child_at(chunks.next().unwrap())?; for chunk in chunks { node = node.as_node().children.child_at(chunk)?; @@ -93,7 +94,7 @@ where &'a Self::Node, >; fn intersecting_nodes(&'a self, ke: &'a keyexpr) -> Self::Intersection { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Intersection::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -107,7 +108,7 @@ where &'a Self::Node, >; fn included_nodes(&'a self, ke: &'a keyexpr) -> Self::Inclusion { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Inclusion::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -121,7 +122,7 @@ where &'a Self::Node, >; fn nodes_including(&'a self, ke: &'a keyexpr) -> Self::Includer { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { Includer::new(&self.children, ke).into() } else { let node = self.node(ke); @@ -144,7 +145,7 @@ where > + 'a, { fn node_mut<'b>(&'b mut self, at: &keyexpr) -> Option<&'b mut Self::Node> { - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self.children.child_at_mut(chunks.next().unwrap())?; for chunk in chunks { node = node.as_node_mut().children.child_at_mut(chunk)?; @@ -168,10 +169,10 @@ where } fn node_mut_or_create<'b>(&'b mut self, at: &keyexpr) -> &'b mut Self::Node { - if at.is_wild() { + if at.is_wild_impl() { self.wildness.set(true); } - let mut chunks = at.chunks(); + let mut chunks = at.chunks_impl(); let mut node = self .children .entry(chunks.next().unwrap()) @@ -209,7 +210,7 @@ where &'a mut Self::Node, >; fn intersecting_nodes_mut(&'a mut self, ke: &'a keyexpr) -> Self::IntersectionMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { IntersectionMut::new(&mut self.children, ke).into() } else { let node = self.node_mut(ke); @@ -222,7 +223,7 @@ where &'a mut Self::Node, >; fn included_nodes_mut(&'a mut self, ke: &'a keyexpr) -> Self::InclusionMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { InclusionMut::new(&mut self.children, ke).into() } else { let node = self.node_mut(ke); @@ -235,7 +236,7 @@ where &'a mut Self::Node, >; fn nodes_including_mut(&'a mut self, ke: &'a keyexpr) -> Self::IncluderMut { - if self.wildness.get() || ke.is_wild() { + if self.wildness.get() || ke.is_wild_impl() { IncluderMut::new(&mut self.children, ke).into() } else { let node = self.node_mut(ke); @@ -364,7 +365,7 @@ where }); if predicate(self) && self.children.is_empty() { result = PruneResult::Delete - } else if self.chunk.is_wild() { + } else if self.chunk.is_wild_impl() { result = PruneResult::Wild } result diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs index 72f830a912..a5a16e1d82 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/hashmap_impl.rs @@ -17,17 +17,18 @@ use core::hash::Hasher; // `SipHasher` is deprecated in favour of a symbol that only exists in `std` #[allow(deprecated)] use core::hash::SipHasher as DefaultHasher; -#[cfg(not(feature = "std"))] -use hashbrown::{ - hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, - HashMap, 
-}; #[cfg(feature = "std")] use std::collections::{ hash_map::{DefaultHasher, Entry, Iter, IterMut, Values, ValuesMut}, HashMap, }; +#[cfg(not(feature = "std"))] +use hashbrown::{ + hash_map::{Entry, Iter, IterMut, Values, ValuesMut}, + HashMap, +}; + use crate::keyexpr_tree::*; #[cfg_attr(not(feature = "std"), allow(deprecated))] diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs index 4fab65a850..f61e509ead 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/keyed_set_impl.rs @@ -20,9 +20,10 @@ use core::hash::SipHasher as DefaultHasher; #[cfg(feature = "std")] use std::collections::hash_map::DefaultHasher; -use crate::keyexpr_tree::*; use keyed_set::{KeyExtractor, KeyedSet}; +use crate::keyexpr_tree::*; + #[cfg_attr(not(feature = "std"), allow(deprecated))] pub struct KeyedSetProvider( core::marker::PhantomData, @@ -52,7 +53,8 @@ impl + AsNodeMut> IChildren for KeyedSet Option<&mut T> { - self.get_mut_unguarded(&chunk) + // Unicity is guaranteed by &mut self + unsafe { self.get_mut_unguarded(&chunk) } } fn remove(&mut self, chunk: &keyexpr) -> Option { self.remove(&chunk) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs index 2645c9d95b..48547429f3 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/mod.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr; pub use hashmap_impl::HashMapProvider; pub use keyed_set_impl::KeyedSetProvider; pub use vec_set_impl::VecSetProvider; + +use crate::keyexpr; mod hashmap_impl; mod keyed_set_impl; mod vec_set_impl; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs index 96877ebda6..510755e3c4 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/impls/vec_set_impl.rs @@ -13,6 +13,7 @@ // use alloc::vec::Vec; + use zenoh_result::unlikely; use crate::keyexpr_tree::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs index a22d0804b1..bf09714f29 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/includer.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs index 0ed2c96645..87e5af90a9 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/inclusion.rs @@ -12,10 +12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs index 34902810f0..dccd571911 100644 --- 
a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/intersection.rs @@ -12,10 +12,12 @@ // ZettaScale Zenoh Team, // -use crate::keyexpr_tree::*; use alloc::vec::Vec; + use zenoh_result::unlikely; +use crate::keyexpr_tree::*; + struct StackFrame<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> where Children::Assoc: IChildren + 'a, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs index 666f0cb2c2..05afae3885 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/iters/tree_iter.rs @@ -12,9 +12,8 @@ // ZettaScale Zenoh Team, // -use core::num::NonZeroUsize; - use alloc::vec::Vec; +use core::num::NonZeroUsize; use crate::keyexpr_tree::*; pub struct TreeIter<'a, Children: IChildrenProvider, Node: UIKeyExprTreeNode, Weight> diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs index db18f6ab67..fd36e6fdcc 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/mod.rs @@ -47,7 +47,7 @@ //! # Iterators //! KeTrees provide iterators for the following operations: //! - Iterating on all nodes ([`IKeyExprTree::tree_iter`]/[`IKeyExprTreeMut::tree_iter_mut`]) -//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTreeExt::key_value_pairs`]) +//! - Iterating on key-value pairs in the KeTree ([`IKeyExprTree::key_value_pairs`]) //! - Iterating on nodes whose KE intersects with a queried KE ([`IKeyExprTree::intersecting_nodes`], [`IKeyExprTreeMut::intersecting_nodes_mut`]) //! - Iterating on nodes whose KE are included by a queried KE ([`IKeyExprTree::included_nodes`], [`IKeyExprTreeMut::included_nodes_mut`]) //! 
- Iterating on nodes whose KE includes a queried KE ([`IKeyExprTree::nodes_including`], [`IKeyExprTreeMut::nodes_including_mut`]) diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs index 2d916d9a84..39b8c12aee 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/test.rs @@ -12,24 +12,25 @@ // ZettaScale Zenoh Team, // -use crate::fuzzer::KeyExprFuzzer; use alloc::vec::Vec; -use rand::Rng; - -use super::{ - impls::{KeyedSetProvider, VecSetProvider}, - *, -}; use core::{ convert::{TryFrom, TryInto}, fmt::Debug, ops::Deref, }; -#[cfg(not(feature = "std"))] -use hashbrown::HashMap; #[cfg(feature = "std")] use std::collections::HashMap; +#[cfg(not(feature = "std"))] +use hashbrown::HashMap; +use rand::Rng; + +use super::{ + impls::{KeyedSetProvider, VecSetProvider}, + *, +}; +use crate::fuzzer::KeyExprFuzzer; + fn insert<'a, K: TryInto<&'a keyexpr>, V: Clone + PartialEq + Debug + 'static>( ketree: &mut KeBoxTree, map: &mut HashMap>, diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs index e6def16608..6a043ccda0 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/default_impls.rs @@ -13,6 +13,7 @@ // use alloc::{boxed::Box, sync::Arc}; + use token_cell::prelude::{TokenCell, TokenCellTrait, TokenTrait}; use super::*; diff --git a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs index ed3f36a371..e880dae9c0 100644 --- a/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs +++ b/commons/zenoh-keyexpr/src/keyexpr_tree/traits/mod.rs @@ -12,8 +12,9 @@ // ZettaScale Zenoh Team, // -use crate::{keyexpr, OwnedKeyExpr}; use alloc::boxed::Box; + +use crate::{keyexpr, OwnedKeyExpr}; pub mod default_impls; /// The basic immutable methods of all KeTrees. diff --git a/commons/zenoh-keyexpr/src/lib.rs b/commons/zenoh-keyexpr/src/lib.rs index c5e444b29e..03565fe63a 100644 --- a/commons/zenoh-keyexpr/src/lib.rs +++ b/commons/zenoh-keyexpr/src/lib.rs @@ -22,8 +22,8 @@ //! # Storing Key Expressions //! This module provides 2 flavours to store strings that have been validated to respect the KE syntax, and a third is provided by [`zenoh`](https://docs.rs/zenoh): //! - [`keyexpr`] is the equivalent of a [`str`], -//! - [`OwnedKeyExpr`] works like an [`Arc`], -//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`], but also stores some additional context internal to Zenoh to optimize +//! - [`OwnedKeyExpr`] works like an [`Arc`](std::sync::Arc), +//! - [`KeyExpr`](https://docs.rs/zenoh/latest/zenoh/key_expr/struct.KeyExpr.html) works like a [`Cow`](std::borrow::Cow), but also stores some additional context internal to Zenoh to optimize //! routing and network usage. //! //! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, @@ -40,8 +40,8 @@ //! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, //! both in constructing and in parsing KEs that fit the formats you've defined. //! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! 
as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist.
+//! [`kedefine`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kedefine.html) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats,
+//! as the [`keformat`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.keformat.html) and [`kewrite`](https://docs.rs/zenoh/latest/zenoh/key_expr/format/macro.kewrite.html) macros will be able to tell you if you're attempting to set fields of the format that do not exist.
 #![cfg_attr(not(feature = "std"), no_std)]
 extern crate alloc;
diff --git a/commons/zenoh-macros/Cargo.toml b/commons/zenoh-macros/Cargo.toml
index 7d06482e48..6adfe63deb 100644
--- a/commons/zenoh-macros/Cargo.toml
+++ b/commons/zenoh-macros/Cargo.toml
@@ -20,7 +20,7 @@ homepage = { workspace = true }
 authors = { workspace = true }
 edition = { workspace = true }
 license = { workspace = true }
-categories = ["proc-macros"]
+categories = ["development-tools::procedural-macro-helpers"]
 description = "Internal crate for zenoh."
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/commons/zenoh-macros/src/lib.rs b/commons/zenoh-macros/src/lib.rs
index 3118399dc4..003525daa9 100644
--- a/commons/zenoh-macros/src/lib.rs
+++ b/commons/zenoh-macros/src/lib.rs
@@ -59,15 +59,15 @@ pub fn rustc_version_release(_tokens: TokenStream) -> TokenStream {
     (quote! {(#release, #commit)}).into()
 }
 
-/// An enumeration of items supported by the [`unstable`] attribute.
-enum UnstableItem {
+/// An enumeration of items which can be annotated with the `#[zenoh_macros::unstable_doc]`, `#[zenoh_macros::unstable]`, or `#[zenoh_macros::internal]` attributes.
+enum AnnotableItem {
     /// Wrapper around [`syn::Item`].
     Item(Item),
     /// Wrapper around [`syn::TraitItem`].
     TraitItem(TraitItem),
 }
 
-macro_rules! parse_unstable_item {
+macro_rules! parse_annotable_item {
     ($tokens:ident) => {{
         let item: Item = parse_macro_input!($tokens as Item);
 
@@ -81,19 +81,19 @@ macro_rules! parse_unstable_item {
                     "the `unstable` proc-macro attribute only supports items and trait items",
                 ))
             } else {
-                Ok(UnstableItem::TraitItem(trait_item))
+                Ok(AnnotableItem::TraitItem(trait_item))
             }
         } else {
-            Ok(UnstableItem::Item(item))
+            Ok(AnnotableItem::Item(item))
         }
     }};
 }
 
-impl UnstableItem {
+impl AnnotableItem {
     /// Mutably borrows the attribute list of this item.
    fn attributes_mut(&mut self) -> Result<&mut Vec<Attribute>, Error> {
         match self {
-            UnstableItem::Item(item) => match item {
+            AnnotableItem::Item(item) => match item {
                 Item::Const(item) => Ok(&mut item.attrs),
                 Item::Enum(item) => Ok(&mut item.attrs),
                 Item::ExternCrate(item) => Ok(&mut item.attrs),
@@ -111,17 +111,17 @@ impl UnstableItem {
                 Item::Use(item) => Ok(&mut item.attrs),
                 other => Err(Error::new_spanned(
                     other,
-                    "item is not supported by the `unstable` proc-macro attribute",
+                    "item is not supported by the `unstable` or `internal` proc-macro attribute",
                 )),
             },
-            UnstableItem::TraitItem(trait_item) => match trait_item {
+            AnnotableItem::TraitItem(trait_item) => match trait_item {
                 TraitItem::Const(trait_item) => Ok(&mut trait_item.attrs),
                 TraitItem::Fn(trait_item) => Ok(&mut trait_item.attrs),
                 TraitItem::Type(trait_item) => Ok(&mut trait_item.attrs),
                 TraitItem::Macro(trait_item) => Ok(&mut trait_item.attrs),
                 other => Err(Error::new_spanned(
                     other,
-                    "item is not supported by the `unstable` proc-macro attribute",
+                    "item is not supported by the `unstable` or `internal` proc-macro attribute",
                 )),
             },
         }
@@ -130,15 +130,18 @@ impl UnstableItem {
     /// Converts this item to a `proc_macro2::TokenStream`.
     fn to_token_stream(&self) -> proc_macro2::TokenStream {
         match self {
-            UnstableItem::Item(item) => item.to_token_stream(),
-            UnstableItem::TraitItem(trait_item) => trait_item.to_token_stream(),
+            AnnotableItem::Item(item) => item.to_token_stream(),
+            AnnotableItem::TraitItem(trait_item) => trait_item.to_token_stream(),
         }
     }
 }
 
 #[proc_macro_attribute]
-pub fn unstable(_attr: TokenStream, tokens: TokenStream) -> TokenStream {
-    let mut item = match parse_unstable_item!(tokens) {
+/// Adds only a piece of documentation noting that the item is unstable, without the unstable attribute itself.
+/// This is useful when the whole crate is supposed to be used in unstable mode only: it makes sense
+/// to mention this in the documentation of the crate's items, but not to add `#[cfg(feature = "unstable")]` to every item.
+pub fn unstable_doc(_attr: TokenStream, tokens: TokenStream) -> TokenStream {
+    let mut item = match parse_annotable_item!(tokens) {
         Ok(item) => item,
         Err(err) => return err.into_compile_error().into(),
     };
@@ -155,12 +158,50 @@ pub fn unstable(_attr: TokenStream, tokens: TokenStream) -> TokenStream {
         attrs.push(note);
     }
 
+    TokenStream::from(item.to_token_stream())
+}
+
+#[proc_macro_attribute]
+/// Adds a `#[cfg(feature = "unstable")]` attribute to the item and appends a piece of documentation noting that the item is unstable.
+pub fn unstable(attr: TokenStream, tokens: TokenStream) -> TokenStream {
+    let tokens = unstable_doc(attr, tokens);
+    let mut item = match parse_annotable_item!(tokens) {
+        Ok(item) => item,
+        Err(err) => return err.into_compile_error().into(),
+    };
+
+    let attrs = match item.attributes_mut() {
+        Ok(attrs) => attrs,
+        Err(err) => return err.into_compile_error().into(),
+    };
+
     let feature_gate: Attribute = parse_quote!(#[cfg(feature = "unstable")]);
     attrs.push(feature_gate);
 
     TokenStream::from(item.to_token_stream())
 }
 
+#[proc_macro_attribute]
+/// Adds `#[cfg(feature = "internal")]` and `#[doc(hidden)]` attributes to the item.
+pub fn internal(_attr: TokenStream, tokens: TokenStream) -> TokenStream { + let mut item = match parse_annotable_item!(tokens) { + Ok(item) => item, + Err(err) => return err.into_compile_error().into(), + }; + + let attrs = match item.attributes_mut() { + Ok(attrs) => attrs, + Err(err) => return err.into_compile_error().into(), + }; + + let feature_gate: Attribute = parse_quote!(#[cfg(feature = "internal")]); + let hide_doc: Attribute = parse_quote!(#[doc(hidden)]); + attrs.push(feature_gate); + attrs.push(hide_doc); + + TokenStream::from(item.to_token_stream()) +} + /// Returns `true` if the attribute is a `#[doc = "..."]` attribute. fn is_doc_attribute(attr: &Attribute) -> bool { attr.path() diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 93c92ee33f..2c3a36b7a7 100644 --- a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -33,10 +33,9 @@ std = [ "zenoh-keyexpr/std", "zenoh-result/std", ] -test = ["rand", "zenoh-buffers/test"] +test = ["rand", "zenoh-buffers/test", "zenoh-collections/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] -complete_n = [] [dependencies] const_format = { workspace = true } @@ -44,6 +43,7 @@ rand = { workspace = true, features = ["alloc", "getrandom"], optional = true } serde = { workspace = true, features = ["alloc"] } uhlc = { workspace = true, default-features = false } zenoh-buffers = { workspace = true, default-features = false } +zenoh-collections = { workspace = true, default-features = false } zenoh-keyexpr = { workspace = true } zenoh-result = { workspace = true } diff --git a/commons/zenoh-protocol/src/common/extension.rs b/commons/zenoh-protocol/src/common/extension.rs index 3c0fd881df..5fc6cf8664 100644 --- a/commons/zenoh-protocol/src/common/extension.rs +++ b/commons/zenoh-protocol/src/common/extension.rs @@ -15,6 +15,7 @@ use core::{ convert::TryFrom, fmt::{self, Debug}, }; + use zenoh_buffers::ZBuf; /// # Zenoh extensions diff --git a/commons/zenoh-protocol/src/common/mod.rs b/commons/zenoh-protocol/src/common/mod.rs index d11d0b0c52..99bc471cfd 100644 --- a/commons/zenoh-protocol/src/common/mod.rs +++ b/commons/zenoh-protocol/src/common/mod.rs @@ -19,21 +19,6 @@ pub use extension::*; /*************************************/ // Inner Message IDs pub mod imsg { - pub mod id { - // Zenoh Messages - pub const DECLARE: u8 = 0x0b; - pub const DATA: u8 = 0x0c; - pub const QUERY: u8 = 0x0d; - pub const PULL: u8 = 0x0e; - pub const UNIT: u8 = 0x0f; - pub const LINK_STATE_LIST: u8 = 0x10; - - // Message decorators - pub const PRIORITY: u8 = 0x1c; - pub const ROUTING_CONTEXT: u8 = 0x1d; - pub const REPLY_CONTEXT: u8 = 0x1e; - } - // Header mask pub const HEADER_BITS: u8 = 5; pub const HEADER_MASK: u8 = !(0xff << HEADER_BITS); @@ -61,6 +46,12 @@ pub mod imsg { byte } + pub const fn set_bitfield(mut byte: u8, value: u8, mask: u8) -> u8 { + byte = unset_flag(byte, mask); + byte |= value; + byte + } + pub const fn has_option(options: u64, flag: u64) -> bool { options & flag != 0 } diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 33dac4524f..b31c1c4a5d 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -12,8 +12,10 @@ // ZettaScale Zenoh Team, // use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; -use core::fmt::{Debug, Display, Formatter}; -use core::num::NonZeroUsize; +use core::{ + fmt::{Debug, Display, Formatter}, + num::NonZeroUsize, +}; 
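For reference, a rough sketch of what the attributes introduced in `zenoh-macros` above expand to, assuming a crate that exposes `unstable` and `internal` cargo features (the item names below are placeholders, not taken from this patch):

// `#[zenoh_macros::unstable_doc]` only appends an "unstable" note to the item's docs.
// `#[zenoh_macros::unstable]` does the same and additionally gates the item:
#[cfg(feature = "unstable")]
pub fn some_unstable_api() {}

// `#[zenoh_macros::internal]` gates the item and hides it from rustdoc:
#[cfg(feature = "internal")]
#[doc(hidden)]
pub fn some_internal_api() {}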
enum CowStrInner<'a> { Borrowed(&'a str), @@ -21,7 +23,7 @@ enum CowStrInner<'a> { } pub struct CowStr<'a>(CowStrInner<'a>); impl<'a> CowStr<'a> { - pub(crate) fn borrowed(s: &'a str) -> Self { + pub(crate) const fn borrowed(s: &'a str) -> Self { Self(CowStrInner::Borrowed(s)) } pub fn as_str(&self) -> &str { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 8132729c58..e58088b581 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ b/commons/zenoh-protocol/src/core/encoding.rs @@ -11,290 +11,69 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::CowStr; -use alloc::{borrow::Cow, string::String}; -use core::{ - convert::TryFrom, - fmt::{self, Debug}, - mem, -}; -use zenoh_result::{bail, zerror, ZError, ZResult}; +use core::fmt::Debug; -mod consts { - pub(super) const MIMES: [&str; 21] = [ - /* 0 */ "", - /* 1 */ "application/octet-stream", - /* 2 */ "application/custom", // non iana standard - /* 3 */ "text/plain", - /* 4 */ "application/properties", // non iana standard - /* 5 */ "application/json", // if not readable from casual users - /* 6 */ "application/sql", - /* 7 */ "application/integer", // non iana standard - /* 8 */ "application/float", // non iana standard - /* 9 */ - "application/xml", // if not readable from casual users (RFC 3023, sec 3) - /* 10 */ "application/xhtml+xml", - /* 11 */ "application/x-www-form-urlencoded", - /* 12 */ "text/json", // non iana standard - if readable from casual users - /* 13 */ "text/html", - /* 14 */ "text/xml", // if readable from casual users (RFC 3023, section 3) - /* 15 */ "text/css", - /* 16 */ "text/csv", - /* 17 */ "text/javascript", - /* 18 */ "image/jpeg", - /* 19 */ "image/png", - /* 20 */ "image/gif", - ]; -} - -#[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum KnownEncoding { - Empty = 0, - AppOctetStream = 1, - AppCustom = 2, - TextPlain = 3, - AppProperties = 4, - AppJson = 5, - AppSql = 6, - AppInteger = 7, - AppFloat = 8, - AppXml = 9, - AppXhtmlXml = 10, - AppXWwwFormUrlencoded = 11, - TextJson = 12, - TextHtml = 13, - TextXml = 14, - TextCss = 15, - TextCsv = 16, - TextJavascript = 17, - ImageJpeg = 18, - ImagePng = 19, - ImageGif = 20, -} - -impl From for u8 { - fn from(val: KnownEncoding) -> Self { - val as u8 - } -} - -impl From for &str { - fn from(val: KnownEncoding) -> Self { - consts::MIMES[u8::from(val) as usize] - } -} - -impl TryFrom for KnownEncoding { - type Error = ZError; - fn try_from(value: u8) -> Result { - if value < consts::MIMES.len() as u8 + 1 { - Ok(unsafe { mem::transmute::(value) }) - } else { - Err(zerror!("Unknown encoding")) - } - } -} +use zenoh_buffers::ZSlice; -impl AsRef for KnownEncoding { - fn as_ref(&self) -> &str { - consts::MIMES[u8::from(*self) as usize] - } -} +pub type EncodingId = u16; -/// The encoding of a zenoh `zenoh::Value`. -/// -/// A zenoh encoding is a HTTP Mime type represented, for wire efficiency, -/// as an integer prefix (that maps to a string) and a string suffix. +/// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. +/// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as +/// composed of an unsigned integer prefix and a bytes schema. The actual meaning of the +/// prefix and schema are out-of-scope of the protocol definition. Therefore, Zenoh does not +/// impose any encoding mapping and users are free to use any mapping they like. 
+/// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part +/// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum Encoding { - Exact(KnownEncoding), - WithSuffix(KnownEncoding, CowStr<'static>), +pub struct Encoding { + pub id: EncodingId, + pub schema: Option, } -impl Encoding { - pub fn new(prefix: u8, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - let prefix = KnownEncoding::try_from(prefix)?; - let suffix = suffix.into(); - if suffix.as_bytes().len() > u8::MAX as usize { - bail!("Suffix length is limited to 255 characters") - } - if suffix.as_ref().is_empty() { - Ok(Encoding::Exact(prefix)) - } else { - Ok(Encoding::WithSuffix(prefix, suffix.into())) - } - } - - /// Sets the suffix of this encoding. - pub fn with_suffix(self, suffix: IntoCowStr) -> ZResult - where - IntoCowStr: Into> + AsRef, - { - match self { - Encoding::Exact(e) => Encoding::new(e as u8, suffix), - Encoding::WithSuffix(e, s) => Encoding::new(e as u8, s + suffix.as_ref()), - } - } - - pub fn as_ref<'a, T>(&'a self) -> T - where - &'a Self: Into, - { - self.into() - } - - /// Returns `true`if the string representation of this encoding starts with - /// the string representation of their given encoding. - pub fn starts_with(&self, with: T) -> bool - where - T: Into, - { - let with: Encoding = with.into(); - self.prefix() == with.prefix() && self.suffix().starts_with(with.suffix()) - } - - pub const fn prefix(&self) -> &KnownEncoding { - match self { - Encoding::Exact(e) | Encoding::WithSuffix(e, _) => e, - } - } - - pub fn suffix(&self) -> &str { - match self { - Encoding::Exact(_) => "", - Encoding::WithSuffix(_, s) => s.as_ref(), - } - } +/// # Encoding field +/// +/// ```text +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// ~ id: z16 |S~ +/// +---------------+ +/// ~schema: ~ -- if S==1 +/// +---------------+ +/// ``` +pub mod flag { + pub const S: u32 = 1; // 0x01 Suffix if S==1 then schema is present } impl Encoding { - pub const EMPTY: Encoding = Encoding::Exact(KnownEncoding::Empty); - pub const APP_OCTET_STREAM: Encoding = Encoding::Exact(KnownEncoding::AppOctetStream); - pub const APP_CUSTOM: Encoding = Encoding::Exact(KnownEncoding::AppCustom); - pub const TEXT_PLAIN: Encoding = Encoding::Exact(KnownEncoding::TextPlain); - pub const APP_PROPERTIES: Encoding = Encoding::Exact(KnownEncoding::AppProperties); - pub const APP_JSON: Encoding = Encoding::Exact(KnownEncoding::AppJson); - pub const APP_SQL: Encoding = Encoding::Exact(KnownEncoding::AppSql); - pub const APP_INTEGER: Encoding = Encoding::Exact(KnownEncoding::AppInteger); - pub const APP_FLOAT: Encoding = Encoding::Exact(KnownEncoding::AppFloat); - pub const APP_XML: Encoding = Encoding::Exact(KnownEncoding::AppXml); - pub const APP_XHTML_XML: Encoding = Encoding::Exact(KnownEncoding::AppXhtmlXml); - pub const APP_XWWW_FORM_URLENCODED: Encoding = - Encoding::Exact(KnownEncoding::AppXWwwFormUrlencoded); - pub const TEXT_JSON: Encoding = Encoding::Exact(KnownEncoding::TextJson); - pub const TEXT_HTML: Encoding = Encoding::Exact(KnownEncoding::TextHtml); - pub const TEXT_XML: Encoding = Encoding::Exact(KnownEncoding::TextXml); - pub const TEXT_CSS: Encoding = Encoding::Exact(KnownEncoding::TextCss); - pub const TEXT_CSV: Encoding = Encoding::Exact(KnownEncoding::TextCsv); - pub const TEXT_JAVASCRIPT: Encoding = Encoding::Exact(KnownEncoding::TextJavascript); - pub const IMAGE_JPEG: Encoding = 
Encoding::Exact(KnownEncoding::ImageJpeg); - pub const IMAGE_PNG: Encoding = Encoding::Exact(KnownEncoding::ImagePng); - pub const IMAGE_GIF: Encoding = Encoding::Exact(KnownEncoding::ImageGif); -} - -impl fmt::Display for Encoding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Encoding::Exact(e) => f.write_str(e.as_ref()), - Encoding::WithSuffix(e, s) => { - f.write_str(e.as_ref())?; - f.write_str(s) - } - } - } -} - -impl From<&'static str> for Encoding { - fn from(s: &'static str) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if let Some(suffix) = s.strip_prefix(v) { - if suffix.is_empty() { - return Encoding::Exact(unsafe { - mem::transmute::(i as u8) - }); - } else { - return Encoding::WithSuffix( - unsafe { mem::transmute::(i as u8) }, - suffix.into(), - ); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) - } - } -} - -impl From for Encoding { - fn from(mut s: String) -> Self { - for (i, v) in consts::MIMES.iter().enumerate().skip(1) { - if s.starts_with(v) { - s.replace_range(..v.len(), ""); - if s.is_empty() { - return Encoding::Exact(unsafe { - mem::transmute::(i as u8) - }); - } else { - return Encoding::WithSuffix( - unsafe { mem::transmute::(i as u8) }, - s.into(), - ); - } - } - } - if s.is_empty() { - Encoding::Exact(KnownEncoding::Empty) - } else { - Encoding::WithSuffix(KnownEncoding::Empty, s.into()) + /// Returns a new [`Encoding`] object with default empty prefix ID. + pub const fn empty() -> Self { + Self { + id: 0, + schema: None, } } } -impl From<&KnownEncoding> for Encoding { - fn from(e: &KnownEncoding) -> Encoding { - Encoding::Exact(*e) - } -} - -impl From for Encoding { - fn from(e: KnownEncoding) -> Encoding { - Encoding::Exact(e) - } -} - impl Default for Encoding { fn default() -> Self { - KnownEncoding::Empty.into() + Self::empty() } } impl Encoding { #[cfg(feature = "test")] pub fn rand() -> Self { - use rand::{ - distributions::{Alphanumeric, DistString}, - Rng, - }; + use rand::Rng; const MIN: usize = 2; const MAX: usize = 16; let mut rng = rand::thread_rng(); - let prefix: u8 = rng.gen_range(0..20); - let suffix: String = if rng.gen_bool(0.5) { - let len = rng.gen_range(MIN..MAX); - Alphanumeric.sample_string(&mut rng, len) - } else { - String::new() - }; - Encoding::new(prefix, suffix).unwrap() + let id: EncodingId = rng.gen(); + let schema = rng + .gen_bool(0.5) + .then_some(ZSlice::rand(rng.gen_range(MIN..MAX))); + Encoding { id, schema } } } diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index 026fde3097..96b9b40665 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -11,28 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::locator::*; -use alloc::{borrow::ToOwned, format, string::String, vec::Vec}; -use core::{convert::TryFrom, fmt, str::FromStr}; +use alloc::{borrow::ToOwned, format, string::String}; +use core::{borrow::Borrow, convert::TryFrom, fmt, str::FromStr}; + use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{locator::*, parameters}; + // Parsing chars pub const PROTO_SEPARATOR: char = '/'; pub const METADATA_SEPARATOR: char = '?'; -pub const LIST_SEPARATOR: char = ';'; -pub const FIELD_SEPARATOR: char = '='; pub const CONFIG_SEPARATOR: char = '#'; -pub const VALUE_SEPARATOR: char = '|'; - -fn split_once(s: &str, c: char) -> (&str, &str) { - match 
s.find(c) {
-        Some(index) => {
-            let (l, r) = s.split_at(index);
-            (l, &r[1..])
-        }
-        None => (s, ""),
-    }
-}
 
 // Parsing functions
 pub(super) fn protocol(s: &str) -> &str {
@@ -64,77 +53,6 @@ pub(super) fn config(s: &str) -> &str {
     }
 }
 
-pub struct Parameters;
-
-impl Parameters {
-    pub fn extend<'s, I>(iter: I, into: &mut String)
-    where
-        I: Iterator<Item = (&'s str, &'s str)>,
-    {
-        let mut first = into.is_empty();
-        for (k, v) in iter {
-            if !first {
-                into.push(LIST_SEPARATOR);
-            }
-            into.push_str(k);
-            if !v.is_empty() {
-                into.push(FIELD_SEPARATOR);
-                into.push_str(v);
-            }
-            first = false;
-        }
-    }
-
-    pub fn iter(s: &str) -> impl DoubleEndedIterator<Item = (&str, &str)> {
-        s.split(LIST_SEPARATOR).filter_map(|prop| {
-            if prop.is_empty() {
-                None
-            } else {
-                Some(split_once(prop, FIELD_SEPARATOR))
-            }
-        })
-    }
-
-    pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> {
-        Self::iter(s).find(|x| x.0 == k).map(|x| x.1)
-    }
-
-    pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator<Item = &'s str> {
-        match Self::get(s, k) {
-            Some(v) => v.split(VALUE_SEPARATOR),
-            None => {
-                let mut i = "".split(VALUE_SEPARATOR);
-                i.next();
-                i
-            }
-        }
-    }
-
-    pub(super) fn insert<'s, I>(iter: I, k: &'s str, v: &'s str) -> String
-    where
-        I: Iterator<Item = (&'s str, &'s str)>,
-    {
-        let current = iter.filter(|x| x.0 != k);
-        let new = Some((k, v)).into_iter();
-        let iter = current.chain(new);
-
-        let mut into = String::new();
-        Parameters::extend(iter, &mut into);
-        into
-    }
-
-    pub(super) fn remove<'s, I>(iter: I, k: &'s str) -> String
-    where
-        I: Iterator<Item = (&'s str, &'s str)>,
-    {
-        let iter = iter.filter(|x| x.0 != k);
-
-        let mut into = String::new();
-        Parameters::extend(iter, &mut into);
-        into
-    }
-}
-
 // Protocol
 #[repr(transparent)]
 #[derive(Copy, Clone, PartialEq, Eq, Hash)]
@@ -277,16 +195,16 @@ impl<'a> Metadata<'a> {
         self.as_str().is_empty()
     }
 
-    pub fn iter(&'a self) -> impl DoubleEndedIterator<Item = (&'a str, &'a str)> {
-        Parameters::iter(self.0)
+    pub fn iter(&'a self) -> impl DoubleEndedIterator<Item = (&'a str, &'a str)> + Clone {
+        parameters::iter(self.0)
     }
 
     pub fn get(&'a self, k: &str) -> Option<&'a str> {
-        Parameters::get(self.0, k)
+        parameters::get(self.0, k)
     }
 
     pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator<Item = &'a str> {
-        Parameters::values(self.0, k)
+        parameters::values(self.0, k)
     }
 }
 
@@ -323,25 +241,35 @@ impl<'a> MetadataMut<'a> {
 }
 
 impl MetadataMut<'_> {
-    pub fn extend<I, K, V>(&mut self, iter: I) -> ZResult<()>
+    pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()>
     where
-        I: Iterator<Item = (K, V)>,
-        K: AsRef<str>,
-        V: AsRef<str>,
+        I: Iterator<Item = (&'s K, &'s V)> + Clone,
+        K: Borrow<str> + 's + ?Sized,
+        V: Borrow<str> + 's + ?Sized,
     {
-        for (k, v) in iter {
-            let k: &str = k.as_ref();
-            let v: &str = v.as_ref();
-            self.insert(k, v)?
-        }
+        let ep = EndPoint::new(
+            self.0.protocol(),
+            self.0.address(),
+            parameters::from_iter(parameters::sort(parameters::join(
+                self.0.metadata().iter(),
+                iter.map(|(k, v)| (k.borrow(), v.borrow())),
+            ))),
+            self.0.config(),
+        )?;
+
+        self.0.inner = ep.inner;
 
         Ok(())
     }
 
-    pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> {
+    pub fn insert<K, V>(&mut self, k: K, v: V) -> ZResult<()>
+    where
+        K: Borrow<str>,
+        V: Borrow<str>,
+    {
         let ep = EndPoint::new(
             self.0.protocol(),
             self.0.address(),
-            Parameters::insert(self.0.metadata().iter(), k, v),
+            parameters::insert_sort(self.0.metadata().as_str(), k.borrow(), v.borrow()).0,
             self.0.config(),
         )?;
 
@@ -349,11 +277,14 @@ impl MetadataMut<'_> {
         Ok(())
     }
 
-    pub fn remove(&mut self, k: &str) -> ZResult<()> {
+    pub fn remove<K>(&mut self, k: K) -> ZResult<()>
+    where
+        K: Borrow<str>,
+    {
         let ep = EndPoint::new(
             self.0.protocol(),
             self.0.address(),
-            Parameters::remove(self.0.metadata().iter(), k),
+            parameters::remove(self.0.metadata().as_str(), k.borrow()).0,
             self.0.config(),
         )?;
 
@@ -394,16 +325,16 @@ impl<'a> Config<'a> {
         self.as_str().is_empty()
     }
 
-    pub fn iter(&'a self) -> impl DoubleEndedIterator<Item = (&'a str, &'a str)> {
-        Parameters::iter(self.0)
+    pub fn iter(&'a self) -> impl DoubleEndedIterator<Item = (&'a str, &'a str)> + Clone {
+        parameters::iter(self.0)
     }
 
     pub fn get(&'a self, k: &str) -> Option<&'a str> {
-        Parameters::get(self.0, k)
+        parameters::get(self.0, k)
     }
 
     pub fn values(&'a self, k: &str) -> impl DoubleEndedIterator<Item = &'a str> {
-        Parameters::values(self.0, k)
+        parameters::values(self.0, k)
     }
 }
 
@@ -440,38 +371,51 @@ impl<'a> ConfigMut<'a> {
 }
 
 impl ConfigMut<'_> {
-    pub fn extend<I, K, V>(&mut self, iter: I) -> ZResult<()>
+    pub fn extend_from_iter<'s, I, K, V>(&mut self, iter: I) -> ZResult<()>
     where
-        I: Iterator<Item = (K, V)>,
-        K: AsRef<str>,
-        V: AsRef<str>,
+        I: Iterator<Item = (&'s K, &'s V)> + Clone,
+        K: Borrow<str> + 's + ?Sized,
+        V: Borrow<str> + 's + ?Sized,
     {
-        for (k, v) in iter {
-            let k: &str = k.as_ref();
-            let v: &str = v.as_ref();
-            self.insert(k, v)?
-        }
+        let ep = EndPoint::new(
+            self.0.protocol(),
+            self.0.address(),
+            self.0.metadata(),
+            parameters::from_iter(parameters::sort(parameters::join(
+                self.0.config().iter(),
+                iter.map(|(k, v)| (k.borrow(), v.borrow())),
+            ))),
+        )?;
+
+        self.0.inner = ep.inner;
 
         Ok(())
     }
 
-    pub fn insert(&mut self, k: &str, v: &str) -> ZResult<()> {
+    pub fn insert<K, V>(&mut self, k: K, v: V) -> ZResult<()>
+    where
+        K: Borrow<str>,
+        V: Borrow<str>,
+    {
         let ep = EndPoint::new(
             self.0.protocol(),
             self.0.address(),
             self.0.metadata(),
-            Parameters::insert(self.0.config().iter(), k, v),
+            parameters::insert_sort(self.0.config().as_str(), k.borrow(), v.borrow()).0,
         )?;
 
         self.0.inner = ep.inner;
 
         Ok(())
     }
 
-    pub fn remove(&mut self, k: &str) -> ZResult<()> {
+    pub fn remove<K>(&mut self, k: K) -> ZResult<()>
+    where
+        K: Borrow<str>,
+    {
         let ep = EndPoint::new(
             self.0.protocol(),
             self.0.address(),
             self.0.metadata(),
-            Parameters::remove(self.0.config().iter(), k),
+            parameters::remove(self.0.config().as_str(), k.borrow()).0,
         )?;
 
         self.0.inner = ep.inner;
@@ -497,7 +441,12 @@ impl fmt::Debug for ConfigMut<'_> {
     }
 }
 
-/// A `String` that respects the [`EndPoint`] canon form: `<locator>#<config>`, such that `<locator>` is a valid [`Locator`] `<config>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted.
+/// A string that respects the [`EndPoint`] canon form: `<locator>[#<config>]`.
+///
+/// `<locator>` is a valid [`Locator`] and `<config>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted.
+/// `<config>` is optional and can be provided to configure some aspects of an [`EndPoint`], e.g. the interface to listen on or connect to.
+///
+/// A full [`EndPoint`] string is hence in the form of `<protocol>/<address>[?<metadata>][#<config>]`.
 #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
 #[serde(into = "String")]
 #[serde(try_from = "String")]
@@ -616,27 +565,6 @@ impl TryFrom<String> for EndPoint {
         const ERR: &str = "Endpoints must be of the form <protocol>/<address>[?<metadata>][#<config>]";
 
-        fn sort_hashmap(from: &str, into: &mut String) {
-            let mut from = from
-                .split(LIST_SEPARATOR)
-                .map(|p| split_once(p, FIELD_SEPARATOR))
-                .collect::<Vec<_>>();
-            from.sort_by(|(k1, _), (k2, _)| k1.cmp(k2));
-
-            let mut first = true;
-            for (k, v) in from.iter() {
-                if !first {
-                    into.push(LIST_SEPARATOR);
-                }
-                into.push_str(k);
-                if !v.is_empty() {
-                    into.push(FIELD_SEPARATOR);
-                    into.push_str(v);
-                }
-                first = false;
-            }
-        }
-
         let pidx = s
             .find(PROTO_SEPARATOR)
             .and_then(|i| (!s[..i].is_empty() && !s[i + 1..].is_empty()).then_some(i))
@@ -649,14 +577,20 @@ impl TryFrom<String> for EndPoint {
             (Some(midx), None) if midx > pidx && !s[midx + 1..].is_empty() => {
                 let mut inner = String::with_capacity(s.len());
                 inner.push_str(&s[..midx + 1]); // Includes metadata separator
-                sort_hashmap(&s[midx + 1..], &mut inner);
+                parameters::from_iter_into(
+                    parameters::sort(parameters::iter(&s[midx + 1..])),
+                    &mut inner,
+                );
 
                 Ok(EndPoint { inner })
             }
             // There is some config
             (None, Some(cidx)) if cidx > pidx && !s[cidx + 1..].is_empty() => {
                 let mut inner = String::with_capacity(s.len());
                 inner.push_str(&s[..cidx + 1]); // Includes config separator
-                sort_hashmap(&s[cidx + 1..], &mut inner);
+                parameters::from_iter_into(
+                    parameters::sort(parameters::iter(&s[cidx + 1..])),
+                    &mut inner,
+                );
 
                 Ok(EndPoint { inner })
             }
             // There is some metadata and some config
@@ -669,10 +603,16 @@ impl TryFrom<String> for EndPoint {
                 let mut inner = String::with_capacity(s.len());
                 inner.push_str(&s[..midx + 1]); // Includes metadata separator
-                sort_hashmap(&s[midx + 1..cidx], &mut inner);
+                parameters::from_iter_into(
+                    parameters::sort(parameters::iter(&s[midx + 1..cidx])),
+                    &mut inner,
+                );
 
                 inner.push(CONFIG_SEPARATOR);
-                sort_hashmap(&s[cidx + 1..], &mut inner);
+                parameters::from_iter_into(
+                    parameters::sort(parameters::iter(&s[cidx + 1..])),
+                    &mut inner,
+                );
 
                 Ok(EndPoint { inner })
             }
@@ -694,31 +634,12 @@ impl EndPoint {
     pub fn rand() -> Self {
         use rand::{
             distributions::{Alphanumeric, DistString},
-            rngs::ThreadRng,
             Rng,
         };
 
         const MIN: usize = 2;
         const MAX: usize = 8;
 
-        fn gen_hashmap(rng: &mut ThreadRng, endpoint: &mut String) {
-            let num = rng.gen_range(MIN..MAX);
-            for i in 0..num {
-                if i != 0 {
-                    endpoint.push(LIST_SEPARATOR);
-                }
-                let len = rng.gen_range(MIN..MAX);
-                let key = Alphanumeric.sample_string(rng, len);
-                endpoint.push_str(key.as_str());
-
-                endpoint.push(FIELD_SEPARATOR);
-
-                let len = rng.gen_range(MIN..MAX);
-                let value = Alphanumeric.sample_string(rng, len);
-                endpoint.push_str(value.as_str());
-            }
-        }
-
         let mut rng = rand::thread_rng();
         let mut endpoint = String::new();
@@ -734,11 +655,11 @@ impl EndPoint {
         if rng.gen_bool(0.5) {
             endpoint.push(METADATA_SEPARATOR);
-            gen_hashmap(&mut rng, &mut endpoint);
+            parameters::rand(&mut endpoint);
         }
         if rng.gen_bool(0.5) {
             endpoint.push(CONFIG_SEPARATOR);
-            gen_hashmap(&mut rng, &mut endpoint);
+            parameters::rand(&mut endpoint);
         }
 
         endpoint.parse().unwrap()
@@ -910,14 +831,14 @@ fn endpoints() {
     let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap();
     endpoint
         .metadata_mut()
-        .extend([("a", "1"), ("c", "3"), ("b", "2")].iter().copied())
+        .extend_from_iter([("a", "1"), ("c", "3"), ("b", "2")].iter().copied())
         .unwrap();
     assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447?a=1;b=2;c=3");
 
     let mut endpoint = EndPoint::from_str("udp/127.0.0.1:7447").unwrap();
     endpoint
         .config_mut()
-        .extend([("A", "1"), ("C", "3"), ("B", "2")].iter().copied())
+        .extend_from_iter([("A", "1"), ("C", "3"), ("B", "2")].iter().copied())
        .unwrap();
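In the same spirit as the `endpoints()` test around this point, a small usage sketch of the canonicalization that `TryFrom<String>` and the typed mutators enforce, assuming the `zenoh-protocol` crate from this patch (the `iface` key is only an example):

use core::str::FromStr;
use zenoh_protocol::core::EndPoint;

fn canon_demo() {
    // Metadata keys are sorted when the endpoint is parsed...
    let mut ep = EndPoint::from_str("udp/127.0.0.1:7447?b=2;a=1").unwrap();
    assert_eq!(ep.as_str(), "udp/127.0.0.1:7447?a=1;b=2");
    // ...and `insert` goes through `parameters::insert_sort`, keeping the config sorted too.
    ep.config_mut().insert("iface", "eth0").unwrap();
    assert_eq!(ep.as_str(), "udp/127.0.0.1:7447?a=1;b=2#iface=eth0");
}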
    assert_eq!(endpoint.as_str(), "udp/127.0.0.1:7447#A=1;B=2;C=3");
diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs
index cdd3dfa64c..14f899e7c6 100644
--- a/commons/zenoh-protocol/src/core/locator.rs
+++ b/commons/zenoh-protocol/src/core/locator.rs
@@ -11,14 +11,16 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::endpoint::*;
 use alloc::{borrow::ToOwned, string::String};
 use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr};
+
 use zenoh_result::{Error as ZError, ZResult};
 
-// Locator
-/// A `String` that respects the [`Locator`] canon form: `<proto>/<address>[?<metadata>]`,
-/// such that `<metadata>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted.
+use super::endpoint::*;
+
+/// A string that respects the [`Locator`] canon form: `<proto>/<address>[?<metadata>]`.
+///
+/// `<metadata>` is of the form `<key1>=<value1>;...;<keyN>=<valueN>` where keys are alphabetically sorted.
 #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
 #[serde(into = "String")]
 #[serde(try_from = "String")]
@@ -122,67 +124,3 @@ impl Locator {
         EndPoint::rand().into()
     }
 }
-
-// pub(crate) trait HasCanonForm {
-//     fn is_canon(&self) -> bool;
-
-//     type Output;
-//     fn canonicalize(self) -> Self::Output;
-// }
-
-// fn cmp(this: &str, than: &str) -> core::cmp::Ordering {
-//     let is_longer = this.len().cmp(&than.len());
-//     let this = this.chars();
-//     let than = than.chars();
-//     let zip = this.zip(than);
-//     for (this, than) in zip {
-//         match this.cmp(&than) {
-//             core::cmp::Ordering::Equal => {}
-//             o => return o,
-//         }
-//     }
-//     is_longer
-// }
-
-// impl<'a, T: Iterator<Item = (&'a str, V)> + Clone, V> HasCanonForm for T {
-//     fn is_canon(&self) -> bool {
-//         let mut iter = self.clone();
-//         let mut acc = if let Some((key, _)) = iter.next() {
-//             key
-//         } else {
-//             return true;
-//         };
-//         for (key, _) in iter {
-//             if cmp(key, acc) != core::cmp::Ordering::Greater {
-//                 return false;
-//             }
-//             acc = key;
-//         }
-//         true
-//     }
-
-//     type Output = Vec<(&'a str, V)>;
-//     fn canonicalize(mut self) -> Self::Output {
-//         let mut result = Vec::new();
-//         if let Some(v) = self.next() {
-//             result.push(v);
-//         }
-//         'outer: for (k, v) in self {
-//             for (i, (x, _)) in result.iter().enumerate() {
-//                 match cmp(k, x) {
-//                     core::cmp::Ordering::Less => {
-//                         result.insert(i, (k, v));
-//                         continue 'outer;
-//                     }
-//                     core::cmp::Ordering::Equal => {
-//                         result[i].1 = v;
-//                         continue 'outer;
-//                     }
-//                     core::cmp::Ordering::Greater => {}
-//                 }
-//             }
-//             result.push((k, v))
-//         }
-//         result
-//     }
-// }
diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs
index 2547034c44..ebf1bb7f85 100644
--- a/commons/zenoh-protocol/src/core/mod.rs
+++ b/commons/zenoh-protocol/src/core/mod.rs
@@ -16,7 +16,6 @@ use alloc::{
     boxed::Box,
     format,
     string::{String, ToString},
-    vec::Vec,
 };
 use core::{
     convert::{From, TryFrom, TryInto},
@@ -24,6 +23,7 @@ use core::{
     hash::Hash,
     str::FromStr,
 };
+
 pub use uhlc::{Timestamp, NTP64};
 use zenoh_keyexpr::OwnedKeyExpr;
 use zenoh_result::{bail, zerror};
@@ -34,7 +34,6 @@ pub type TimestampId = uhlc::ID;
 /// Constants and helpers for zenoh `whatami` flags.
 pub mod whatami;
 pub use whatami::*;
-
 pub use zenoh_keyexpr::key_expr;
 
 pub mod wire_expr;
@@ -42,8 +41,8 @@ pub use wire_expr::*;
 
 mod cowstr;
 pub use cowstr::CowStr;
-mod encoding;
-pub use encoding::{Encoding, KnownEncoding};
+pub mod encoding;
+pub use encoding::{Encoding, EncodingId};
 
 pub mod locator;
 pub use locator::*;
@@ -54,49 +53,15 @@ pub use endpoint::*;
 pub mod resolution;
 pub use resolution::*;
 
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Property {
-    pub key: u64,
-    pub value: Vec<u8>,
-}
-
-/// The kind of a `Sample`.
-#[repr(u8)]
-#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
-pub enum SampleKind {
-    /// if the `Sample` was issued by a `put` operation.
-    #[default]
-    Put = 0,
-    /// if the `Sample` was issued by a `delete` operation.
- Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} +pub mod parameters; +pub use parameters::Parameters; /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] -pub struct ZenohId(uhlc::ID); +pub struct ZenohIdProto(uhlc::ID); -impl ZenohId { +impl ZenohIdProto { pub const MAX_SIZE: usize = 16; #[inline] @@ -109,8 +74,8 @@ impl ZenohId { self.0.to_le_bytes() } - pub fn rand() -> ZenohId { - ZenohId(uhlc::ID::rand()) + pub fn rand() -> ZenohIdProto { + ZenohIdProto(uhlc::ID::rand()) } pub fn into_keyexpr(self) -> OwnedKeyExpr { @@ -118,7 +83,7 @@ impl ZenohId { } } -impl Default for ZenohId { +impl Default for ZenohIdProto { fn default() -> Self { Self::rand() } @@ -153,7 +118,7 @@ impl fmt::Display for SizeError { macro_rules! derive_tryfrom { ($T: ty) => { - impl TryFrom<$T> for ZenohId { + impl TryFrom<$T> for ZenohIdProto { type Error = zenoh_result::Error; fn try_from(val: $T) -> Result { match val.try_into() { @@ -198,7 +163,7 @@ derive_tryfrom!([u8; 16]); derive_tryfrom!(&[u8; 16]); derive_tryfrom!(&[u8]); -impl FromStr for ZenohId { +impl FromStr for ZenohIdProto { type Err = zenoh_result::Error; fn from_str(s: &str) -> Result { @@ -211,31 +176,37 @@ impl FromStr for ZenohId { let u: uhlc::ID = s .parse() .map_err(|e: uhlc::ParseIDError| zerror!("Invalid id: {} - {}", s, e.cause))?; - Ok(ZenohId(u)) + Ok(ZenohIdProto(u)) } } -impl fmt::Debug for ZenohId { +impl fmt::Debug for ZenohIdProto { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } -impl fmt::Display for ZenohId { +impl fmt::Display for ZenohIdProto { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } // A PeerID can be converted into a Timestamp's ID -impl From<&ZenohId> for uhlc::ID { - fn from(zid: &ZenohId) -> Self { +impl From<&ZenohIdProto> for uhlc::ID { + fn from(zid: &ZenohIdProto) -> Self { + zid.0 + } +} + +impl From for uhlc::ID { + fn from(zid: ZenohIdProto) -> Self { zid.0 } } -impl From for OwnedKeyExpr { - fn from(zid: ZenohId) -> Self { +impl From for OwnedKeyExpr { + fn from(zid: ZenohIdProto) -> Self { // SAFETY: zid.to_string() returns an stringified hexadecimal // representation of the zid. 
Therefore, building a OwnedKeyExpr
        // by calling from_string_unchecked() is safe because it is
@@ -244,13 +215,13 @@ impl From<ZenohId> for OwnedKeyExpr {
     }
 }
 
-impl From<&ZenohId> for OwnedKeyExpr {
-    fn from(zid: &ZenohId) -> Self {
+impl From<&ZenohIdProto> for OwnedKeyExpr {
+    fn from(zid: &ZenohIdProto) -> Self {
         (*zid).into()
     }
 }
 
-impl serde::Serialize for ZenohId {
+impl serde::Serialize for ZenohIdProto {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: serde::Serializer,
@@ -259,7 +230,7 @@ impl serde::Serialize for ZenohId {
     }
 }
 
-impl<'de> serde::Deserialize<'de> for ZenohId {
+impl<'de> serde::Deserialize<'de> for ZenohIdProto {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: serde::Deserializer<'de>,
@@ -267,10 +238,13 @@ impl<'de> serde::Deserialize<'de> for ZenohId {
         struct ZenohIdVisitor;
 
         impl<'de> serde::de::Visitor<'de> for ZenohIdVisitor {
-            type Value = ZenohId;
+            type Value = ZenohIdProto;
 
             fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str(&format!("An hex string of 1-{} bytes", ZenohId::MAX_SIZE))
+                formatter.write_str(&format!(
+                    "A hex string of 1-{} bytes",
+                    ZenohIdProto::MAX_SIZE
+                ))
             }
 
             fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
@@ -299,6 +273,27 @@ impl<'de> serde::Deserialize<'de> for ZenohId {
     }
 }
 
+/// The unique id of a zenoh entity inside its parent `Session`.
+pub type EntityId = u32;
+
+/// The global unique id of a zenoh entity.
+#[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)]
+pub struct EntityGlobalIdProto {
+    pub zid: ZenohIdProto,
+    pub eid: EntityId,
+}
+
+impl EntityGlobalIdProto {
+    #[cfg(feature = "test")]
+    pub fn rand() -> Self {
+        use rand::Rng;
+        Self {
+            zid: ZenohIdProto::rand(),
+            eid: rand::thread_rng().gen(),
+        }
+    }
+}
+
 #[repr(u8)]
 #[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)]
 pub enum Priority {
@@ -314,6 +309,8 @@ pub enum Priority {
 }
 
 impl Priority {
+    /// The default Priority.
+    pub const DEFAULT: Self = Self::Data;
     /// The lowest Priority
     pub const MIN: Self = Self::Background;
     /// The highest Priority
@@ -354,6 +351,8 @@ pub enum Reliability {
 }
 
 impl Reliability {
+    pub const DEFAULT: Self = Self::BestEffort;
+
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
         use rand::Rng;
@@ -374,6 +373,13 @@ pub struct Channel {
     pub reliability: Reliability,
 }
 
+impl Channel {
+    pub const DEFAULT: Self = Self {
+        priority: Priority::DEFAULT,
+        reliability: Reliability::DEFAULT,
+    };
+}
+
 /// The kind of congestion control.
 #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
 #[repr(u8)]
@@ -383,51 +389,6 @@ pub enum CongestionControl {
     Block = 1,
 }
 
-/// The subscription mode.
-#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
-#[repr(u8)]
-pub enum SubMode {
-    #[default]
-    Push = 0,
-    Pull = 1,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, Default)]
-pub struct SubInfo {
-    pub reliability: Reliability,
-    pub mode: SubMode,
-}
-
-#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
-pub struct QueryableInfo {
-    pub complete: u64, // Default 0: incomplete
-    pub distance: u64, // Default 0: no distance
-}
-
-/// The kind of consolidation.
-#[derive(Debug, Clone, PartialEq, Eq, Copy)]
-pub enum ConsolidationMode {
-    /// No consolidation applied: multiple samples may be received for the same key-timestamp.
-    None,
-    /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp
-    /// has already been sent with the same key.
-    ///
-    /// This optimizes latency while potentially reducing bandwidth.
-    ///
-    /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already
-    /// been observed with the same key.
-    Monotonic,
-    /// Holds back samples to only send the set of samples that had the highest timestamp for their key.
-    Latest,
-}
-
-/// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`.
-#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
-pub enum QueryTarget {
-    #[default]
-    BestMatching,
-    All,
-    AllComplete,
-    #[cfg(feature = "complete_n")]
-    Complete(u64),
+impl CongestionControl {
+    pub const DEFAULT: Self = Self::Drop;
 }
diff --git a/commons/zenoh-protocol/src/core/parameters.rs b/commons/zenoh-protocol/src/core/parameters.rs
new file mode 100644
index 0000000000..38cb368b5b
--- /dev/null
+++ b/commons/zenoh-protocol/src/core/parameters.rs
@@ -0,0 +1,525 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+/// A module providing a set of utility functions to manipulate `&str`s which follow the format `a=b;c=d|e;f=g`,
+/// and the structure `Parameters`, which provides a `HashMap<&str, &str>`-like view over a string of such format.
+///
+/// `;` is the separator between the key-value `(&str, &str)` elements.
+///
+/// `=` is the separator between the `&str`-key and `&str`-value.
+///
+/// `|` is the separator between multiple elements of the values.
+use alloc::{
+    borrow::Cow,
+    string::{String, ToString},
+    vec::Vec,
+};
+use core::{borrow::Borrow, fmt};
+#[cfg(feature = "std")]
+use std::collections::HashMap;
+
+pub(super) const LIST_SEPARATOR: char = ';';
+pub(super) const FIELD_SEPARATOR: char = '=';
+pub(super) const VALUE_SEPARATOR: char = '|';
+
+fn split_once(s: &str, c: char) -> (&str, &str) {
+    match s.find(c) {
+        Some(index) => {
+            let (l, r) = s.split_at(index);
+            (l, &r[1..])
+        }
+        None => (s, ""),
+    }
+}
+
+/// Returns an iterator of key-value `(&str, &str)` pairs according to the parameters format.
+pub fn iter(s: &str) -> impl DoubleEndedIterator<Item = (&str, &str)> + Clone {
+    s.split(LIST_SEPARATOR)
+        .filter(|p| !p.is_empty())
+        .map(|p| split_once(p, FIELD_SEPARATOR))
+}
+
+/// Sorts the key-value `(&str, &str)` pairs of the iterator by key, in alphabetical order.
+pub fn sort<'s, I>(iter: I) -> impl Iterator<Item = (&'s str, &'s str)>
+where
+    I: Iterator<Item = (&'s str, &'s str)>,
+{
+    let mut from = iter.collect::<Vec<_>>();
+    from.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2));
+    from.into_iter()
+}
+
+/// Joins two key-value `(&str, &str)` iterators removing from `current` any element whose key is present in `new`.
+pub fn join<'s, C, N>(current: C, new: N) -> impl Iterator<Item = (&'s str, &'s str)> + Clone
+where
+    C: Iterator<Item = (&'s str, &'s str)> + Clone,
+    N: Iterator<Item = (&'s str, &'s str)> + Clone + 's,
+{
+    let n = new.clone();
+    let current = current
+        .clone()
+        .filter(move |(kc, _)| !n.clone().any(|(kn, _)| kn == *kc));
+    current.chain(new)
+}
+
+/// Builds a string from an iterator preserving the order.
+#[allow(clippy::should_implement_trait)]
+pub fn from_iter<'s, I>(iter: I) -> String
+where
+    I: Iterator<Item = (&'s str, &'s str)>,
+{
+    let mut into = String::new();
+    from_iter_into(iter, &mut into);
+    into
+}
+
+/// Same as [`from_iter`] but it writes into a user-provided string instead of allocating a new one.
+pub fn from_iter_into<'s, I>(iter: I, into: &mut String)
+where
+    I: Iterator<Item = (&'s str, &'s str)>,
+{
+    concat_into(iter, into);
+}
+
+/// Get the `&str`-value for a `&str`-key according to the parameters format.
+pub fn get<'s>(s: &'s str, k: &str) -> Option<&'s str> {
+    iter(s).find(|(key, _)| *key == k).map(|(_, value)| value)
+}
+
+/// Get the `&str`-value iterator for a `&str`-key according to the parameters format.
+pub fn values<'s>(s: &'s str, k: &str) -> impl DoubleEndedIterator<Item = &'s str> {
+    match get(s, k) {
+        Some(v) => v.split(VALUE_SEPARATOR),
+        None => {
+            let mut i = "".split(VALUE_SEPARATOR);
+            i.next();
+            i
+        }
+    }
+}
+
+fn _insert<'s, I>(
+    i: I,
+    k: &'s str,
+    v: &'s str,
+) -> (impl Iterator<Item = (&'s str, &'s str)>, Option<&'s str>)
+where
+    I: Iterator<Item = (&'s str, &'s str)> + Clone,
+{
+    let mut iter = i.clone();
+    let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v);
+
+    let current = i.filter(move |x| x.0 != k);
+    let new = Some((k, v)).into_iter();
+    (current.chain(new), item)
+}
+
+/// Insert a key-value `(&str, &str)` pair by appending it at the end of `s` preserving the insertion order.
+pub fn insert<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) {
+    let (iter, item) = _insert(iter(s), k, v);
+    (from_iter(iter), item)
+}
+
+/// Same as [`insert`] but keys are sorted in alphabetical order.
+pub fn insert_sort<'s>(s: &'s str, k: &'s str, v: &'s str) -> (String, Option<&'s str>) {
+    let (iter, item) = _insert(iter(s), k, v);
+    (from_iter(sort(iter)), item)
+}
+
+/// Remove a key-value `(&str, &str)` pair from `s` preserving the insertion order.
+pub fn remove<'s>(s: &'s str, k: &str) -> (String, Option<&'s str>) {
+    let mut iter = iter(s);
+    let item = iter.find(|(key, _)| *key == k).map(|(_, v)| v);
+    let iter = iter.filter(|x| x.0 != k);
+    (concat(iter), item)
+}
+
+/// Returns `true` if all keys are sorted in alphabetical order.
+pub fn is_ordered(s: &str) -> bool {
+    let mut prev = None;
+    for (k, _) in iter(s) {
+        match prev.take() {
+            Some(p) if k < p => return false,
+            _ => prev = Some(k),
+        }
+    }
+    true
+}
+
+fn concat<'s, I>(iter: I) -> String
+where
+    I: Iterator<Item = (&'s str, &'s str)>,
+{
+    let mut into = String::new();
+    concat_into(iter, &mut into);
+    into
+}
+
+fn concat_into<'s, I>(iter: I, into: &mut String)
+where
+    I: Iterator<Item = (&'s str, &'s str)>,
+{
+    let mut first = true;
+    for (k, v) in iter.filter(|(k, _)| !k.is_empty()) {
+        if !first {
+            into.push(LIST_SEPARATOR);
+        }
+        into.push_str(k);
+        if !v.is_empty() {
+            into.push(FIELD_SEPARATOR);
+            into.push_str(v);
+        }
+        first = false;
+    }
+}
+
+#[cfg(feature = "test")]
+pub fn rand(into: &mut String) {
+    use rand::{
+        distributions::{Alphanumeric, DistString},
+        Rng,
+    };
+
+    const MIN: usize = 2;
+    const MAX: usize = 8;
+
+    let mut rng = rand::thread_rng();
+
+    let num = rng.gen_range(MIN..MAX);
+    for i in 0..num {
+        if i != 0 {
+            into.push(LIST_SEPARATOR);
+        }
+        let len = rng.gen_range(MIN..MAX);
+        let key = Alphanumeric.sample_string(&mut rng, len);
+        into.push_str(key.as_str());
+
+        into.push(FIELD_SEPARATOR);
+
+        let len = rng.gen_range(MIN..MAX);
+        let value = Alphanumeric.sample_string(&mut rng, len);
+        into.push_str(value.as_str());
+    }
+}
+
+/// A map of key/value (String,String) parameters.
+/// It can be parsed from a String, using `;` or `<newline>` as separator between each parameter
+/// and `=` as separator between a key and its value. Keys and values are trimmed.
+///
+/// Example:
+/// ```
+/// use zenoh_protocol::core::Parameters;
+///
+/// let a = "a=1;b=2;c=3|4|5;d=6";
+/// let p = Parameters::from(a);
+///
+/// // Retrieve values
+/// assert!(!p.is_empty());
+/// assert_eq!(p.get("a").unwrap(), "1");
+/// assert_eq!(p.get("b").unwrap(), "2");
+/// assert_eq!(p.get("c").unwrap(), "3|4|5");
+/// assert_eq!(p.get("d").unwrap(), "6");
+/// assert_eq!(p.values("c").collect::<Vec<&str>>(), vec!["3", "4", "5"]);
+///
+/// // Iterate over parameters
+/// let mut iter = p.iter();
+/// assert_eq!(iter.next().unwrap(), ("a", "1"));
+/// assert_eq!(iter.next().unwrap(), ("b", "2"));
+/// assert_eq!(iter.next().unwrap(), ("c", "3|4|5"));
+/// assert_eq!(iter.next().unwrap(), ("d", "6"));
+/// assert!(iter.next().is_none());
+///
+/// // Create parameters from iterators
+/// let pi = Parameters::from_iter(vec![("a", "1"), ("b", "2"), ("c", "3|4|5"), ("d", "6")]);
+/// assert_eq!(p, pi);
+/// ```
+#[derive(Clone, PartialEq, Eq, Hash, Default)]
+pub struct Parameters<'s>(Cow<'s, str>);
+
+impl<'s> Parameters<'s> {
+    /// Create empty parameters.
+    pub const fn empty() -> Self {
+        Self(Cow::Borrowed(""))
+    }
+
+    /// Returns `true` if the parameters do not contain anything.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    /// Returns the parameters as a [`str`].
+    pub fn as_str(&'s self) -> &'s str {
+        &self.0
+    }
+
+    /// Returns `true` if the parameters contain the specified key.
+    pub fn contains_key<K>(&self, k: K) -> bool
+    where
+        K: Borrow<str>,
+    {
+        super::parameters::get(self.as_str(), k.borrow()).is_some()
+    }
+
+    /// Returns a reference to the `&str`-value corresponding to the key.
+    pub fn get<K>(&'s self, k: K) -> Option<&'s str>
+    where
+        K: Borrow<str>,
+    {
+        super::parameters::get(self.as_str(), k.borrow())
+    }
+
+    /// Returns an iterator to the `&str`-values corresponding to the key.
+    pub fn values<K>(&'s self, k: K) -> impl DoubleEndedIterator<Item = &'s str>
+    where
+        K: Borrow<str>,
+    {
+        super::parameters::values(self.as_str(), k.borrow())
+    }
+
+    /// Returns an iterator on the key-value pairs as `(&str, &str)`.
+    pub fn iter(&'s self) -> impl DoubleEndedIterator<Item = (&'s str, &'s str)> + Clone {
+        super::parameters::iter(self.as_str())
+    }
+
+    /// Inserts a key-value pair into the map.
+    /// If the map did not have this key present, [`None`] is returned.
+    /// If the map did have this key present, the value is updated, and the old value is returned.
+    pub fn insert<K, V>(&mut self, k: K, v: V) -> Option<String>
+    where
+        K: Borrow<str>,
+        V: Borrow<str>,
+    {
+        let (inner, item) = super::parameters::insert(self.as_str(), k.borrow(), v.borrow());
+        let item = item.map(|i| i.to_string());
+        self.0 = Cow::Owned(inner);
+        item
+    }
+
+    /// Removes a key from the map, returning the value at the key if the key was previously in the parameters.
+    pub fn remove<K>(&mut self, k: K) -> Option<String>
+    where
+        K: Borrow<str>,
+    {
+        let (inner, item) = super::parameters::remove(self.as_str(), k.borrow());
+        let item = item.map(|i| i.to_string());
+        self.0 = Cow::Owned(inner);
+        item
+    }
+
+    /// Extend these parameters with other parameters.
+    pub fn extend(&mut self, other: &Parameters) {
+        self.extend_from_iter(other.iter());
+    }
+
+    /// Extend these parameters from an iterator.
+    pub fn extend_from_iter<'e, I, K, V>(&mut self, iter: I)
+    where
+        I: Iterator<Item = (&'e K, &'e V)> + Clone,
+        K: Borrow<str> + 'e + ?Sized,
+        V: Borrow<str> + 'e + ?Sized,
+    {
+        let inner = super::parameters::from_iter(super::parameters::join(
+            self.iter(),
+            iter.map(|(k, v)| (k.borrow(), v.borrow())),
+        ));
+        self.0 = Cow::Owned(inner);
+    }
+
+    /// Convert these parameters into owned parameters.
+    pub fn into_owned(self) -> Parameters<'static> {
+        Parameters(Cow::Owned(self.0.into_owned()))
+    }
+
+    /// Returns `true` if all keys are sorted in alphabetical order.
+    pub fn is_ordered(&self) -> bool {
+        super::parameters::is_ordered(self.as_str())
+    }
+}
+
+impl<'s> From<&'s str> for Parameters<'s> {
+    fn from(mut value: &'s str) -> Self {
+        value = value.trim_end_matches(|c| {
+            c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR
+        });
+        Self(Cow::Borrowed(value))
+    }
+}
+
+impl From<String> for Parameters<'_> {
+    fn from(mut value: String) -> Self {
+        let s = value.trim_end_matches(|c| {
+            c == LIST_SEPARATOR || c == FIELD_SEPARATOR || c == VALUE_SEPARATOR
+        });
+        value.truncate(s.len());
+        Self(Cow::Owned(value))
+    }
+}
+
+impl<'s> From<Cow<'s, str>> for Parameters<'s> {
+    fn from(value: Cow<'s, str>) -> Self {
+        match value {
+            Cow::Borrowed(s) => Parameters::from(s),
+            Cow::Owned(s) => Parameters::from(s),
+        }
+    }
+}
+
+impl<'a> From<Parameters<'a>> for Cow<'_, Parameters<'a>> {
+    fn from(props: Parameters<'a>) -> Self {
+        Cow::Owned(props)
+    }
+}
+
+impl<'a> From<&'a Parameters<'a>> for Cow<'a, Parameters<'a>> {
+    fn from(props: &'a Parameters<'a>) -> Self {
+        Cow::Borrowed(props)
+    }
+}
+
+impl<'s, K, V> FromIterator<(&'s K, &'s V)> for Parameters<'_>
+where
+    K: Borrow<str> + 's + ?Sized,
+    V: Borrow<str> + 's + ?Sized,
+{
+    fn from_iter<T: IntoIterator<Item = (&'s K, &'s V)>>(iter: T) -> Self {
+        let iter = iter.into_iter();
+        let inner = super::parameters::from_iter(iter.map(|(k, v)| (k.borrow(), v.borrow())));
+        Self(Cow::Owned(inner))
+    }
+}
+
+impl<'s, K, V> FromIterator<&'s (K, V)> for Parameters<'_>
+where
+    K: Borrow<str> + 's,
+    V: Borrow<str> + 's,
+{
+    fn from_iter<T: IntoIterator<Item = &'s (K, V)>>(iter: T) -> Self {
+        Self::from_iter(iter.into_iter().map(|(k, v)| (k.borrow(), v.borrow())))
+    }
+}
+
+impl<'s, K, V> From<&'s [(K, V)]> for Parameters<'_>
+where
+    K: Borrow<str> + 's,
+    V: Borrow<str> + 's,
+{
+    fn from(value: &'s [(K, V)]) -> Self {
+        Self::from_iter(value.iter())
+    }
+}
+
+#[cfg(feature = "std")]
+impl<K, V> From<HashMap<K, V>> for Parameters<'_>
+where
+    K: Borrow<str>,
+    V: Borrow<str>,
+{
+    fn from(map: HashMap<K, V>) -> Self {
+        Self::from_iter(map.iter())
+    }
+}
+
+#[cfg(feature = "std")]
+impl<'s> From<&'s Parameters<'s>> for HashMap<&'s str, &'s str> {
+    fn from(props: &'s Parameters<'s>) -> Self {
+        HashMap::from_iter(props.iter())
+    }
+}
+
+#[cfg(feature = "std")]
+impl From<&Parameters<'_>> for HashMap<String, String> {
+    fn from(props: &Parameters<'_>) -> Self {
+        HashMap::from_iter(props.iter().map(|(k, v)| (k.to_string(), v.to_string())))
+    }
+}
+
+#[cfg(feature = "std")]
+impl<'s> From<&'s Parameters<'s>> for HashMap<Cow<'s, str>, Cow<'s, str>> {
+    fn from(props: &'s Parameters<'s>) -> Self {
+        HashMap::from_iter(props.iter().map(|(k, v)| (Cow::from(k), Cow::from(v))))
+    }
+}
+
+#[cfg(feature = "std")]
+impl From<Parameters<'_>> for HashMap<String, String> {
+    fn from(props: Parameters) -> Self {
+        HashMap::from(&props)
+    }
+}
+
+impl fmt::Display for Parameters<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl fmt::Debug for Parameters<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parameters() {
+        assert!(Parameters::from("").0.is_empty());
+
+        assert_eq!(Parameters::from("p1"), Parameters::from(&[("p1", "")][..]));
+
+        assert_eq!(
+            Parameters::from("p1=v1"),
+            Parameters::from(&[("p1", "v1")][..])
+        );
+
+        assert_eq!(
+            Parameters::from("p1=v1;p2=v2;"),
+            Parameters::from(&[("p1", "v1"), ("p2", "v2")][..])
+        );
+
+        assert_eq!(
+            Parameters::from("p1=v1;p2=v2;|="),
Parameters::from(&[("p1", "v1"), ("p2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=v1;p2;p3=v3"), + Parameters::from(&[("p1", "v1"), ("p2", ""), ("p3", "v3")][..]) + ); + + assert_eq!( + Parameters::from("p1=v 1;p 2=v2"), + Parameters::from(&[("p1", "v 1"), ("p 2", "v2")][..]) + ); + + assert_eq!( + Parameters::from("p1=x=y;p2=a==b"), + Parameters::from(&[("p1", "x=y"), ("p2", "a==b")][..]) + ); + + let mut hm: HashMap = HashMap::new(); + hm.insert("p1".to_string(), "v1".to_string()); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap<&str, &str> = HashMap::new(); + hm.insert("p1", "v1"); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + + let mut hm: HashMap, Cow> = HashMap::new(); + hm.insert(Cow::from("p1"), Cow::from("v1")); + assert_eq!(Parameters::from(hm), Parameters::from("p1=v1")); + } +} diff --git a/commons/zenoh-protocol/src/core/resolution.rs b/commons/zenoh-protocol/src/core/resolution.rs index bfce6c6466..5756fd2a53 100644 --- a/commons/zenoh-protocol/src/core/resolution.rs +++ b/commons/zenoh-protocol/src/core/resolution.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{network::RequestId, transport::TransportSn}; use alloc::string::String; use core::{fmt, str::FromStr}; + use zenoh_result::{bail, ZError}; +use crate::{network::RequestId, transport::TransportSn}; + #[repr(u8)] // The value represents the 2-bit encoded value #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] diff --git a/commons/zenoh-protocol/src/core/whatami.rs b/commons/zenoh-protocol/src/core/whatami.rs index 6aacb0d356..9ae0690382 100644 --- a/commons/zenoh-protocol/src/core/whatami.rs +++ b/commons/zenoh-protocol/src/core/whatami.rs @@ -12,14 +12,16 @@ // ZettaScale Zenoh Team, // use alloc::string::String; -use const_format::formatcp; use core::{convert::TryFrom, fmt, num::NonZeroU8, ops::BitOr, str::FromStr}; + +use const_format::formatcp; use zenoh_result::{bail, ZError}; #[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum WhatAmI { Router = 0b001, + #[default] Peer = 0b010, Client = 0b100, } @@ -144,6 +146,7 @@ impl WhatAmIMatcher { Self::U8_R_C => formatcp!("{}|{}", WhatAmI::STR_R, WhatAmI::STR_C), Self::U8_P_C => formatcp!("{}|{}", WhatAmI::STR_P, WhatAmI::STR_C), Self::U8_R_P_C => formatcp!("{}|{}|{}", WhatAmI::STR_R, WhatAmI::STR_P, WhatAmI::STR_C), + _ => unreachable!(), } } @@ -327,41 +330,40 @@ impl<'de> serde::de::Visitor<'de> for WhatAmIMatcherVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, - "a | separated list of whatami variants ('{}', '{}', '{}')", + "a list of whatami variants ('{}', '{}', '{}')", WhatAmI::STR_R, WhatAmI::STR_P, WhatAmI::STR_C ) } - fn visit_str(self, v: &str) -> Result + fn visit_seq(self, mut seq: A) -> Result where - E: serde::de::Error, + A: serde::de::SeqAccess<'de>, { - v.parse().map_err(|_| { - serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &formatcp!( - "a | separated list of whatami variants ('{}', '{}', '{}')", - WhatAmI::STR_R, - WhatAmI::STR_P, - WhatAmI::STR_C - ), - ) - }) - } + let mut inner = 0; - fn visit_borrowed_str(self, v: &'de str) -> Result - where - E: serde::de::Error, - { - self.visit_str(v) - } - fn visit_string(self, v: String) -> Result - where - E: serde::de::Error, - { - self.visit_str(&v) + while let Some(s) = seq.next_element::()? 
{ + match s.as_str() { + WhatAmI::STR_R => inner |= WhatAmI::U8_R, + WhatAmI::STR_P => inner |= WhatAmI::U8_P, + WhatAmI::STR_C => inner |= WhatAmI::U8_C, + _ => { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(&s), + &formatcp!( + "one of ('{}', '{}', '{}')", + WhatAmI::STR_R, + WhatAmI::STR_P, + WhatAmI::STR_C + ), + )) + } + } + } + + Ok(WhatAmIMatcher::try_from(inner) + .expect("`WhatAmIMatcher` should be valid by construction")) } } @@ -370,6 +372,6 @@ impl<'de> serde::Deserialize<'de> for WhatAmIMatcher { where D: serde::Deserializer<'de>, { - deserializer.deserialize_str(WhatAmIMatcherVisitor) + deserializer.deserialize_seq(WhatAmIMatcherVisitor) } } diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index 7b0dee7471..9f5c432665 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -18,6 +18,7 @@ use alloc::{ string::{String, ToString}, }; use core::{convert::TryInto, fmt, sync::atomic::AtomicU16}; + use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_result::{bail, ZResult}; @@ -71,6 +72,10 @@ impl<'a> WireExpr<'a> { } } + pub fn is_empty(&self) -> bool { + self.scope == 0 && self.suffix.as_ref().is_empty() + } + pub fn as_str(&'a self) -> &'a str { if self.scope == 0 { self.suffix.as_ref() @@ -257,7 +262,7 @@ impl WireExpr<'_> { WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::default(), + mapping: Mapping::DEFAULT, } } } diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 074aae49a5..fbfefa7c09 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -28,7 +28,7 @@ pub mod transport; pub mod zenoh; // Zenoh version -pub const VERSION: u8 = 0x08; +pub const VERSION: u8 = 0x09; // Zenoh protocol uses the following conventions for message definition and representation. 
 //
diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs
index 396caf187d..8f31e0ff2a 100644
--- a/commons/zenoh-protocol/src/network/declare.rs
+++ b/commons/zenoh-protocol/src/network/declare.rs
@@ -11,42 +11,47 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+use alloc::borrow::Cow;
+
+pub use common::*;
+pub use keyexpr::*;
+pub use queryable::*;
+pub use subscriber::*;
+pub use token::*;
+
 use crate::{
     common::{imsg, ZExtZ64, ZExtZBuf},
     core::{ExprId, Reliability, WireExpr},
     network::Mapping,
     zextz64, zextzbuf,
 };
-use alloc::borrow::Cow;
-use core::ops::BitOr;
-pub use interest::*;
-pub use keyexpr::*;
-pub use queryable::*;
-pub use subscriber::*;
-pub use token::*;
 
 pub mod flag {
-    // pub const X: u8 = 1 << 5; // 0x20 Reserved
-    // pub const X: u8 = 1 << 6; // 0x40 Reserved
+    pub const I: u8 = 1 << 5; // 0x20 Interest       if I==1 then the declare is in a response to an Interest with future==false
+    // pub const X: u8 = 1 << 6; // 0x40 Reserved
    pub const Z: u8 = 1 << 7; // 0x80 Extensions     if Z==1 then an extension will follow
 }
 
+/// ```text
 /// Flags:
-/// - X: Reserved
+/// - I: Interest       If I==1 then interest_id is present
 /// - X: Reserved
 /// - Z: Extension      If Z==1 then at least one extension is present
 ///
 ///   7 6 5 4 3 2 1 0
 ///  +-+-+-+-+-+-+-+-+
-/// |Z|X|X| DECLARE |
+/// |Z|X|I| DECLARE |
 /// +-+-+-+---------+
+/// ~interest_id:z32~  if I==1
+/// +---------------+
 /// ~  [decl_exts]  ~  if Z==1
 /// +---------------+
 /// ~  declaration  ~
 /// +---------------+
-///
+/// ```
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Declare {
+    pub interest_id: Option<super::interest::InterestId>,
     pub ext_qos: ext::QoSType,
     pub ext_tstamp: Option<ext::TimestampType>,
     pub ext_nodeid: ext::NodeIdType,
@@ -82,9 +87,7 @@ pub mod id {
     pub const D_TOKEN: u8 = 0x06;
     pub const U_TOKEN: u8 = 0x07;
 
-    pub const D_INTEREST: u8 = 0x08;
-    pub const F_INTEREST: u8 = 0x09;
-    pub const U_INTEREST: u8 = 0x0A;
+    pub const D_FINAL: u8 = 0x1A;
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -97,9 +100,7 @@ pub enum DeclareBody {
     UndeclareQueryable(UndeclareQueryable),
     DeclareToken(DeclareToken),
     UndeclareToken(UndeclareToken),
-    DeclareInterest(DeclareInterest),
-    FinalInterest(FinalInterest),
-    UndeclareInterest(UndeclareInterest),
+    DeclareFinal(DeclareFinal),
 }
 
 impl DeclareBody {
@@ -109,7 +110,7 @@ impl DeclareBody {
 
         let mut rng = rand::thread_rng();
 
-        match rng.gen_range(0..11) {
+        match rng.gen_range(0..9) {
             0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()),
             1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()),
             2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()),
@@ -118,9 +119,7 @@ impl DeclareBody {
             5 => DeclareBody::UndeclareQueryable(UndeclareQueryable::rand()),
             6 => DeclareBody::DeclareToken(DeclareToken::rand()),
             7 => DeclareBody::UndeclareToken(UndeclareToken::rand()),
-            8 => DeclareBody::DeclareInterest(DeclareInterest::rand()),
-            9 => DeclareBody::FinalInterest(FinalInterest::rand()),
-            10 => DeclareBody::UndeclareInterest(UndeclareInterest::rand()),
+            8 => DeclareBody::DeclareFinal(DeclareFinal::rand()),
             _ => unreachable!(),
         }
     }
@@ -133,50 +132,67 @@ impl Declare {
 
         let mut rng = rand::thread_rng();
 
-        let body = DeclareBody::rand();
+        let interest_id = rng
+            .gen_bool(0.5)
+            .then_some(rng.gen::<super::interest::InterestId>());
         let ext_qos = ext::QoSType::rand();
         let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand);
         let ext_nodeid = ext::NodeIdType::rand();
+        let body = DeclareBody::rand();
 
         Self {
-            body,
+            interest_id,
             ext_qos,
             ext_tstamp,
             ext_nodeid,
+            body,
         }
     }
 }
 
-#[derive(Debug,
Default, Copy, Clone, PartialEq, Eq)] -#[repr(u8)] -pub enum Mode { - #[default] - Push, - Pull, -} - -impl Mode { - #[cfg(feature = "test")] - fn rand() -> Self { - use rand::Rng; +pub mod common { + use super::*; - let mut rng = rand::thread_rng(); + /// ```text + /// Flags: + /// - X: Reserved + /// - X: Reserved + /// - Z: Extension If Z==1 then at least one extension is present + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |Z|X|X| D_FINAL | + /// +---------------+ + /// ~ [final_exts] ~ if Z==1 + /// +---------------+ + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct DeclareFinal; - if rng.gen_bool(0.5) { - Mode::Push - } else { - Mode::Pull + impl DeclareFinal { + #[cfg(feature = "test")] + pub fn rand() -> Self { + Self } } -} - -pub mod common { - use super::*; pub mod ext { use super::*; - // WARNING: this is a temporary and mandatory extension used for undeclarations + /// ```text + /// Flags: + /// - N: Named If N==1 then the key expr has name/suffix + /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |X|X|X|X|X|X|M|N| + /// +-+-+-+---------+ + /// ~ key_scope:z16 ~ + /// +---------------+ + /// ~ key_suffix ~ if N==1 -- + /// +---------------+ + /// ``` pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -194,6 +210,10 @@ pub mod common { } } + pub fn is_null(&self) -> bool { + self.wire_expr.is_empty() + } + #[cfg(feature = "test")] pub fn rand() -> Self { Self { @@ -286,8 +306,9 @@ pub mod keyexpr { pub mod subscriber { use super::*; + use crate::core::EntityId; - pub type SubscriberId = u32; + pub type SubscriberId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -314,9 +335,7 @@ pub mod subscriber { /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// - /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push - /// + /// - if R==1 then the subscription is reliable, else it is best effort /// /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareSubscriber { @@ -337,29 +356,35 @@ pub mod subscriber { /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// % reserved |P|R% + /// % reserved |R% /// +---------------+ /// /// - if R==1 then the subscription is reliable, else it is best effort - /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, - pub mode: Mode, } impl SubscriberInfo { pub const R: u64 = 1; - pub const P: u64 = 1 << 1; + + pub const DEFAULT: Self = Self { + reliability: Reliability::DEFAULT, + }; #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); - let mode = Mode::rand(); - Self { reliability, mode } + Self { reliability } + } + } + + impl Default for SubscriberInfo { + fn default() -> Self { + Self::DEFAULT } } @@ -370,12 +395,7 @@ pub mod subscriber { } else { Reliability::BestEffort }; - let mode = if imsg::has_option(ext.value, SubscriberInfo::P) { - Mode::Pull - } else { - Mode::Push - }; - Self { reliability, mode } + Self { reliability } } } @@ -385,9 +405,6 @@ pub mod subscriber { if ext.reliability == Reliability::Reliable { v |= 
SubscriberInfo::R; } - if ext.mode == Mode::Pull { - v |= SubscriberInfo::P; - } Info::new(v) } } @@ -429,7 +446,6 @@ pub mod subscriber { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareSubscriber { pub id: SubscriberId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -449,8 +465,9 @@ pub mod subscriber { pub mod queryable { use super::*; + use crate::core::EntityId; - pub type QueryableId = u32; + pub type QueryableId = EntityId; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -486,54 +503,54 @@ pub mod queryable { pub struct DeclareQueryable { pub id: QueryableId, pub wire_expr: WireExpr<'static>, - pub ext_info: ext::QueryableInfo, + pub ext_info: ext::QueryableInfoType, } pub mod ext { use super::*; - pub type Info = zextz64!(0x01, false); + pub type QueryableInfo = zextz64!(0x01, false); + pub mod flag { + pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete + } + /// + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// ~ complete_n ~ + /// |x|x|x|x|x|x|x|C| /// +---------------+ - /// ~ distance ~ + /// ~ distance ~ /// +---------------+ - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] - pub struct QueryableInfo { - pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag - pub distance: u32, // Default 0: no distance + /// ``` + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub struct QueryableInfoType { + pub complete: bool, // Default false: incomplete + pub distance: u16, // Default 0: no distance } - impl QueryableInfo { + impl QueryableInfoType { + pub const DEFAULT: Self = Self { + complete: false, + distance: 0, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); - let complete: u8 = rng.gen(); - let distance: u32 = rng.gen(); + let complete: bool = rng.gen_bool(0.5); + let distance: u16 = rng.gen(); Self { complete, distance } } } - impl From for QueryableInfo { - fn from(ext: Info) -> Self { - let complete = ext.value as u8; - let distance = (ext.value >> 8) as u32; - - Self { complete, distance } - } - } - - impl From for Info { - fn from(ext: QueryableInfo) -> Self { - let mut v: u64 = ext.complete as u64; - v |= (ext.distance as u64) << 8; - Info::new(v) + impl Default for QueryableInfoType { + fn default() -> Self { + Self::DEFAULT } } } @@ -546,7 +563,7 @@ pub mod queryable { let id: QueryableId = rng.gen(); let wire_expr = WireExpr::rand(); - let ext_info = ext::QueryableInfo::rand(); + let ext_info = ext::QueryableInfoType::rand(); Self { id, @@ -574,7 +591,6 @@ pub mod queryable { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareQueryable { pub id: QueryableId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -660,7 +676,6 @@ pub mod token { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareToken { pub id: TokenId, - // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -677,247 +692,3 @@ pub mod token { } } } - -pub mod interest { - use super::*; - - pub type InterestId = u32; - - pub mod flag { - pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix - pub const M: u8 = 1 << 6; // 0x40 Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the 
one declared by the receiver - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow - } - - /// # DeclareInterest message - /// - /// The DECLARE INTEREST message is sent to request the transmission of existing and future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to - /// request the transmission of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to - /// mark the end of the transmission of existing matching declarations. - /// - /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| - /// | DECL SUBSCRIBER | - /// |<------------------| - /// | DECL SUBSCRIBER | - /// |<------------------| - /// | | - /// | FINAL INTEREST | - /// |<------------------| -- The FinalInterest signals that all known subscribers have been transmitted. - /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber declaration. - /// | UNDECL SUBSCRIBER | - /// |<------------------| -- This is a new subscriber undeclaration. - /// | | - /// | ... | - /// | | - /// | UNDECL INTEREST | - /// |------------------>| -- This is an UndeclareInterest to stop receiving subscriber declarations/undeclarations. - /// | | - /// ``` - /// - /// The DECLARE INTEREST message structure is defined as follows: - /// - /// ```text - /// Flags: - /// - N: Named If N==1 then the key expr has name/suffix - /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|M|N| D_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ key_scope:z16 ~ - /// +---------------+ - /// ~ key_suffix ~ if N==1 -- - /// +---------------+ - /// |A|F|C|X|T|Q|S|K| (*) - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// - /// (*) - if K==1 then the interest refers to key expressions - /// - if S==1 then the interest refers to subscribers - /// - if Q==1 then the interest refers to queryables - /// - if T==1 then the interest refers to tokens - /// - if C==1 then the interest refers to the current declarations. - /// - if F==1 then the interest refers to the future declarations. Note that if F==0 then: - /// - replies SHOULD NOT be sent after the FinalInterest; - /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. 
- /// - if A==1 then the replies SHOULD be aggregated - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct DeclareInterest { - pub id: InterestId, - pub wire_expr: WireExpr<'static>, - pub interest: Interest, - } - - #[repr(transparent)] - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct Interest(u8); - - impl Interest { - pub const KEYEXPRS: Interest = Interest(1); - pub const SUBSCRIBERS: Interest = Interest(1 << 1); - pub const QUERYABLES: Interest = Interest(1 << 2); - pub const TOKENS: Interest = Interest(1 << 3); - // pub const X: Interest = Interest(1 << 4); - pub const CURRENT: Interest = Interest(1 << 5); - pub const FUTURE: Interest = Interest(1 << 6); - pub const AGGREGATE: Interest = Interest(1 << 7); - - pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.0, Self::KEYEXPRS.0) - } - - pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.0, Self::SUBSCRIBERS.0) - } - - pub const fn queryables(&self) -> bool { - imsg::has_flag(self.0, Self::QUERYABLES.0) - } - - pub const fn tokens(&self) -> bool { - imsg::has_flag(self.0, Self::TOKENS.0) - } - - pub const fn current(&self) -> bool { - imsg::has_flag(self.0, Self::CURRENT.0) - } - - pub const fn future(&self) -> bool { - imsg::has_flag(self.0, Self::FUTURE.0) - } - - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.0, Self::AGGREGATE.0) - } - - pub const fn as_u8(&self) -> u8 { - self.0 - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let inner: u8 = rng.gen(); - - Self(inner) - } - } - - impl BitOr for Interest { - type Output = Self; - - fn bitor(self, rhs: Self) -> Self::Output { - Self(self.0 | rhs.0) - } - } - - impl From for Interest { - fn from(v: u8) -> Self { - Self(v) - } - } - - impl DeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let wire_expr = WireExpr::rand(); - let interest = Interest::rand(); - - Self { - id, - wire_expr, - interest, - } - } - } - - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| F_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct FinalInterest { - pub id: InterestId, - } - - impl FinalInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - - Self { id } - } - } - - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| U_INT | - /// +---------------+ - /// ~ intst_id:z32 ~ - /// +---------------+ - /// ~ [decl_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct UndeclareInterest { - pub id: InterestId, - // WARNING: this is a temporary and mandatory extension used for undeclarations - pub ext_wire_expr: common::ext::WireExprType, - } - - impl UndeclareInterest { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let id: InterestId = rng.gen(); - let ext_wire_expr = common::ext::WireExprType::rand(); - - Self { id, ext_wire_expr } - } 
-    }
-}
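The new `interest.rs` file below encodes the interest mode in two header bits. As a quick orientation before the layout documented in the file, here is a standalone sketch of that encoding; the message id `0x19` and the bit positions are taken from the layout and `network::id` constants in this diff, but the code is illustrative, not the zenoh codec:

```rust
// Standalone sketch (assumption: not the zenoh codec): the INTEREST header
// byte carries the message id (0x19) in bits 0-4, the two-bit mode in bits
// 5-6, and the Z (extensions) flag in bit 7.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum InterestMode {
    Final = 0b00,
    Current = 0b01,
    Future = 0b10,
    CurrentFuture = 0b11,
}

const INTEREST_ID: u8 = 0x19; // `network::id::INTEREST` in this diff

fn header(mode: InterestMode, has_exts: bool) -> u8 {
    INTEREST_ID | ((mode as u8) << 5) | ((has_exts as u8) << 7)
}

fn mode_of(header: u8) -> InterestMode {
    match (header >> 5) & 0b11 {
        0b00 => InterestMode::Final,
        0b01 => InterestMode::Current,
        0b10 => InterestMode::Future,
        _ => InterestMode::CurrentFuture,
    }
}

fn main() {
    let h = header(InterestMode::CurrentFuture, false);
    assert_eq!(mode_of(h), InterestMode::CurrentFuture);
}
```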
diff --git a/commons/zenoh-protocol/src/network/interest.rs b/commons/zenoh-protocol/src/network/interest.rs
new file mode 100644
index 0000000000..9f329b6ff5
--- /dev/null
+++ b/commons/zenoh-protocol/src/network/interest.rs
@@ -0,0 +1,388 @@
+//
+// Copyright (c) 2022 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use core::{
+    fmt::{self, Debug},
+    ops::{Add, AddAssign, Sub, SubAssign},
+    sync::atomic::AtomicU32,
+};
+
+use crate::{common::imsg, core::WireExpr, network::Mapping};
+
+pub type InterestId = u32;
+
+pub mod flag {
+    pub const Z: u8 = 1 << 7; // 0x80 Extensions     if Z==1 then an extension will follow
+}
+
+/// The INTEREST message is sent to request the transmission of current and optionally future
+/// declarations of a given kind matching a target keyexpr. E.g., an interest could be
+/// sent to request the transmission of all current subscriptions matching `a/*`.
+///
+/// The behaviour of an INTEREST depends on the INTEREST MODE.
+///
+/// E.g., the message flow is the following for an [`Interest`] with mode `Current`:
+///
+/// ```text
+/// A                   B
+/// |     INTEREST      |
+/// |------------------>| -- Mode: Current
+/// |                   |    This is an Interest e.g. for subscriber declarations.
+/// |                   |
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field set
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field set
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field set
+/// |                   |
+/// |    DECL FINAL     |
+/// |<------------------| -- With interest_id field set
+/// |                   |
+/// ```
+///
+/// And the message flow is the following for an [`Interest`] with mode `CurrentFuture`:
+///
+/// ```text
+/// A                   B
+/// |     INTEREST      |
+/// |------------------>| -- This is an Interest e.g. for subscriber declarations/undeclarations.
+/// |                   |
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field not set
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field not set
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field not set
+/// |                   |
+/// |    DECL FINAL     |
+/// |<------------------| -- With interest_id field set
+/// |                   |
+/// |  DECL SUBSCRIBER  |
+/// |<------------------| -- With interest_id field not set
+/// | UNDECL SUBSCRIBER |
+/// |<------------------| -- With interest_id field not set
+/// |                   |
+/// |        ...        |
+/// |                   |
+/// |  INTEREST FINAL   |
+/// |------------------>| -- Mode: Final
+/// |                   |    This stops the transmission of subscriber declarations/undeclarations.
+/// |                   |
+/// ```
+
+/// ```text
+/// Flags:
+/// - |: Mode           The mode of the interest*
+/// -/
+/// - Z: Extension      If Z==1 then at least one extension is present
+///
+///   7 6 5 4 3 2 1 0
+///  +-+-+-+-+-+-+-+-+
+/// |Z|Mod|INTEREST |
+/// +-+-+-+---------+
+/// ~    id:z32     ~
+/// +---------------+
+/// |A|M|N|R|T|Q|S|K|  if Mod!=Final (*)
+/// +---------------+
+/// ~ key_scope:z16 ~  if Mod!=Final && R==1
+/// +---------------+
+/// ~  key_suffix   ~  if Mod!=Final && R==1 && N==1 -- <u8;z16>
+/// +---------------+
+/// ~  [int_exts]   ~  if Z==1
+/// +---------------+
+///
+/// *Mode of declaration:
+/// - Mode 0b00: Final
+/// - Mode 0b01: Current
+/// - Mode 0b10: Future
+/// - Mode 0b11: CurrentFuture
+///
+/// (*) - if K==1 then the interest refers to key expressions
+///     - if S==1 then the interest refers to subscribers
+///     - if Q==1 then the interest refers to queryables
+///     - if T==1 then the interest refers to tokens
+///     - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions.
+///     - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0.
+///     - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver.
+///       If R==0 then M should be set to 0.
+///     - if A==1 then the replies SHOULD be aggregated
+/// ```

+/// The resolution of a RequestId
+pub type DeclareRequestId = u32;
+pub type AtomicDeclareRequestId = AtomicU32;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum InterestMode {
+    Final,
+    Current,
+    Future,
+    CurrentFuture,
+}
+
+impl InterestMode {
+    #[cfg(feature = "test")]
+    pub fn rand() -> Self {
+        use rand::Rng;
+
+        let mut rng = rand::thread_rng();
+
+        match rng.gen_range(0..4) {
+            0 => InterestMode::Final,
+            1 => InterestMode::Current,
+            2 => InterestMode::Future,
+            3 => InterestMode::CurrentFuture,
+            _ => unreachable!(),
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Interest {
+    pub id: InterestId,
+    pub mode: InterestMode,
+    pub options: InterestOptions,
+    pub wire_expr: Option<WireExpr<'static>>,
+    pub ext_qos: ext::QoSType,
+    pub ext_tstamp: Option<ext::TimestampType>,
+    pub ext_nodeid: ext::NodeIdType,
+}
+
+pub mod ext {
+    use crate::{
+        common::{ZExtZ64, ZExtZBuf},
+        zextz64, zextzbuf,
+    };
+
+    pub type QoS = zextz64!(0x1, false);
+    pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>;
+
+    pub type Timestamp = zextzbuf!(0x2, false);
+    pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>;
+
+    pub type NodeId = zextz64!(0x3, true);
+    pub type NodeIdType = crate::network::ext::NodeIdType<{ NodeId::ID }>;
+}
+
+impl Interest {
+    pub fn options(&self) -> u8 {
+        let mut interest = self.options;
+        if let Some(we) = self.wire_expr.as_ref() {
+            interest += InterestOptions::RESTRICTED;
+            if we.has_suffix() {
+                interest += InterestOptions::NAMED;
+            }
+            if let Mapping::Sender = we.mapping {
+                interest += InterestOptions::MAPPING;
+            }
+        }
+        interest.options
+    }
+
+    #[cfg(feature = "test")]
+    pub fn rand() -> Self {
+        use rand::Rng;
+        let mut rng = rand::thread_rng();
+
+        let id = rng.gen::<InterestId>();
+        let mode = InterestMode::rand();
+        let options = if mode == InterestMode::Final {
+            InterestOptions::empty()
+        } else {
+            InterestOptions::rand()
+        };
+        let wire_expr = options.restricted().then_some(WireExpr::rand());
+        let ext_qos = ext::QoSType::rand();
+        let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand);
+        let ext_nodeid = ext::NodeIdType::rand();
+
+        Self {
+            id,
+            mode,
+            wire_expr,
+            options,
+            ext_qos,
+            ext_tstamp,
+            ext_nodeid,
+        }
+    }
+}
+
+#[repr(transparent)] +#[derive(Clone, Copy)] +pub struct InterestOptions { + options: u8, +} + +impl InterestOptions { + // Flags + pub const KEYEXPRS: InterestOptions = InterestOptions::options(1); + pub const SUBSCRIBERS: InterestOptions = InterestOptions::options(1 << 1); + pub const QUERYABLES: InterestOptions = InterestOptions::options(1 << 2); + pub const TOKENS: InterestOptions = InterestOptions::options(1 << 3); + const RESTRICTED: InterestOptions = InterestOptions::options(1 << 4); + const NAMED: InterestOptions = InterestOptions::options(1 << 5); + const MAPPING: InterestOptions = InterestOptions::options(1 << 6); + pub const AGGREGATE: InterestOptions = InterestOptions::options(1 << 7); + pub const ALL: InterestOptions = InterestOptions::options( + InterestOptions::KEYEXPRS.options + | InterestOptions::SUBSCRIBERS.options + | InterestOptions::QUERYABLES.options + | InterestOptions::TOKENS.options, + ); + + const fn options(options: u8) -> Self { + Self { options } + } + + pub const fn empty() -> Self { + Self { options: 0 } + } + + pub const fn keyexprs(&self) -> bool { + imsg::has_flag(self.options, Self::KEYEXPRS.options) + } + + pub const fn subscribers(&self) -> bool { + imsg::has_flag(self.options, Self::SUBSCRIBERS.options) + } + + pub const fn queryables(&self) -> bool { + imsg::has_flag(self.options, Self::QUERYABLES.options) + } + + pub const fn tokens(&self) -> bool { + imsg::has_flag(self.options, Self::TOKENS.options) + } + + pub const fn restricted(&self) -> bool { + imsg::has_flag(self.options, Self::RESTRICTED.options) + } + + pub const fn named(&self) -> bool { + imsg::has_flag(self.options, Self::NAMED.options) + } + + pub const fn mapping(&self) -> bool { + imsg::has_flag(self.options, Self::MAPPING.options) + } + + pub const fn aggregate(&self) -> bool { + imsg::has_flag(self.options, Self::AGGREGATE.options) + } + + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut s = Self::empty(); + if rng.gen_bool(0.5) { + s += InterestOptions::KEYEXPRS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::SUBSCRIBERS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::TOKENS; + } + if rng.gen_bool(0.5) { + s += InterestOptions::AGGREGATE; + } + s + } +} + +impl PartialEq for InterestOptions { + fn eq(&self, other: &Self) -> bool { + self.keyexprs() == other.keyexprs() + && self.subscribers() == other.subscribers() + && self.queryables() == other.queryables() + && self.tokens() == other.tokens() + && self.aggregate() == other.aggregate() + } +} + +impl Debug for InterestOptions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Interest {{ ")?; + if self.keyexprs() { + write!(f, "K:Y, ")?; + } else { + write!(f, "K:N, ")?; + } + if self.subscribers() { + write!(f, "S:Y, ")?; + } else { + write!(f, "S:N, ")?; + } + if self.queryables() { + write!(f, "Q:Y, ")?; + } else { + write!(f, "Q:N, ")?; + } + if self.tokens() { + write!(f, "T:Y, ")?; + } else { + write!(f, "T:N, ")?; + } + if self.aggregate() { + write!(f, "A:Y")?; + } else { + write!(f, "A:N")?; + } + write!(f, " }}")?; + Ok(()) + } +} + +impl Eq for InterestOptions {} + +impl Add for InterestOptions { + type Output = Self; + + #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest + fn add(self, rhs: Self) -> Self::Output { + Self { + options: self.options | rhs.options, + } + } +} + +impl AddAssign for InterestOptions { + #[allow(clippy::suspicious_op_assign_impl)] // Allows to 
implement Add & Sub for Interest
+    fn add_assign(&mut self, rhs: Self) {
+        self.options |= rhs.options;
+    }
+}
+
+impl Sub for InterestOptions {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        Self {
+            options: self.options & !rhs.options,
+        }
+    }
+}
+
+impl SubAssign for InterestOptions {
+    fn sub_assign(&mut self, rhs: Self) {
+        self.options &= !rhs.options;
+    }
+}
+
+impl From<u8> for InterestOptions {
+    fn from(options: u8) -> Self {
+        Self { options }
+    }
+}
diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs
index b2ae5deabe..407df6dd52 100644
--- a/commons/zenoh-protocol/src/network/mod.rs
+++ b/commons/zenoh-protocol/src/network/mod.rs
@@ -12,6 +12,7 @@
 //   ZettaScale Zenoh Team,
 //
 pub mod declare;
+pub mod interest;
 pub mod oam;
 pub mod push;
 pub mod request;
@@ -20,10 +21,10 @@ pub mod response;
 use core::fmt;
 
 pub use declare::{
-    Declare, DeclareBody, DeclareInterest, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber,
-    DeclareToken, UndeclareInterest, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber,
-    UndeclareToken,
+    Declare, DeclareBody, DeclareFinal, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber,
+    DeclareToken, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, UndeclareToken,
 };
+pub use interest::Interest;
 pub use oam::Oam;
 pub use push::Push;
 pub use request::{AtomicRequestId, Request, RequestId};
@@ -40,6 +41,7 @@ pub mod id {
     pub const REQUEST: u8 = 0x1c;
     pub const RESPONSE: u8 = 0x1b;
     pub const RESPONSE_FINAL: u8 = 0x1a;
+    pub const INTEREST: u8 = 0x19;
 }
 
 #[repr(u8)]
@@ -51,6 +53,8 @@ pub enum Mapping {
 }
 
 impl Mapping {
+    pub const DEFAULT: Self = Self::Receiver;
+
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
         use rand::Rng;
@@ -71,6 +75,7 @@ pub enum NetworkBody {
     Request(Request),
     Response(Response),
     ResponseFinal(ResponseFinal),
+    Interest(Interest),
     Declare(Declare),
     OAM(Oam),
 }
@@ -108,6 +113,19 @@ impl NetworkMessage {
         true
     }
 
+    #[inline]
+    pub fn is_express(&self) -> bool {
+        match &self.body {
+            NetworkBody::Push(msg) => msg.ext_qos.is_express(),
+            NetworkBody::Request(msg) => msg.ext_qos.is_express(),
+            NetworkBody::Response(msg) => msg.ext_qos.is_express(),
+            NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(),
+            NetworkBody::Interest(msg) => msg.ext_qos.is_express(),
+            NetworkBody::Declare(msg) => msg.ext_qos.is_express(),
+            NetworkBody::OAM(msg) => msg.ext_qos.is_express(),
+        }
+    }
+
     #[inline]
     pub fn is_droppable(&self) -> bool {
         if !self.is_reliable() {
@@ -115,11 +133,12 @@ impl NetworkMessage {
         }
 
         let cc = match &self.body {
-            NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(),
             NetworkBody::Push(msg) => msg.ext_qos.get_congestion_control(),
             NetworkBody::Request(msg) => msg.ext_qos.get_congestion_control(),
             NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(),
             NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(),
+            NetworkBody::Interest(msg) => msg.ext_qos.get_congestion_control(),
+            NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(),
             NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(),
         };
 
@@ -129,11 +148,12 @@ impl NetworkMessage {
     #[inline]
     pub fn priority(&self) -> Priority {
         match &self.body {
-            NetworkBody::Declare(msg) => msg.ext_qos.get_priority(),
             NetworkBody::Push(msg) => msg.ext_qos.get_priority(),
             NetworkBody::Request(msg) => msg.ext_qos.get_priority(),
             NetworkBody::Response(msg) => msg.ext_qos.get_priority(),
             NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(),
+ NetworkBody::Interest(msg) => msg.ext_qos.get_priority(), + NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } } @@ -148,6 +168,7 @@ impl fmt::Display for NetworkMessage { Request(_) => write!(f, "Request"), Response(_) => write!(f, "Response"), ResponseFinal(_) => write!(f, "ResponseFinal"), + Interest(_) => write!(f, "Interest"), Declare(_) => write!(f, "Declare"), } } @@ -196,11 +217,12 @@ impl From for NetworkMessage { // Extensions pub mod ext { + use core::fmt; + use crate::{ common::{imsg, ZExtZ64}, - core::{CongestionControl, Priority, ZenohId}, + core::{CongestionControl, EntityId, Priority, ZenohIdProto}, }; - use core::fmt; /// ```text /// 7 6 5 4 3 2 1 0 @@ -226,6 +248,16 @@ pub mod ext { const D_FLAG: u8 = 0b00001000; const E_FLAG: u8 = 0b00010000; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false); + + pub const DECLARE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const PUSH: Self = Self::new(Priority::DEFAULT, CongestionControl::Drop, false); + pub const REQUEST: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const RESPONSE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const RESPONSE_FINAL: Self = + Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const OAM: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); + pub const fn new( priority: Priority, congestion_control: CongestionControl, @@ -242,7 +274,7 @@ pub mod ext { } pub fn set_priority(&mut self, priority: Priority) { - self.inner = imsg::set_flag(self.inner, priority as u8); + self.inner = imsg::set_bitfield(self.inner, priority as u8, Self::P_MASK); } pub const fn get_priority(&self) -> Priority { @@ -282,35 +314,11 @@ pub mod ext { let inner: u8 = rng.gen(); Self { inner } } - - pub fn declare_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn push_default() -> Self { - Self::new(Priority::default(), CongestionControl::Drop, false) - } - - pub fn request_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn response_final_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } - - pub fn oam_default() -> Self { - Self::new(Priority::default(), CongestionControl::Block, false) - } } impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default(), CongestionControl::default(), false) + Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false) } } @@ -358,7 +366,7 @@ pub mod ext { let mut rng = rand::thread_rng(); let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); let timestamp = uhlc::Timestamp::new(time, id); Self { timestamp } } @@ -378,6 +386,9 @@ pub mod ext { } impl NodeIdType<{ ID }> { + // node_id == 0 means the message has been generated by the node itself + pub const DEFAULT: Self = Self { node_id: 0 }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -389,8 +400,7 @@ pub mod ext { impl Default for NodeIdType<{ ID }> { fn default() -> Self { - // node_id == 0 means the message has been generated by the node itself - Self { node_id: 0 } + Self::DEFAULT } } @@ 
-408,6 +418,7 @@ pub mod ext {
         }
     }
 
+    /// ```text
     ///  7 6 5 4 3 2 1 0
     /// +-+-+-+-+-+-+-+-+
     /// |zid_len|X|X|X|X|
@@ -416,20 +427,21 @@ pub mod ext {
     /// +---------------+
     /// %      eid      %
     /// +---------------+
+    /// ```
     #[derive(Debug, Clone, PartialEq, Eq)]
-    pub struct EntityIdType<const ID: u8> {
-        pub zid: ZenohId,
-        pub eid: u32,
+    pub struct EntityGlobalIdType<const ID: u8> {
+        pub zid: ZenohIdProto,
+        pub eid: EntityId,
     }
 
-    impl<const ID: u8> EntityIdType<{ ID }> {
+    impl<const ID: u8> EntityGlobalIdType<{ ID }> {
         #[cfg(feature = "test")]
         pub fn rand() -> Self {
             use rand::Rng;
             let mut rng = rand::thread_rng();
 
-            let zid = ZenohId::rand();
-            let eid: u32 = rng.gen();
+            let zid = ZenohIdProto::rand();
+            let eid: EntityId = rng.gen();
             Self { zid, eid }
         }
     }
diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs
index ccd64ae5cd..ceeec85043 100644
--- a/commons/zenoh-protocol/src/network/request.rs
+++ b/commons/zenoh-protocol/src/network/request.rs
@@ -11,9 +11,10 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::{core::WireExpr, zenoh::RequestBody};
 use core::sync::atomic::AtomicU32;
 
+use crate::{core::WireExpr, zenoh::RequestBody};
+
 /// The resolution of a RequestId
 pub type RequestId = u32;
 pub type AtomicRequestId = AtomicU32;
@@ -64,12 +65,12 @@ pub struct Request {
 }
 
 pub mod ext {
+    use core::{num::NonZeroU32, time::Duration};
+
     use crate::{
         common::{ZExtZ64, ZExtZBuf},
-        core::QueryTarget,
         zextz64, zextzbuf,
     };
-    use core::{num::NonZeroU32, time::Duration};
 
     pub type QoS = zextz64!(0x1, false);
     pub type QoSType = crate::network::ext::QoSType<{ QoS::ID }>;
@@ -87,12 +88,19 @@ pub mod ext {
     /// +-+-+-+-+-+-+-+-+
     /// %    target     %
     /// +---------------+
-    ///
-    /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`.
     /// ```
-    pub type TargetType = QueryTarget;
+    /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`.
+ #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + pub enum TargetType { + #[default] + BestMatching, + All, + AllComplete, + } impl TargetType { + pub const DEFAULT: Self = Self::BestMatching; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::*; @@ -102,8 +110,6 @@ pub mod ext { TargetType::All, TargetType::AllComplete, TargetType::BestMatching, - #[cfg(feature = "complete_n")] - TargetType::Complete(rng.gen()), ] .choose(&mut rng) .unwrap() diff --git a/commons/zenoh-protocol/src/network/response.rs b/commons/zenoh-protocol/src/network/response.rs index 9ef2c26a10..6f0925429b 100644 --- a/commons/zenoh-protocol/src/network/response.rs +++ b/commons/zenoh-protocol/src/network/response.rs @@ -67,7 +67,7 @@ pub mod ext { pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; pub type ResponderId = zextzbuf!(0x3, false); - pub type ResponderIdType = crate::network::ext::EntityIdType<{ ResponderId::ID }>; + pub type ResponderIdType = crate::network::ext::EntityGlobalIdType<{ ResponderId::ID }>; } impl Response { diff --git a/commons/zenoh-protocol/src/scouting/hello.rs b/commons/zenoh-protocol/src/scouting/hello.rs index 562e2fb8c4..69109ed611 100644 --- a/commons/zenoh-protocol/src/scouting/hello.rs +++ b/commons/zenoh-protocol/src/scouting/hello.rs @@ -11,14 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::core::{Locator, WhatAmI, ZenohId}; use alloc::vec::Vec; -use core::fmt; + +use crate::core::{Locator, WhatAmI, ZenohIdProto}; /// # Hello message /// -/// The [`Hello`] message is used to advertise the locators a zenoh node is reachable at. -/// The [`Hello`] message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] +/// The `Hello` message is used to advertise the locators a zenoh node is reachable at. +/// The `Hello` message SHOULD be sent in a unicast fashion in response to a [`super::Scout`] /// message as shown below: /// /// ```text @@ -34,7 +34,7 @@ use core::fmt; /// | | | /// ``` /// -/// Moreover, a [`Hello`] message MAY be sent in the network in a multicast +/// Moreover, a `Hello` message MAY be sent in the network in a multicast /// fashion to advertise the presence of zenoh node. 
The advertisement operation MAY be performed
 /// periodically as shown below:
 ///
@@ -54,7 +54,7 @@ use core::fmt;
 /// |                   |                  |
 /// ```
 ///
-/// Examples of locators included in the [`Hello`] message are:
+/// Examples of locators included in the `Hello` message are:
 ///
 /// ```text
 ///  udp/192.168.1.1:7447
@@ -63,7 +63,7 @@ use core::fmt;
 ///  tcp/localhost:7447
 /// ```
 ///
-/// The [`Hello`] message structure is defined as follows:
+/// The `Hello` message structure is defined as follows:
 ///
 /// ```text
 /// Header flags:
@@ -99,24 +99,14 @@ pub mod flag {
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Hello {
+pub struct HelloProto {
     pub version: u8,
     pub whatami: WhatAmI,
-    pub zid: ZenohId,
+    pub zid: ZenohIdProto,
     pub locators: Vec<Locator>,
 }
 
-impl fmt::Display for Hello {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Hello")
-            .field("zid", &self.zid)
-            .field("whatami", &self.whatami)
-            .field("locators", &self.locators)
-            .finish()
-    }
-}
-
-impl Hello {
+impl HelloProto {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
         use rand::Rng;
@@ -124,7 +114,7 @@ impl Hello {
         let mut rng = rand::thread_rng();
 
         let version: u8 = rng.gen();
-        let zid = ZenohId::default();
+        let zid = ZenohIdProto::default();
         let whatami = WhatAmI::rand();
         let locators = if rng.gen_bool(0.5) {
             Vec::from_iter((1..5).map(|_| Locator::rand()))
diff --git a/commons/zenoh-protocol/src/scouting/mod.rs b/commons/zenoh-protocol/src/scouting/mod.rs
index 9e7fd27c2d..2cb5d1c0f0 100644
--- a/commons/zenoh-protocol/src/scouting/mod.rs
+++ b/commons/zenoh-protocol/src/scouting/mod.rs
@@ -14,7 +14,7 @@
 pub mod hello;
 pub mod scout;
 
-pub use hello::Hello;
+pub use hello::HelloProto;
 pub use scout::Scout;
 
 pub mod id {
@@ -27,7 +27,7 @@ pub mod id {
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum ScoutingBody {
     Scout(Scout),
-    Hello(Hello),
+    Hello(HelloProto),
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -46,7 +46,7 @@ impl ScoutingMessage {
 
         match rng.gen_range(0..2) {
             0 => ScoutingBody::Scout(Scout::rand()),
-            1 => ScoutingBody::Hello(Hello::rand()),
+            1 => ScoutingBody::Hello(HelloProto::rand()),
             _ => unreachable!(),
         }
         .into()
@@ -69,8 +69,8 @@ impl From<Scout> for ScoutingMessage {
     }
 }
 
-impl From<Hello> for ScoutingMessage {
-    fn from(hello: Hello) -> Self {
+impl From<HelloProto> for ScoutingMessage {
+    fn from(hello: HelloProto) -> Self {
         ScoutingBody::Hello(hello).into()
     }
 }
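The scout/hello exchange in `scout.rs` below hinges on `WhatAmIMatcher` matching. Here is a standalone sketch, with mirror types rather than the zenoh-protocol ones, of why the one-hot `WhatAmI` values (0b001/0b010/0b100 in `whatami.rs` above) make matching a single bitwise AND:

```rust
// Standalone sketch (mirror types, not zenoh-protocol): a matcher is the
// bitwise OR of the accepted roles, so testing a node's role is one AND.
#[derive(Clone, Copy)]
enum WhatAmI {
    Router = 0b001,
    Peer = 0b010,
    Client = 0b100,
}

struct WhatAmIMatcher(u8);

impl WhatAmIMatcher {
    fn matches(&self, role: WhatAmI) -> bool {
        (self.0 & role as u8) != 0
    }
}

fn main() {
    // A Scout looking for routers or peers, but not clients:
    let matcher = WhatAmIMatcher(WhatAmI::Router as u8 | WhatAmI::Peer as u8);
    assert!(matcher.matches(WhatAmI::Peer));
    assert!(!matcher.matches(WhatAmI::Client));
}
```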
diff --git a/commons/zenoh-protocol/src/scouting/scout.rs b/commons/zenoh-protocol/src/scouting/scout.rs
index b7a51642df..a65c10a4f5 100644
--- a/commons/zenoh-protocol/src/scouting/scout.rs
+++ b/commons/zenoh-protocol/src/scouting/scout.rs
@@ -11,14 +11,14 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::core::{whatami::WhatAmIMatcher, ZenohId};
+use crate::core::{whatami::WhatAmIMatcher, ZenohIdProto};
 
 /// # Scout message
 ///
 /// The [`Scout`] message MAY be sent at any point in time to discover the available zenoh nodes in the
 /// network. The [`Scout`] message SHOULD be sent in a multicast or broadcast fashion. Upon receiving a
 /// [`Scout`] message, a zenoh node MUST first verify whether the matching criteria are satisfied, then
-/// it SHOULD reply with a [`super::Hello`] message in a unicast fashion including all the requested
+/// it SHOULD reply with a [`super::HelloProto`] message in a unicast fashion including all the requested
 /// information.
 ///
 /// The scouting message flow is the following:
@@ -75,7 +75,7 @@ pub mod flag {
 pub struct Scout {
     pub version: u8,
     pub what: WhatAmIMatcher,
-    pub zid: Option<ZenohId>,
+    pub zid: Option<ZenohIdProto>,
 }
 
 impl Scout {
@@ -87,7 +87,7 @@ impl Scout {
 
         let version: u8 = rng.gen();
         let what = WhatAmIMatcher::rand();
-        let zid = rng.gen_bool(0.5).then_some(ZenohId::rand());
+        let zid = rng.gen_bool(0.5).then_some(ZenohIdProto::rand());
         Self { version, what, zid }
     }
 }
diff --git a/commons/zenoh-protocol/src/transport/fragment.rs b/commons/zenoh-protocol/src/transport/fragment.rs
index 5af22db4f1..eccc7b80c0 100644
--- a/commons/zenoh-protocol/src/transport/fragment.rs
+++ b/commons/zenoh-protocol/src/transport/fragment.rs
@@ -11,13 +11,14 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+use zenoh_buffers::ZSlice;
+
 use crate::core::Reliability;
 pub use crate::transport::TransportSn;
-use zenoh_buffers::ZSlice;
 
 /// # Fragment message
 ///
-/// The [`Fragment`] message is used to transmit on the wire large [`crate::zenoh::ZenohMessage`]
+/// The [`Fragment`] message is used to transmit on the wire large [`crate::network::NetworkMessage`]
 /// that require fragmentation because they are larger than the maximum batch size
 /// (i.e. 2^16-1) and/or the link MTU.
 ///
diff --git a/commons/zenoh-protocol/src/transport/frame.rs b/commons/zenoh-protocol/src/transport/frame.rs
index 7afce036ce..b3ef1d819f 100644
--- a/commons/zenoh-protocol/src/transport/frame.rs
+++ b/commons/zenoh-protocol/src/transport/frame.rs
@@ -11,17 +11,18 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn};
 use alloc::vec::Vec;
 
+use crate::{core::Reliability, network::NetworkMessage, transport::TransportSn};
+
 /// # Frame message
 ///
 /// The [`Frame`] message is used to transmit one or more complete serialized
-/// [`crate::net::protocol::message::ZenohMessage`]. I.e., the total length of the
-/// serialized [`crate::net::protocol::message::ZenohMessage`] (s) MUST be smaller
+/// [`crate::network::NetworkMessage`]. I.e., the total length of the
+/// serialized [`crate::network::NetworkMessage`] (s) MUST be smaller
 /// than the maximum batch size (i.e. 2^16-1) and the link MTU.
 /// The [`Frame`] message is used as means to aggregate multiple
-/// [`crate::net::protocol::message::ZenohMessage`] in a single atomic message that
+/// [`crate::network::NetworkMessage`] in a single atomic message that
 /// goes on the wire. By doing so, many small messages can be batched together and
 /// share common information like the sequence number.
 ///
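Both `Fragment` and `Frame` above are bounded by the maximum batch size of 2^16-1 bytes. A minimal standalone sketch of length-delimited batch framing under that bound follows; it illustrates the general technique (a little-endian u16 length prefix), not the actual zenoh codec:

```rust
// Standalone sketch (assumption, not the zenoh codec): a batch written to a
// stream link is prefixed with its length as a little-endian u16, which is
// exactly why a batch can never exceed 2^16 - 1 bytes.
use std::convert::TryFrom;

fn write_batch(out: &mut Vec<u8>, batch: &[u8]) -> Result<(), &'static str> {
    let len = u16::try_from(batch.len()).map_err(|_| "batch exceeds 2^16-1 bytes")?;
    out.extend_from_slice(&len.to_le_bytes()); // u16 length prefix, little-endian
    out.extend_from_slice(batch);
    Ok(())
}

fn read_batch(input: &[u8]) -> Option<&[u8]> {
    let len = u16::from_le_bytes([*input.first()?, *input.get(1)?]) as usize;
    input.get(2..2 + len)
}

fn main() {
    let mut wire = Vec::new();
    write_batch(&mut wire, b"frame bytes").unwrap();
    assert_eq!(read_batch(&wire), Some(&b"frame bytes"[..]));
}
```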
diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs
index 1327288471..7e56bfd770 100644
--- a/commons/zenoh-protocol/src/transport/init.rs
+++ b/commons/zenoh-protocol/src/transport/init.rs
@@ -11,11 +11,12 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+use zenoh_buffers::ZSlice;
+
 use crate::{
-    core::{Resolution, WhatAmI, ZenohId},
+    core::{Resolution, WhatAmI, ZenohIdProto},
     transport::BatchSize,
 };
-use zenoh_buffers::ZSlice;
 
 /// # Init message
 ///
@@ -110,10 +111,11 @@ pub mod flag {
 pub struct InitSyn {
     pub version: u8,
     pub whatami: WhatAmI,
-    pub zid: ZenohId,
+    pub zid: ZenohIdProto,
     pub resolution: Resolution,
     pub batch_size: BatchSize,
     pub ext_qos: Option<ext::QoS>,
+    #[cfg(feature = "shared-memory")]
     pub ext_shm: Option<ext::Shm>,
     pub ext_auth: Option<ext::Auth>,
     pub ext_mlink: Option<ext::MultiLink>,
@@ -134,6 +136,7 @@ pub mod ext {
 
     /// # Shm extension
     /// Used as challenge for probing shared memory capabilities
+    #[cfg(feature = "shared-memory")]
     pub type Shm = zextzbuf!(0x2, false);
 
     /// # Auth extension
@@ -156,17 +159,19 @@ pub mod ext {
 impl InitSyn {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
-        use crate::common::{ZExtUnit, ZExtZBuf};
         use rand::Rng;
 
+        use crate::common::{ZExtUnit, ZExtZBuf};
+
         let mut rng = rand::thread_rng();
 
         let version: u8 = rng.gen();
         let whatami = WhatAmI::rand();
-        let zid = ZenohId::default();
+        let zid = ZenohIdProto::default();
         let resolution = Resolution::rand();
-        let batch_size: u16 = rng.gen();
+        let batch_size: BatchSize = rng.gen();
         let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand());
+        #[cfg(feature = "shared-memory")]
         let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
@@ -180,6 +185,7 @@ impl InitSyn {
             resolution,
             batch_size,
             ext_qos,
+            #[cfg(feature = "shared-memory")]
             ext_shm,
             ext_auth,
             ext_mlink,
@@ -193,11 +199,12 @@ impl InitSyn {
 pub struct InitAck {
     pub version: u8,
     pub whatami: WhatAmI,
-    pub zid: ZenohId,
+    pub zid: ZenohIdProto,
     pub resolution: Resolution,
     pub batch_size: BatchSize,
     pub cookie: ZSlice,
     pub ext_qos: Option<ext::QoS>,
+    #[cfg(feature = "shared-memory")]
     pub ext_shm: Option<ext::Shm>,
     pub ext_auth: Option<ext::Auth>,
     pub ext_mlink: Option<ext::MultiLink>,
@@ -208,22 +215,24 @@ pub struct InitAck {
 impl InitAck {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
-        use crate::common::{ZExtUnit, ZExtZBuf};
         use rand::Rng;
 
+        use crate::common::{ZExtUnit, ZExtZBuf};
+
         let mut rng = rand::thread_rng();
 
         let version: u8 = rng.gen();
         let whatami = WhatAmI::rand();
-        let zid = ZenohId::default();
+        let zid = ZenohIdProto::default();
         let resolution = if rng.gen_bool(0.5) {
             Resolution::default()
         } else {
             Resolution::rand()
         };
-        let batch_size: u16 = rng.gen();
+        let batch_size: BatchSize = rng.gen();
         let cookie = ZSlice::rand(64);
         let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand());
+        #[cfg(feature = "shared-memory")]
         let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
@@ -238,6 +247,7 @@ impl InitAck {
             batch_size,
             cookie,
             ext_qos,
+            #[cfg(feature = "shared-memory")]
             ext_shm,
             ext_auth,
             ext_mlink,
diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs
index c5fbb98430..e1e3f97c33 100644
--- a/commons/zenoh-protocol/src/transport/join.rs
+++ b/commons/zenoh-protocol/src/transport/join.rs
@@ -11,11 +11,12 @@
 // Contributors:
 //
ZettaScale Zenoh Team, // +use core::time::Duration; + use crate::{ - core::{Priority, Resolution, WhatAmI, ZenohId}, + core::{Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{BatchSize, PrioritySn}, }; -use core::time::Duration; /// # Join message /// @@ -104,7 +105,7 @@ pub mod flag { pub struct Join { pub version: u8, pub whatami: WhatAmI, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub resolution: Resolution, pub batch_size: BatchSize, pub lease: Duration, @@ -115,9 +116,10 @@ pub struct Join { // Extensions pub mod ext { + use alloc::boxed::Box; + use super::{Priority, PrioritySn}; use crate::{common::ZExtZBuf, zextzbuf}; - use alloc::boxed::Box; /// # QoS extension /// Used to announce next sn when QoS is enabled @@ -132,16 +134,17 @@ pub mod ext { impl Join { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::ZExtZBuf; use rand::Rng; + use crate::common::ZExtZBuf; + let mut rng = rand::thread_rng(); let version: u8 = rng.gen(); let whatami = WhatAmI::rand(); - let zid = ZenohId::default(); + let zid = ZenohIdProto::default(); let resolution = Resolution::rand(); - let batch_size: u16 = rng.gen(); + let batch_size: BatchSize = rng.gen(); let lease = if rng.gen_bool(0.5) { Duration::from_secs(rng.gen()) } else { diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index 258b43baf6..ba2ac32c4a 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -39,6 +39,7 @@ use crate::network::NetworkMessage; /// the boundary of the serialized messages. The length is encoded as little-endian. /// In any case, the length of a message must not exceed 65_535 bytes. pub type BatchSize = u16; +pub type AtomicBatchSize = core::sync::atomic::AtomicU16; pub mod batch_size { use super::BatchSize; @@ -84,13 +85,18 @@ pub enum TransportBodyLowLatency { pub type TransportSn = u32; -#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct PrioritySn { pub reliable: TransportSn, pub best_effort: TransportSn, } impl PrioritySn { + pub const DEFAULT: Self = Self { + reliable: TransportSn::MIN, + best_effort: TransportSn::MIN, + }; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -249,11 +255,13 @@ impl fmt::Display for TransportMessage { pub mod ext { use crate::{common::ZExtZ64, core::Priority}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// %0| rsv |prio % /// +---------------+ /// - prio: Priority class + /// ``` #[repr(transparent)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct QoSType { @@ -261,7 +269,8 @@ pub mod ext { } impl QoSType<{ ID }> { - pub const P_MASK: u8 = 0b00000111; + const P_MASK: u8 = 0b00000111; + pub const DEFAULT: Self = Self::new(Priority::DEFAULT); pub const fn new(priority: Priority) -> Self { Self { @@ -285,7 +294,7 @@ pub mod ext { impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::default()) + Self::DEFAULT } } diff --git a/commons/zenoh-protocol/src/transport/open.rs b/commons/zenoh-protocol/src/transport/open.rs index f899e8cc24..8042eeb634 100644 --- a/commons/zenoh-protocol/src/transport/open.rs +++ b/commons/zenoh-protocol/src/transport/open.rs @@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::transport::TransportSn; use core::time::Duration; + use zenoh_buffers::ZSlice; +use crate::transport::TransportSn; + /// # Open message /// /// After having successfully complete the 
[`super::InitSyn`]-[`super::InitAck`] message exchange,
@@ -78,6 +80,7 @@ pub struct OpenSyn {
     pub initial_sn: TransportSn,
     pub cookie: ZSlice,
     pub ext_qos: Option<ext::QoS>,
+    #[cfg(feature = "shared-memory")]
     pub ext_shm: Option<ext::Shm>,
     pub ext_auth: Option<ext::Auth>,
     pub ext_mlink: Option<ext::MultiLink>,
@@ -87,9 +90,13 @@ pub struct OpenSyn {
 
 // Extensions
 pub mod ext {
+    #[cfg(feature = "shared-memory")]
+    use crate::common::ZExtZ64;
+    #[cfg(feature = "shared-memory")]
+    use crate::zextz64;
     use crate::{
-        common::{ZExtUnit, ZExtZ64, ZExtZBuf},
-        zextunit, zextz64, zextzbuf,
+        common::{ZExtUnit, ZExtZBuf},
+        zextunit, zextzbuf,
     };
 
     /// # QoS extension
@@ -98,6 +105,7 @@ pub mod ext {
 
     /// # Shm extension
     /// Used as challenge for probing shared memory capabilities
+    #[cfg(feature = "shared-memory")]
     pub type Shm = zextz64!(0x2, false);
 
     /// # Auth extension
@@ -121,9 +129,12 @@ pub mod ext {
 impl OpenSyn {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
-        use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf};
         use rand::Rng;
 
+        #[cfg(feature = "shared-memory")]
+        use crate::common::ZExtZ64;
+        use crate::common::{ZExtUnit, ZExtZBuf};
+
         const MIN: usize = 32;
         const MAX: usize = 1_024;
 
@@ -138,6 +149,7 @@ impl OpenSyn {
         let initial_sn: TransportSn = rng.gen();
         let cookie = ZSlice::rand(rng.gen_range(MIN..=MAX));
         let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand());
+        #[cfg(feature = "shared-memory")]
         let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand());
         let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_mlink = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
@@ -149,6 +161,7 @@ impl OpenSyn {
             initial_sn,
             cookie,
             ext_qos,
+            #[cfg(feature = "shared-memory")]
             ext_shm,
             ext_auth,
             ext_mlink,
@@ -163,6 +176,7 @@ pub struct OpenAck {
     pub lease: Duration,
     pub initial_sn: TransportSn,
     pub ext_qos: Option<ext::QoS>,
+    #[cfg(feature = "shared-memory")]
     pub ext_shm: Option<ext::Shm>,
     pub ext_auth: Option<ext::Auth>,
     pub ext_mlink: Option<ext::MultiLink>,
@@ -173,9 +187,12 @@ pub struct OpenAck {
 impl OpenAck {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
-        use crate::common::{ZExtUnit, ZExtZ64, ZExtZBuf};
         use rand::Rng;
 
+        #[cfg(feature = "shared-memory")]
+        use crate::common::ZExtZ64;
+        use crate::common::{ZExtUnit, ZExtZBuf};
+
         let mut rng = rand::thread_rng();
 
         let lease = if rng.gen_bool(0.5) {
@@ -186,6 +203,7 @@ impl OpenAck {
 
         let initial_sn: TransportSn = rng.gen();
         let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand());
+        #[cfg(feature = "shared-memory")]
         let ext_shm = rng.gen_bool(0.5).then_some(ZExtZ64::rand());
         let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand());
         let ext_mlink = rng.gen_bool(0.5).then_some(ZExtUnit::rand());
@@ -196,6 +214,7 @@ impl OpenAck {
             lease,
             initial_sn,
             ext_qos,
+            #[cfg(feature = "shared-memory")]
             ext_shm,
             ext_auth,
             ext_mlink,
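The `#[cfg(feature = "shared-memory")]` gating added throughout `InitSyn`/`InitAck` and `OpenSyn`/`OpenAck` follows a standard Rust pattern: every declaration, initializer, and struct-literal mention of the field carries the same attribute so the field compiles out entirely when the feature is off. A minimal standalone sketch with stand-in types:

```rust
// Standalone sketch (illustrative stand-in types, not the zenoh structs):
// compiling a handshake extension out of a wire struct behind a feature flag.
pub struct OpenSynSketch {
    pub initial_sn: u32,
    #[cfg(feature = "shared-memory")]
    pub ext_shm: Option<u64>, // stand-in for the real Shm extension type
}

fn make() -> OpenSynSketch {
    // The initializer is gated exactly like the field it populates.
    #[cfg(feature = "shared-memory")]
    let ext_shm = Some(0xbeef);
    OpenSynSketch {
        initial_sn: 0,
        #[cfg(feature = "shared-memory")]
        ext_shm,
    }
}

fn main() {
    let _msg = make();
}
```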
diff --git a/commons/zenoh-protocol/src/zenoh/ack.rs b/commons/zenoh-protocol/src/zenoh/ack.rs
deleted file mode 100644
index d40bf58791..0000000000
--- a/commons/zenoh-protocol/src/zenoh/ack.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Copyright (c) 2022 ZettaScale Technology
-//
-// This program and the accompanying materials are made available under the
-// terms of the Eclipse Public License 2.0 which is available at
-// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
-// which is available at https://www.apache.org/licenses/LICENSE-2.0.
-//
-// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
-//
-// Contributors:
-//   ZettaScale Zenoh Team,
-//
-use crate::common::ZExtUnknown;
-use alloc::vec::Vec;
-use uhlc::Timestamp;
-
-/// # Ack message
-///
-/// ```text
-/// Flags:
-/// - T: Timestamp      If T==1 then the timestamp if present
-/// - X: Reserved
-/// - Z: Extension      If Z==1 then at least one extension is present
-///
-///   7 6 5 4 3 2 1 0
-///  +-+-+-+-+-+-+-+-+
-/// |Z|X|T|   ACK   |
-/// +-+-+-+---------+
-/// ~ ts: <u8;z16>  ~  if T==1
-/// +---------------+
-/// ~  [err_exts]   ~  if Z==1
-/// +---------------+
-/// ```
-pub mod flag {
-    pub const T: u8 = 1 << 5; // 0x20 Timestamp      if T==0 then the timestamp if present
-    // pub const X: u8 = 1 << 6; // 0x40 Reserved
-    pub const Z: u8 = 1 << 7; // 0x80 Extensions     if Z==1 then an extension will follow
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Ack {
-    pub timestamp: Option<Timestamp>,
-    pub ext_sinfo: Option<ext::SourceInfoType>,
-    pub ext_unknown: Vec<ZExtUnknown>,
-}
-
-pub mod ext {
-    use crate::{common::ZExtZBuf, zextzbuf};
-
-    /// # SourceInfo extension
-    /// Used to carry additional information about the source of data
-    pub type SourceInfo = zextzbuf!(0x1, false);
-    pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>;
-}
-
-impl Ack {
-    #[cfg(feature = "test")]
-    pub fn rand() -> Self {
-        use crate::{common::iext, core::ZenohId};
-        use rand::Rng;
-        let mut rng = rand::thread_rng();
-
-        let timestamp = rng.gen_bool(0.5).then_some({
-            let time = uhlc::NTP64(rng.gen());
-            let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap();
-            Timestamp::new(time, id)
-        });
-        let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand());
-        let mut ext_unknown = Vec::new();
-        for _ in 0..rng.gen_range(0..4) {
-            ext_unknown.push(ZExtUnknown::rand2(
-                iext::mid(ext::SourceInfo::ID) + 1,
-                false,
-            ));
-        }
-
-        Self {
-            timestamp,
-            ext_sinfo,
-            ext_unknown,
-        }
-    }
-}
diff --git a/commons/zenoh-protocol/src/zenoh/del.rs b/commons/zenoh-protocol/src/zenoh/del.rs
index 84fec5bc08..d4c6b8b3ac 100644
--- a/commons/zenoh-protocol/src/zenoh/del.rs
+++ b/commons/zenoh-protocol/src/zenoh/del.rs
@@ -11,10 +11,12 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::common::ZExtUnknown;
 use alloc::vec::Vec;
+
 use uhlc::Timestamp;
 
+use crate::common::ZExtUnknown;
+
 /// # Put message
 ///
 /// ```text
@@ -62,13 +64,14 @@ pub mod ext {
 impl Del {
     #[cfg(feature = "test")]
     pub fn rand() -> Self {
-        use crate::{common::iext, core::ZenohId};
         use rand::Rng;
+
+        use crate::{common::iext, core::ZenohIdProto};
         let mut rng = rand::thread_rng();
 
         let timestamp = rng.gen_bool(0.5).then_some({
             let time = uhlc::NTP64(rng.gen());
-            let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap();
+            let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap();
             Timestamp::new(time, id)
         });
         let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand());
+/// - X: Reserved +/// - E: Encoding If E==1 then the encoding is present /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|I|T| ERR | +/// |Z|E|X| ERR | /// +-+-+-+---------+ -/// % code:z16 % -/// +---------------+ -/// ~ ts: ~ if T==1 +/// ~ encoding ~ if E==1 /// +---------------+ /// ~ [err_exts] ~ if Z==1 /// +---------------+ +/// ~ pl: ~ -- Payload +/// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const I: u8 = 1 << 6; // 0x40 Infrastructure if I==1 then the error is related to the infrastructure else to the user + // pub const X: u8 = 1 << 5; // 0x20 Reserved + pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Err { - pub code: u16, - pub is_infrastructure: bool, - pub timestamp: Option, + pub encoding: Encoding, pub ext_sinfo: Option, - pub ext_body: Option, + #[cfg(feature = "shared-memory")] + pub ext_shm: Option, pub ext_unknown: Vec, + pub payload: ZBuf, } pub mod ext { + #[cfg(feature = "shared-memory")] + use crate::{common::ZExtUnit, zextunit}; use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension @@ -58,44 +62,42 @@ pub mod ext { pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - /// # ErrBody extension - /// Used to carry a body attached to the query - /// Shared Memory extension is automatically defined by ValueType extension if - /// #[cfg(feature = "shared-memory")] is defined. - pub type ErrBodyType = crate::zenoh::ext::ValueType<{ ZExtZBuf::<0x02>::id(false) }, 0x03>; + /// # Shared Memory extension + /// Used to carry additional information about the shared-memory layout of data + #[cfg(feature = "shared-memory")] + pub type Shm = zextunit!(0x2, true); + #[cfg(feature = "shared-memory")] + pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; } impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::common::iext; let mut rng = rand::thread_rng(); - let code: u16 = rng.gen(); - let is_infrastructure = rng.gen_bool(0.5); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); + let encoding = Encoding::rand(); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_body = rng.gen_bool(0.5).then_some(ext::ErrBodyType::rand()); + #[cfg(feature = "shared-memory")] + let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::ErrBodyType::SID) + 1, + iext::mid(ext::SourceInfo::ID) + 1, false, )); } + let payload = ZBuf::rand(rng.gen_range(0..=64)); Self { - code, - is_infrastructure, - timestamp, + encoding, ext_sinfo, - ext_body, + #[cfg(feature = "shared-memory")] + ext_shm, ext_unknown, + payload, } } } diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index f2c38c3a1d..320db6884d 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -11,23 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // -pub mod 
ack; pub mod del; pub mod err; -pub mod pull; pub mod put; pub mod query; pub mod reply; -use crate::core::Encoding; -pub use ack::Ack; pub use del::Del; pub use err::Err; -pub use pull::Pull; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; +use crate::core::Encoding; + pub mod id { pub const OAM: u8 = 0x00; pub const PUT: u8 = 0x01; @@ -35,8 +32,6 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; - pub const ACK: u8 = 0x06; - pub const PULL: u8 = 0x07; } // DataInfo @@ -83,9 +78,6 @@ impl From for PushBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestBody { Query(Query), - Put(Put), - Del(Del), - Pull(Pull), } impl RequestBody { @@ -95,10 +87,8 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..3) { + match rng.gen_range(0..1) { 0 => RequestBody::Query(Query::rand()), - 1 => RequestBody::Put(Put::rand()), - 2 => RequestBody::Del(Del::rand()), _ => unreachable!(), } } @@ -110,39 +100,22 @@ impl From for RequestBody { } } -impl From for RequestBody { - fn from(p: Put) -> RequestBody { - RequestBody::Put(p) - } -} - -impl From for RequestBody { - fn from(d: Del) -> RequestBody { - RequestBody::Del(d) - } -} - // Response #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), Err(Err), - Ack(Ack), - Put(Put), } impl ResponseBody { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; - let mut rng = rand::thread_rng(); - match rng.gen_range(0..4) { + match rng.gen_range(0..2) { 0 => ResponseBody::Reply(Reply::rand()), 1 => ResponseBody::Err(Err::rand()), - 2 => ResponseBody::Ack(Ack::rand()), - 3 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } @@ -160,17 +133,12 @@ impl From for ResponseBody { } } -impl From for ResponseBody { - fn from(r: Ack) -> ResponseBody { - ResponseBody::Ack(r) - } -} - pub mod ext { use zenoh_buffers::ZBuf; - use crate::core::{Encoding, ZenohId}; + use crate::core::{Encoding, EntityGlobalIdProto}; + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |zid_len|X|X|X|X| @@ -181,10 +149,10 @@ pub mod ext { /// +---------------+ /// % sn % /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { - pub zid: ZenohId, - pub eid: u32, + pub id: EntityGlobalIdProto, pub sn: u32, } @@ -194,10 +162,9 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let zid = ZenohId::rand(); - let eid: u32 = rng.gen(); + let id = EntityGlobalIdProto::rand(); let sn: u32 = rng.gen(); - Self { zid, eid, sn } + Self { id, sn } } } @@ -226,12 +193,14 @@ pub mod ext { } } + /// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ /// ~ pl: [u8;z32] ~ -- Payload /// +---------------+ + /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct ValueType { #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-protocol/src/zenoh/pull.rs b/commons/zenoh-protocol/src/zenoh/pull.rs deleted file mode 100644 index eb4f7eb55e..0000000000 --- a/commons/zenoh-protocol/src/zenoh/pull.rs +++ /dev/null @@ -1,56 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::common::ZExtUnknown; -use alloc::vec::Vec; - -/// # Pull message -/// -/// ```text -/// Flags: -/// - X: Reserved -/// - X: Reserved -/// - Z: Extension If Z==1 then at least one extension is present -/// -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// |Z|X|X| PULL | -/// +-+-+-+---------+ -/// ~ [pull_exts] ~ if Z==1 -/// +---------------+ -/// ``` -pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved - pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Pull { - pub ext_unknown: Vec, -} - -impl Pull { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let mut ext_unknown = Vec::new(); - for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2(1, false)); - } - - Self { ext_unknown } - } -} diff --git a/commons/zenoh-protocol/src/zenoh/put.rs b/commons/zenoh-protocol/src/zenoh/put.rs index ac18aaf00a..ac45b1cc1b 100644 --- a/commons/zenoh-protocol/src/zenoh/put.rs +++ b/commons/zenoh-protocol/src/zenoh/put.rs @@ -11,11 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; + use uhlc::Timestamp; use zenoh_buffers::ZBuf; +use crate::{common::ZExtUnknown, core::Encoding}; + /// # Put message /// /// ```text @@ -80,13 +82,14 @@ pub mod ext { impl Put { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId}; use rand::Rng; + + use crate::{common::iext, core::ZenohIdProto}; let mut rng = rand::thread_rng(); let timestamp = rng.gen_bool(0.5).then_some({ let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + let id = uhlc::ID::try_from(ZenohIdProto::rand().to_le_bytes()).unwrap(); Timestamp::new(time, id) }); let encoding = Encoding::rand(); diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index 7432840492..988447b835 100644 --- a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::ConsolidationMode}; use alloc::{string::String, vec::Vec}; +use crate::common::ZExtUnknown; + /// The kind of consolidation. #[repr(u8)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy)] @@ -33,35 +34,21 @@ pub enum Consolidation { Monotonic, /// Holds back samples to only send the set of samples that had the highest timestamp for their key. Latest, - /// Remove the duplicates of any samples based on the their timestamp. - Unique, + // Remove the duplicates of any samples based on the their timestamp. 
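Note on the `Consolidation` change above: the `Unique` variant is dropped from the wire format, leaving only a commented-out placeholder, and a `DEFAULT` constant is introduced. The enum closes in the next hunk lines; its resulting shape is, in a hedged reconstruction:

```rust
// Hedged reconstruction of the enum's final shape after this hunk
// (variant docs elided; `#[default]` on Auto is an assumption, but it
// is consistent with the `DEFAULT` constant the hunk introduces).
#[repr(u8)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy)]
pub enum Consolidation {
    #[default]
    Auto,
    None,
    Monotonic,
    Latest,
    // Unique, -- removed from the wire format by this change
}

impl Consolidation {
    pub const DEFAULT: Self = Self::Auto;
}
```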
+ // Unique, } impl Consolidation { + pub const DEFAULT: Self = Self::Auto; + #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::SliceRandom; let mut rng = rand::thread_rng(); - *[ - Self::None, - Self::Monotonic, - Self::Latest, - Self::Unique, - Self::Auto, - ] - .choose(&mut rng) - .unwrap() - } -} - -impl From for Consolidation { - fn from(val: ConsolidationMode) -> Self { - match val { - ConsolidationMode::None => Consolidation::None, - ConsolidationMode::Monotonic => Consolidation::Monotonic, - ConsolidationMode::Latest => Consolidation::Latest, - } + *[Self::None, Self::Monotonic, Self::Latest, Self::Auto] + .choose(&mut rng) + .unwrap() } } @@ -69,50 +56,45 @@ impl From for Consolidation { /// /// ```text /// Flags: +/// - C: Consolidation if C==1 then consolidation is present /// - P: Parameters If P==1 then the parameters are present -/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|P| QUERY | +/// |Z|P|C| QUERY | /// +-+-+-+---------+ +/// % consolidation % if C==1 +/// +---------------+ /// ~ ps: ~ if P==1 /// +---------------+ /// ~ [qry_exts] ~ if Z==1 /// +---------------+ /// ``` pub mod flag { - pub const P: u8 = 1 << 5; // 0x20 Parameters if P==1 then the parameters are present - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + pub const P: u8 = 1 << 6; // 0x40 Parameters if P==1 then the parameters are present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Query { + pub consolidation: Consolidation, pub parameters: String, pub ext_sinfo: Option, - pub ext_consolidation: Consolidation, pub ext_body: Option, pub ext_attachment: Option, pub ext_unknown: Vec, } pub mod ext { - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; + use crate::{common::ZExtZBuf, zextzbuf}; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::Consolidation; - /// # QueryBody extension /// Used to carry a body attached to the query /// Shared Memory extension is automatically defined by ValueType extension if @@ -127,16 +109,18 @@ pub mod ext { impl Query { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; use rand::{ distributions::{Alphanumeric, DistString}, Rng, }; + + use crate::common::iext; let mut rng = rand::thread_rng(); const MIN: usize = 2; const MAX: usize = 16; + let consolidation = Consolidation::rand(); let parameters: String = if rng.gen_bool(0.5) { let len = rng.gen_range(MIN..MAX); Alphanumeric.sample_string(&mut rng, len) @@ -144,7 +128,6 @@ impl Query { String::new() }; let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); let ext_body = rng.gen_bool(0.5).then_some(ext::QueryBodyType::rand()); let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); @@ -156,9 +139,9 @@ impl Query { } Self { + consolidation, parameters, ext_sinfo, - ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git 
a/commons/zenoh-protocol/src/zenoh/reply.rs b/commons/zenoh-protocol/src/zenoh/reply.rs index 0cdbcd2cdc..f29521a4a9 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,115 +11,62 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; -use uhlc::Timestamp; -use zenoh_buffers::ZBuf; + +use crate::{ + common::ZExtUnknown, + zenoh::{query::Consolidation, PushBody}, +}; /// # Reply message /// /// ```text /// Flags: -/// - T: Timestamp If T==1 then the timestamp if present -/// - E: Encoding If E==1 then the encoding is present +/// - C: Consolidation if C==1 then consolidation is present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|E|T| REPLY | +/// |Z|X|C| REPLY | /// +-+-+-+---------+ -/// ~ ts: ~ if T==1 -/// +---------------+ -/// ~ encoding ~ if E==1 +/// % consolidation % if C==1 /// +---------------+ /// ~ [repl_exts] ~ if Z==1 /// +---------------+ -/// ~ pl: ~ -- Payload +/// ~ ReplyBody ~ -- Payload /// +---------------+ /// ``` pub mod flag { - pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present - pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present + pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Reply { - pub timestamp: Option, - pub encoding: Encoding, - pub ext_sinfo: Option, - pub ext_consolidation: ext::ConsolidationType, - #[cfg(feature = "shared-memory")] - pub ext_shm: Option, - pub ext_attachment: Option, + pub consolidation: Consolidation, pub ext_unknown: Vec, - pub payload: ZBuf, + pub payload: ReplyBody, } -pub mod ext { - #[cfg(feature = "shared-memory")] - use crate::{common::ZExtUnit, zextunit}; - use crate::{ - common::{ZExtZ64, ZExtZBuf}, - zextz64, zextzbuf, - }; - - /// # SourceInfo extension - /// Used to carry additional information about the source of data - pub type SourceInfo = zextzbuf!(0x1, false); - pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; - - /// # Consolidation extension - pub type Consolidation = zextz64!(0x2, true); - pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; - - /// # Shared Memory extension - /// Used to carry additional information about the shared-memory layout of data - #[cfg(feature = "shared-memory")] - pub type Shm = zextunit!(0x3, true); - #[cfg(feature = "shared-memory")] - pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; - - /// # User attachment - pub type Attachment = zextzbuf!(0x4, false); - pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; -} +pub type ReplyBody = PushBody; impl Reply { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::{common::iext, core::ZenohId, zenoh::Consolidation}; use rand::Rng; let mut rng = rand::thread_rng(); - let timestamp = rng.gen_bool(0.5).then_some({ - let time = uhlc::NTP64(rng.gen()); - let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); - Timestamp::new(time, id) - }); - let encoding = Encoding::rand(); - let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); - let ext_consolidation = Consolidation::rand(); - #[cfg(feature = "shared-memory")] - let 
ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); - let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); + let payload = ReplyBody::rand(); + let consolidation = Consolidation::rand(); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::Attachment::ID) + 1, - false, - )); + ext_unknown.push(ZExtUnknown::rand2(1, false)); } - let payload = ZBuf::rand(rng.gen_range(1..=64)); Self { - timestamp, - encoding, - ext_sinfo, - ext_consolidation, - #[cfg(feature = "shared-memory")] - ext_shm, - ext_attachment, + consolidation, ext_unknown, payload, } diff --git a/commons/zenoh-result/src/lib.rs b/commons/zenoh-result/src/lib.rs index 60148c763f..79de74f4eb 100644 --- a/commons/zenoh-result/src/lib.rs +++ b/commons/zenoh-result/src/lib.rs @@ -20,9 +20,10 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -use anyhow::Error as AnyError; use core::fmt; +use anyhow::Error as AnyError; + #[cold] pub const fn cold() {} pub const fn likely(b: bool) -> bool { diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index e3f0c7a3c0..bfc1a8ffb5 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -18,7 +18,6 @@ ron = { workspace = true } serde = { workspace = true } futures = { workspace = true } lazy_static = { workspace = true } +tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } zenoh-result = { workspace = true, features = ["std"] } -zenoh-collections = { workspace = true, features = ["std"] } zenoh-macros = { workspace = true } -tokio = { workspace = true, features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "sync", "time"] } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index 7c28218a5f..c0e1bec2ef 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -12,8 +12,6 @@ // ZettaScale Zenoh Team, // use core::panic; -use lazy_static::lazy_static; -use serde::Deserialize; use std::{ borrow::Borrow, collections::HashMap, @@ -26,6 +24,9 @@ use std::{ }, time::Duration, }; + +use lazy_static::lazy_static; +use serde::Deserialize; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; use zenoh_macros::{GenericRuntimeParam, RegisterParam}; use zenoh_result::ZResult as Result; diff --git a/commons/zenoh-shm/Cargo.toml b/commons/zenoh-shm/Cargo.toml index e6107b9a13..e5eb204a23 100644 --- a/commons/zenoh-shm/Cargo.toml +++ b/commons/zenoh-shm/Cargo.toml @@ -28,9 +28,28 @@ categories = { workspace = true } description = "Internal crate for zenoh." 
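Note on the recurring import hunks (zenoh-result and zenoh-runtime above, and the protocol files earlier): imports are regrouped as std/core first, then external crates, then crate-local paths, separated by blank lines. A small compilable sketch of the grouping, with a stand-in module so the third group resolves (anyhow is assumed as a dependency, as elsewhere in this workspace):

```rust
// The import grouping this diff converges on: std/core first, then
// external crates, then crate-local paths, each group blank-line separated.
use core::fmt;

use anyhow::Error as AnyError;

use crate::support::describe;

mod support {
    pub fn describe(msg: &str) -> String {
        format!("error: {msg}")
    }
}

pub fn render(err: &AnyError) -> impl fmt::Display {
    describe(&err.to_string())
}
```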
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +test = ["num_cpus"] + [dependencies] -tracing = {workspace = true} +async-trait = { workspace = true } +bincode = { workspace = true } +crc = { workspace = true } +tracing = { workspace = true } serde = { workspace = true, features = ["default"] } shared_memory = { workspace = true } -zenoh-buffers = { workspace = true } +tokio = { workspace = true } zenoh-result = { workspace = true } +zenoh-core = { workspace = true } +zenoh-macros = { workspace = true } +zenoh-buffers = { workspace = true } +rand = { workspace = true } +static_init = { workspace = true } +num-traits = { workspace = true } +num_cpus = { workspace = true, optional = true } +thread-priority = { workspace = true } +lockfree = { workspace = true } +stabby = { workspace = true } + +[dev-dependencies] +libc = { workspace = true } \ No newline at end of file diff --git a/commons/zenoh-shm/src/api/buffer/mod.rs b/commons/zenoh-shm/src/api/buffer/mod.rs new file mode 100644 index 0000000000..8a3e040da9 --- /dev/null +++ b/commons/zenoh-shm/src/api/buffer/mod.rs @@ -0,0 +1,17 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod traits; +pub mod zshm; +pub mod zshmmut; diff --git a/commons/zenoh-shm/src/api/buffer/traits.rs b/commons/zenoh-shm/src/api/buffer/traits.rs new file mode 100644 index 0000000000..a5d6b9eba5 --- /dev/null +++ b/commons/zenoh-shm/src/api/buffer/traits.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::ops::{Deref, DerefMut}; + +#[zenoh_macros::unstable_doc] +pub trait ShmBuf: Deref + AsRef<[u8]> { + #[zenoh_macros::unstable_doc] + fn is_valid(&self) -> bool; +} + +#[zenoh_macros::unstable_doc] +pub trait ShmBufMut: ShmBuf + DerefMut + AsMut<[u8]> {} diff --git a/commons/zenoh-shm/src/api/buffer/zshm.rs b/commons/zenoh-shm/src/api/buffer/zshm.rs new file mode 100644 index 0000000000..8a028277a7 --- /dev/null +++ b/commons/zenoh-shm/src/api/buffer/zshm.rs @@ -0,0 +1,171 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::Deref; +use std::{ + borrow::{Borrow, BorrowMut}, + ops::DerefMut, +}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use super::{traits::ShmBuf, zshmmut::zshmmut}; +use crate::ShmBufInner; + +/// An immutable SHM buffer +#[zenoh_macros::unstable_doc] +#[repr(transparent)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ZShm(pub(crate) ShmBufInner); + +impl ShmBuf for ZShm { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl PartialEq<&zshm> for ZShm { + fn eq(&self, other: &&zshm) -> bool { + self.0 == other.0 .0 + } +} + +impl Borrow for ZShm { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZShm { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZShm { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} + +impl AsRef<[u8]> for ZShm { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl From for ZShm { + fn from(value: ShmBufInner) -> Self { + Self(value) + } +} + +impl From for ZSlice { + fn from(value: ZShm) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZShm) -> Self { + value.0.into() + } +} + +impl TryFrom<&mut ZShm> for &mut zshmmut { + type Error = (); + + fn try_from(value: &mut ZShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + Ok(unsafe { core::mem::transmute::<&mut ZShm, &mut zshmmut>(value) }) + } + false => Err(()), + } + } +} + +/// A borrowed immutable SHM buffer +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zshm(ZShm); + +impl ToOwned for zshm { + type Owned = ZShm; + + fn to_owned(&self) -> Self::Owned { + self.0.clone() + } +} + +impl PartialEq for &zshm { + fn eq(&self, other: &ZShm) -> bool { + self.0 .0 == other.0 + } +} + +impl Deref for zshm { + type Target = ZShm; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zshm { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From<&ShmBufInner> for &zshm { + fn from(value: &ShmBufInner) -> Self { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl From<&mut ShmBufInner> for &mut zshm { + fn from(value: &mut ShmBufInner) -> Self { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(value) } + } +} + +impl TryFrom<&mut zshm> for &mut zshmmut { + type Error = (); + + fn try_from(value: &mut zshm) -> Result { + match value.0 .0.is_unique() && value.0 .0.is_valid() { + true => { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + Ok(unsafe { 
core::mem::transmute::<&mut zshm, &mut zshmmut>(value) }) + } + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/api/buffer/zshmmut.rs b/commons/zenoh-shm/src/api/buffer/zshmmut.rs new file mode 100644 index 0000000000..a116a7f421 --- /dev/null +++ b/commons/zenoh-shm/src/api/buffer/zshmmut.rs @@ -0,0 +1,188 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use core::ops::{Deref, DerefMut}; +use std::borrow::{Borrow, BorrowMut}; + +use zenoh_buffers::{ZBuf, ZSlice}; + +use super::{ + traits::{ShmBuf, ShmBufMut}, + zshm::{zshm, ZShm}, +}; +use crate::ShmBufInner; + +/// A mutable SHM buffer +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[repr(transparent)] +pub struct ZShmMut(ShmBufInner); + +impl ShmBuf for ZShmMut { + fn is_valid(&self) -> bool { + self.0.is_valid() + } +} + +impl ShmBufMut for ZShmMut {} + +impl ZShmMut { + pub(crate) unsafe fn new_unchecked(data: ShmBufInner) -> Self { + Self(data) + } +} + +impl PartialEq for &ZShmMut { + fn eq(&self, other: &zshmmut) -> bool { + self.0 == other.0 .0 + } +} + +impl TryFrom for ZShmMut { + type Error = ShmBufInner; + + fn try_from(value: ShmBufInner) -> Result { + match value.is_unique() && value.is_valid() { + true => Ok(Self(value)), + false => Err(value), + } + } +} + +impl TryFrom for ZShmMut { + type Error = ZShm; + + fn try_from(value: ZShm) -> Result { + match value.0.is_unique() && value.0.is_valid() { + true => Ok(Self(value.0)), + false => Err(value), + } + } +} + +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshm { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Borrow for ZShmMut { + fn borrow(&self) -> &zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl BorrowMut for ZShmMut { + fn borrow_mut(&mut self) -> &mut zshmmut { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + unsafe { core::mem::transmute(self) } + } +} + +impl Deref for ZShmMut { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } +} + +impl DerefMut for ZShmMut { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0.as_mut() + } +} + +impl AsRef<[u8]> for ZShmMut { + fn as_ref(&self) -> &[u8] { + self + } +} + +impl AsMut<[u8]> for ZShmMut { + fn as_mut(&mut self) -> &mut [u8] { + self + } +} + +impl From for ZShm { + fn from(value: ZShmMut) -> Self { + value.0.into() + } +} + +impl From for ZSlice { + fn from(value: ZShmMut) -> Self { + value.0.into() + } +} + +impl From for ZBuf { + fn from(value: ZShmMut) -> Self { + 
value.0.into() + } +} + +/// A borrowed mutable SHM buffer +#[zenoh_macros::unstable_doc] +#[derive(Debug, PartialEq, Eq)] +#[allow(non_camel_case_types)] +#[repr(transparent)] +pub struct zshmmut(ZShmMut); + +impl PartialEq for &zshmmut { + fn eq(&self, other: &ZShmMut) -> bool { + self.0 .0 == other.0 + } +} + +impl Deref for zshmmut { + type Target = ZShmMut; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for zshmmut { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl TryFrom<&mut ShmBufInner> for &mut zshmmut { + type Error = (); + + fn try_from(value: &mut ShmBufInner) -> Result { + match value.is_unique() && value.is_valid() { + // SAFETY: ZShm, ZShmMut, zshm and zshmmut are #[repr(transparent)] + // to ShmBufInner type, so it is safe to transmute them in any direction + true => Ok(unsafe { core::mem::transmute::<&mut ShmBufInner, &mut zshmmut>(value) }), + false => Err(()), + } + } +} diff --git a/commons/zenoh-shm/src/api/client/mod.rs b/commons/zenoh-shm/src/api/client/mod.rs new file mode 100644 index 0000000000..4a147cbf67 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod shm_client; +pub mod shm_segment; diff --git a/commons/zenoh-shm/src/api/client/shm_client.rs b/commons/zenoh-shm/src/api/client/shm_client.rs new file mode 100644 index 0000000000..e25f818912 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shm_client.rs @@ -0,0 +1,28 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Debug, sync::Arc}; + +use zenoh_result::ZResult; + +use super::shm_segment::ShmSegment; +use crate::api::common::types::SegmentID; + +/// ShmClient - client factory implementation for particular shared memory protocol +#[zenoh_macros::unstable_doc] +pub trait ShmClient: Debug + Send + Sync { + /// Attach to particular shared memory segment + #[zenoh_macros::unstable_doc] + fn attach(&self, segment: SegmentID) -> ZResult>; +} diff --git a/commons/zenoh-shm/src/api/client/shm_segment.rs b/commons/zenoh-shm/src/api/client/shm_segment.rs new file mode 100644 index 0000000000..8744fbb765 --- /dev/null +++ b/commons/zenoh-shm/src/api/client/shm_segment.rs @@ -0,0 +1,27 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
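Note on the buffer types above: `ZShm`/`zshm` and `ZShmMut`/`zshmmut` form owned/borrowed pairs in the `String`/`str` style, and `#[repr(transparent)]` is what makes their transmutes sound. A self-contained model of the pattern under that assumption, with illustrative names:

```rust
use std::{borrow::Borrow, ops::Deref};

// Owned handle (stands in for ZShm / ZShmMut).
#[repr(transparent)]
#[derive(Clone)]
pub struct Owned(Vec<u8>);

// Borrowed view (stands in for zshm / zshmmut); the lowercase name
// mirrors the diff's `zshm` convention.
#[allow(non_camel_case_types)]
#[repr(transparent)]
pub struct borrowed(Owned);

impl Borrow<borrowed> for Owned {
    fn borrow(&self) -> &borrowed {
        // SAFETY: both types are #[repr(transparent)] over the same inner
        // type, so `&Owned` and `&borrowed` have identical layout.
        unsafe { &*(self as *const Owned as *const borrowed) }
    }
}

impl ToOwned for borrowed {
    type Owned = Owned;
    fn to_owned(&self) -> Owned {
        self.0.clone()
    }
}

impl Deref for borrowed {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.0 .0
    }
}

fn main() {
    let owned = Owned(vec![1, 2, 3]);
    let view: &borrowed = owned.borrow();
    assert_eq!(&view[..], &[1, 2, 3]);
}
```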
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{fmt::Debug, sync::atomic::AtomicPtr};
+
+use zenoh_result::ZResult;
+
+use crate::api::common::types::ChunkID;
+
+/// ShmSegment - RAII interface to interact with a particular shared memory segment
+#[zenoh_macros::unstable_doc]
+pub trait ShmSegment: Debug + Send + Sync {
+    /// Obtain the actual region of memory identified by its id
+    #[zenoh_macros::unstable_doc]
+    fn map(&self, chunk: ChunkID) -> ZResult<AtomicPtr<u8>>;
+}
diff --git a/commons/zenoh-shm/src/api/client_storage/mod.rs b/commons/zenoh-shm/src/api/client_storage/mod.rs
new file mode 100644
index 0000000000..07b4cd88bf
--- /dev/null
+++ b/commons/zenoh-shm/src/api/client_storage/mod.rs
@@ -0,0 +1,156 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{
+    collections::HashMap,
+    sync::{Arc, RwLock},
+};
+
+use static_init::dynamic;
+use zenoh_result::{bail, ZResult};
+
+use crate::{
+    api::{
+        client::{shm_client::ShmClient, shm_segment::ShmSegment},
+        common::types::ProtocolID,
+        protocol_implementations::posix::{
+            posix_shm_client::PosixShmClient, protocol_id::POSIX_PROTOCOL_ID,
+        },
+    },
+    reader::{ClientStorage, GlobalDataSegmentID},
+};
+
+#[dynamic(lazy, drop)]
+/// A global lazily-initialized SHM client storage. When initialized,
+/// it contains the default client set, see [with_default_client_set](ShmClientStorage::with_default_client_set)
+#[zenoh_macros::unstable_doc]
+pub static mut GLOBAL_CLIENT_STORAGE: Arc<ShmClientStorage> = Arc::new(
+    ShmClientStorage::builder()
+        .with_default_client_set()
+        .build(),
+);
+
+/// Builder to create new client storages
+#[zenoh_macros::unstable_doc]
+pub struct ShmClientSetBuilder;
+
+impl ShmClientSetBuilder {
+    /// Add a client to the storage (without including the default client set)
+    #[zenoh_macros::unstable_doc]
+    pub fn with_client(
+        self,
+        id: ProtocolID,
+        client: Arc<dyn ShmClient>,
+    ) -> ShmClientStorageBuilder {
+        let clients = HashMap::from([(id, client)]);
+        ShmClientStorageBuilder::new(clients)
+    }
+
+    /// Add a list of clients to the storage (without including the default client set)
+    #[zenoh_macros::unstable_doc]
+    pub fn with_clients(
+        self,
+        clients: &[(ProtocolID, Arc<dyn ShmClient>)],
+    ) -> ShmClientStorageBuilder {
+        let clients = clients.iter().cloned().collect();
+        ShmClientStorageBuilder::new(clients)
+    }
+
+    /// Include the default clients
+    #[zenoh_macros::unstable_doc]
+    pub fn with_default_client_set(self) -> ShmClientStorageBuilder {
+        let clients = HashMap::from([(
+            POSIX_PROTOCOL_ID,
+            Arc::new(PosixShmClient {}) as Arc<dyn ShmClient>,
+        )]);
+        ShmClientStorageBuilder::new(clients)
+    }
+}
+
+#[zenoh_macros::unstable_doc]
+pub struct ShmClientStorageBuilder {
+    clients: HashMap<ProtocolID, Arc<dyn ShmClient>>,
+}
+
+impl ShmClientStorageBuilder {
+    fn new(clients: HashMap<ProtocolID, Arc<dyn ShmClient>>) -> Self {
+        Self { clients }
+    }
+
+    /// Add a client to the storage
+    #[zenoh_macros::unstable_doc]
+    pub fn with_client(mut self, id: ProtocolID, client: Arc<dyn ShmClient>) -> ZResult<Self> {
+        match self.clients.entry(id) {
+            std::collections::hash_map::Entry::Occupied(occupied) => {
+                bail!("Client already exists for id {id}: {:?}!", occupied)
+            }
+            std::collections::hash_map::Entry::Vacant(vacant) => {
+                vacant.insert(client as Arc<dyn ShmClient>);
+                Ok(self)
+            }
+        }
+    }
+
+    /// Add a list of clients to the storage
+    #[zenoh_macros::unstable_doc]
+    pub fn with_clients(mut self, clients: &[(ProtocolID, Arc<dyn ShmClient>)]) -> Self {
+        self.clients.extend(clients.iter().cloned());
+        self
+    }
+
+    /// Build the storage with the parameters specified in the previous steps
+    #[zenoh_macros::unstable_doc]
+    pub fn build(self) -> ShmClientStorage {
+        ShmClientStorage::new(self.clients)
+    }
+}
+
+/// A storage for SHM clients.
+/// A Runtime or Session constructed with an instance of this type gains the capability to read
+/// SHM buffers for the protocols added to this instance.
+#[zenoh_macros::unstable_doc]
+#[derive(Debug)]
+pub struct ShmClientStorage {
+    pub(crate) clients: ClientStorage<Arc<dyn ShmClient>>,
+    pub(crate) segments: RwLock<HashMap<GlobalDataSegmentID, Arc<dyn ShmSegment>>>,
+}
+
+impl Eq for ShmClientStorage {}
+
+impl PartialEq for ShmClientStorage {
+    fn eq(&self, other: &Self) -> bool {
+        std::ptr::eq(self, other)
+    }
+}
+
+impl ShmClientStorage {
+    /// Get the builder to construct a new storage
+    #[zenoh_macros::unstable_doc]
+    pub fn builder() -> ShmClientSetBuilder {
+        ShmClientSetBuilder
+    }
+
+    /// Get the list of supported SHM protocols.
+    #[zenoh_macros::unstable_doc]
+    pub fn supported_protocols(&self) -> Vec<ProtocolID> {
+        self.clients.get_clients().keys().copied().collect()
+    }
+
+    fn new(clients: HashMap<ProtocolID, Arc<dyn ShmClient>>) -> Self {
+        Self {
+            clients: ClientStorage::new(clients),
+            segments: RwLock::default(),
+        }
+    }
+}
diff --git a/commons/zenoh-shm/src/api/common/mod.rs b/commons/zenoh-shm/src/api/common/mod.rs
new file mode 100644
index 0000000000..222c7286bf
--- /dev/null
+++ b/commons/zenoh-shm/src/api/common/mod.rs
@@ -0,0 +1,15 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+pub mod types;
diff --git a/commons/zenoh-shm/src/api/common/types.rs b/commons/zenoh-shm/src/api/common/types.rs
new file mode 100644
index 0000000000..5f423e7459
--- /dev/null
+++ b/commons/zenoh-shm/src/api/common/types.rs
@@ -0,0 +1,27 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+/// Unique protocol identifier.
+/// Here is a contract: it is up to the user to make sure that incompatible ShmClient
+/// and ShmProviderBackend implementations never use the same ProtocolID
+#[zenoh_macros::unstable_doc]
+pub type ProtocolID = u32;
+
+/// Unique segment identifier
+#[zenoh_macros::unstable_doc]
+pub type SegmentID = u32;
+
+/// Chunk id within its segment
+#[zenoh_macros::unstable_doc]
+pub type ChunkID = u32;
diff --git a/commons/zenoh-shm/src/api/mod.rs b/commons/zenoh-shm/src/api/mod.rs
new file mode 100644
index 0000000000..a87188da29
--- /dev/null
+++ b/commons/zenoh-shm/src/api/mod.rs
@@ -0,0 +1,20 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+pub mod buffer;
+pub mod client;
+pub mod client_storage;
+pub mod common;
+pub mod protocol_implementations;
+pub mod provider;
diff --git a/commons/zenoh-shm/src/api/protocol_implementations/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs
new file mode 100644
index 0000000000..df92f63536
--- /dev/null
+++ b/commons/zenoh-shm/src/api/protocol_implementations/mod.rs
@@ -0,0 +1,15 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+pub mod posix;
diff --git a/zenoh/src/plugins/mod.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs
similarity index 65%
rename from zenoh/src/plugins/mod.rs
rename to commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs
index be70cf75ec..e5dd7db33e 100644
--- a/zenoh/src/plugins/mod.rs
+++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/mod.rs
@@ -12,13 +12,8 @@
 // ZettaScale Zenoh Team,
 //
 
-//! ⚠️ WARNING ⚠️
-//!
-//! This module is intended for Zenoh's internal use.
-//!
-//! [Click here for Zenoh's documentation](../../zenoh/index.html)
-pub(crate) mod loader;
-pub(crate) mod sealed;
+pub mod posix_shm_client;
+pub mod posix_shm_provider_backend;
+pub mod protocol_id;
 
-#[zenoh_macros::unstable]
-pub use sealed::*;
+pub(crate) mod posix_shm_segment;
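Note on the client storage introduced above: a hedged construction sketch; the module path follows this diff's file layout and the call sequence mirrors the builder methods shown, but it is illustrative rather than documented API:

```rust
use std::sync::Arc;

use zenoh_shm::api::client_storage::ShmClientStorage;

fn main() {
    // Default client set only: the POSIX client registered above.
    let storage: Arc<ShmClientStorage> = Arc::new(
        ShmClientStorage::builder()
            .with_default_client_set()
            .build(),
    );
    // POSIX_PROTOCOL_ID (0) should be reported as supported.
    println!("supported protocols: {:?}", storage.supported_protocols());
}
```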
diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs
new file mode 100644
index 0000000000..73e2a96cd9
--- /dev/null
+++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_client.rs
@@ -0,0 +1,36 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::sync::Arc;
+
+use zenoh_result::ZResult;
+
+use super::posix_shm_segment::PosixShmSegment;
+use crate::api::{
+    client::{shm_client::ShmClient, shm_segment::ShmSegment},
+    common::types::SegmentID,
+};
+
+/// Client factory implementation for a particular shared memory protocol
+#[zenoh_macros::unstable_doc]
+#[derive(Debug)]
+pub struct PosixShmClient;
+
+impl ShmClient for PosixShmClient {
+    /// Attach to a particular shared memory segment
+    #[zenoh_macros::unstable_doc]
+    fn attach(&self, segment: SegmentID) -> ZResult<Arc<dyn ShmSegment>> {
+        Ok(Arc::new(PosixShmSegment::open(segment)?))
+    }
+}
diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs
new file mode 100644
index 0000000000..663379e034
--- /dev/null
+++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_provider_backend.rs
@@ -0,0 +1,292 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{
+    borrow::Borrow,
+    cmp,
+    collections::BinaryHeap,
+    num::NonZeroUsize,
+    sync::{
+        atomic::{AtomicPtr, AtomicUsize, Ordering},
+        Mutex,
+    },
+};
+
+use zenoh_core::zlock;
+use zenoh_result::ZResult;
+
+use super::posix_shm_segment::PosixShmSegment;
+use crate::api::{
+    common::types::ChunkID,
+    provider::{
+        chunk::{AllocatedChunk, ChunkDescriptor},
+        shm_provider_backend::ShmProviderBackend,
+        types::{AllocAlignment, ChunkAllocResult, MemoryLayout, ZAllocError, ZLayoutError},
+    },
+};
+
+// TODO: The MIN_FREE_CHUNK_SIZE limitation is made to reduce memory fragmentation and lower
+// the CPU time needed to defragment() - that's reasonable, and there is an additional thing here:
+// our SHM/zero-copy functionality outperforms common buffer transmission only starting from ~1K
+// buffer sizes. In other words, there is a minimal size threshold below which SHM is not worth
+// using - and it would be good to synchronize that threshold with the MIN_FREE_CHUNK_SIZE
+// limitation!
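Note on the constant introduced just below: allocations only split a free chunk when the leftover stays above this floor. A simplified, self-contained model of that split rule (a sketch, not the backend's actual code):

```rust
// Simplified model of the split rule driven by MIN_FREE_CHUNK_SIZE below:
// a free chunk is split only when the leftover is still worth tracking.
const MIN_FREE_CHUNK_SIZE: usize = 1_024;

fn leftover_after(free_chunk: usize, wanted: usize) -> Option<usize> {
    let leftover = free_chunk.checked_sub(wanted)?;
    // Small remainders are handed to the caller together with the
    // allocation instead of polluting the free list.
    (leftover >= MIN_FREE_CHUNK_SIZE).then_some(leftover)
}

fn main() {
    assert_eq!(leftover_after(4_096, 1_024), Some(3_072));
    assert_eq!(leftover_after(1_500, 1_024), None); // 476 B stays with the allocation
}
```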
+const MIN_FREE_CHUNK_SIZE: usize = 1_024;
+
+#[derive(Eq, Copy, Clone, Debug)]
+struct Chunk {
+    offset: ChunkID,
+    size: NonZeroUsize,
+}
+
+impl Ord for Chunk {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        self.size.cmp(&other.size)
+    }
+}
+
+impl PartialOrd for Chunk {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl PartialEq for Chunk {
+    fn eq(&self, other: &Self) -> bool {
+        self.size == other.size
+    }
+}
+
+/// Builder to create a posix SHM provider
+#[zenoh_macros::unstable_doc]
+pub struct PosixShmProviderBackendBuilder;
+
+impl PosixShmProviderBackendBuilder {
+    /// Use an existing layout
+    #[zenoh_macros::unstable_doc]
+    pub fn with_layout<Layout: Borrow<MemoryLayout>>(
+        self,
+        layout: Layout,
+    ) -> LayoutedPosixShmProviderBackendBuilder<Layout> {
+        LayoutedPosixShmProviderBackendBuilder { layout }
+    }
+
+    /// Construct a layout in place using the given arguments
+    #[zenoh_macros::unstable_doc]
+    pub fn with_layout_args(
+        self,
+        size: usize,
+        alignment: AllocAlignment,
+    ) -> Result<LayoutedPosixShmProviderBackendBuilder<MemoryLayout>, ZLayoutError> {
+        let layout = MemoryLayout::new(size, alignment)?;
+        Ok(LayoutedPosixShmProviderBackendBuilder { layout })
+    }
+
+    /// Construct a layout in place from a size (the default alignment will be used)
+    #[zenoh_macros::unstable_doc]
+    pub fn with_size(
+        self,
+        size: usize,
+    ) -> Result<LayoutedPosixShmProviderBackendBuilder<MemoryLayout>, ZLayoutError> {
+        let layout = MemoryLayout::new(size, AllocAlignment::default())?;
+        Ok(LayoutedPosixShmProviderBackendBuilder { layout })
+    }
+}
+
+#[zenoh_macros::unstable_doc]
+pub struct LayoutedPosixShmProviderBackendBuilder<Layout: Borrow<MemoryLayout>> {
+    layout: Layout,
+}
+
+impl<Layout: Borrow<MemoryLayout>> LayoutedPosixShmProviderBackendBuilder<Layout> {
+    /// Try to create a PosixShmProviderBackend
+    #[zenoh_macros::unstable_doc]
+    pub fn res(self) -> ZResult<PosixShmProviderBackend> {
+        PosixShmProviderBackend::new(self.layout.borrow())
+    }
+}
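Note on the free list used by the backend below: it is a `BinaryHeap` of `Chunk`, and since `Chunk`'s ordering (and equality) compare sizes only, `pop()` always returns the largest free chunk -- the "biggest left-over" strategy the `alloc()` comments reference. A compilable model:

```rust
use std::{cmp, collections::BinaryHeap};

// Mirrors the diff: Chunk ordering and equality consider size only, so the
// max-heap pops the largest free chunk first.
#[derive(Clone, Copy, Debug)]
struct Chunk {
    offset: u32,
    size: usize,
}

impl PartialEq for Chunk {
    fn eq(&self, other: &Self) -> bool {
        self.size == other.size
    }
}
impl Eq for Chunk {}

impl Ord for Chunk {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.size.cmp(&other.size)
    }
}
impl PartialOrd for Chunk {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut free_list = BinaryHeap::new();
    free_list.push(Chunk { offset: 0, size: 2_048 });
    free_list.push(Chunk { offset: 2_048, size: 8_192 });
    // The biggest chunk comes out first, leaving the biggest leftover.
    assert_eq!(free_list.pop().map(|c| (c.offset, c.size)), Some((2_048, 8_192)));
}
```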
+/// A backend for ShmProvider based on POSIX shared memory.
+/// This is the default general-purpose backend shipped with Zenoh.
+#[zenoh_macros::unstable_doc]
+pub struct PosixShmProviderBackend {
+    available: AtomicUsize,
+    segment: PosixShmSegment,
+    free_list: Mutex<BinaryHeap<Chunk>>,
+    alignment: AllocAlignment,
+}
+
+impl PosixShmProviderBackend {
+    /// Get the builder to construct a new instance
+    #[zenoh_macros::unstable_doc]
+    pub fn builder() -> PosixShmProviderBackendBuilder {
+        PosixShmProviderBackendBuilder
+    }
+
+    fn new(layout: &MemoryLayout) -> ZResult<Self> {
+        let segment = PosixShmSegment::create(layout.size())?;
+
+        let mut free_list = BinaryHeap::new();
+        let root_chunk = Chunk {
+            offset: 0,
+            size: layout.size(),
+        };
+        free_list.push(root_chunk);
+
+        tracing::trace!(
+            "Created PosixShmProviderBackend id {}, layout {:?}",
+            segment.segment.id(),
+            layout
+        );
+
+        Ok(Self {
+            available: AtomicUsize::new(layout.size().get()),
+            segment,
+            free_list: Mutex::new(free_list),
+            alignment: layout.alignment(),
+        })
+    }
+}
+
+impl ShmProviderBackend for PosixShmProviderBackend {
+    fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult {
+        tracing::trace!("PosixShmProviderBackend::alloc({:?})", layout);
+
+        let required_len = layout.size();
+
+        if self.available.load(Ordering::Relaxed) < required_len.get() {
+            tracing::trace!("PosixShmProviderBackend does not have sufficient free memory to allocate {:?}, try de-fragmenting!", layout);
+            return Err(ZAllocError::OutOfMemory);
+        }
+
+        let mut guard = zlock!(self.free_list);
+        // The strategy taken is the same as in some Unix System V implementations -- as
+        // described in Bach's famous book -- in essence: keep an ordered list of free slots
+        // and always pick the biggest one, as that leaves the biggest left-over.
+        match guard.pop() {
+            Some(mut chunk) if chunk.size >= required_len => {
+                // NOTE: don't lose any chunks here, as that would lead to a memory leak
+                tracing::trace!("Allocator selected Chunk ({:?})", &chunk);
+                if chunk.size.get() - required_len.get() >= MIN_FREE_CHUNK_SIZE {
+                    let free_chunk = Chunk {
+                        offset: chunk.offset + required_len.get() as ChunkID,
+                        // SAFETY: this is safe because we always operate on a leftover, which is checked above!
+                        size: unsafe {
+                            NonZeroUsize::new_unchecked(chunk.size.get() - required_len.get())
+                        },
+                    };
+                    tracing::trace!("The allocation will leave a Free Chunk: {:?}", &free_chunk);
+                    guard.push(free_chunk);
+                    chunk.size = required_len;
+                }
+                self.available
+                    .fetch_sub(chunk.size.get(), Ordering::Relaxed);
+
+                let descriptor =
+                    ChunkDescriptor::new(self.segment.segment.id(), chunk.offset, chunk.size);
+
+                Ok(AllocatedChunk {
+                    descriptor,
+                    data: unsafe { AtomicPtr::new(self.segment.segment.elem_mut(chunk.offset)) },
+                })
+            }
+            Some(c) => {
+                tracing::trace!("PosixShmProviderBackend::alloc({:?}) cannot find any big enough chunk\nShmManager::free_list = {:?}", layout, self.free_list);
+                guard.push(c);
+                Err(ZAllocError::NeedDefragment)
+            }
+            None => {
+                // NOTE: this should never happen! If it does, there is a critical bug somewhere!
+                let err = format!("PosixShmProviderBackend::alloc({:?}) cannot find any available chunk\nShmManager::free_list = {:?}", layout, self.free_list);
+                #[cfg(feature = "test")]
+                panic!("{err}");
+                #[cfg(not(feature = "test"))]
+                {
+                    tracing::error!("{err}");
+                    Err(ZAllocError::OutOfMemory)
+                }
+            }
+        }
+    }
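Note on `defragment()`, just below: free chunks are merged when one ends exactly where the next begins. A self-contained model of the merge test (simplified types, not the backend's code):

```rust
// Self-contained model of try_merge_adjacent_chunks() in defragment()
// below: two free chunks merge when the first ends where the second begins.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Chunk {
    offset: u32,
    size: usize,
}

fn try_merge(a: Chunk, b: Chunk) -> Option<Chunk> {
    (a.offset as usize + a.size == b.offset as usize).then_some(Chunk {
        offset: a.offset,
        size: a.size + b.size,
    })
}

fn main() {
    let a = Chunk { offset: 0, size: 1_024 };
    let b = Chunk { offset: 1_024, size: 2_048 };
    assert_eq!(try_merge(a, b), Some(Chunk { offset: 0, size: 3_072 }));
    let far = Chunk { offset: 4_096, size: 512 };
    assert_eq!(try_merge(a, far), None); // a hole between them: no merge
}
```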
+
+    fn free(&self, chunk: &ChunkDescriptor) {
+        let free_chunk = Chunk {
+            offset: chunk.chunk,
+            size: chunk.len,
+        };
+        self.available
+            .fetch_add(free_chunk.size.get(), Ordering::Relaxed);
+        zlock!(self.free_list).push(free_chunk);
+    }
+
+    fn defragment(&self) -> usize {
+        fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option<Chunk> {
+            let end_offset = a.offset as usize + a.size.get();
+            if end_offset == b.offset as usize {
+                Some(Chunk {
+                    // SAFETY: this is safe because we operate on non-zero sizes and it will never overflow
+                    size: unsafe { NonZeroUsize::new_unchecked(a.size.get() + b.size.get()) },
+                    offset: a.offset,
+                })
+            } else {
+                None
+            }
+        }
+
+        let mut largest = 0usize;
+
+        // TODO: optimize this!
+        // This is an old legacy algorithm for merging adjacent chunks:
+        // we extract the chunks into a separate container, sort them by offset, and then
+        // check each chunk for adjacency with its neighbour. Adjacent chunks are merged and
+        // returned to the temporary container. If a chunk is not adjacent to its neighbour,
+        // it is placed back into self.free_list
+        let mut guard = zlock!(self.free_list);
+        if guard.len() > 1 {
+            let mut fbs: Vec<Chunk> = guard.drain().collect();
+            fbs.sort_by(|x, y| x.offset.cmp(&y.offset));
+            let mut current = fbs.remove(0);
+            let mut i = 0;
+            let n = fbs.len();
+            for chunk in fbs.iter() {
+                i += 1;
+                let next = *chunk;
+                match try_merge_adjacent_chunks(&current, &next) {
+                    Some(c) => {
+                        current = c;
+                        largest = largest.max(current.size.get());
+                        if i == n {
+                            guard.push(current)
+                        }
+                    }
+                    None => {
+                        guard.push(current);
+                        if i == n {
+                            guard.push(next);
+                        } else {
+                            current = next;
+                        }
+                    }
+                }
+            }
+        }
+        largest
+    }
+
+    fn available(&self) -> usize {
+        self.available.load(Ordering::Relaxed)
+    }
+
+    fn layout_for(&self, layout: MemoryLayout) -> Result<MemoryLayout, ZLayoutError> {
+        layout.extend(self.alignment)
+    }
+}
diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs
new file mode 100644
index 0000000000..3a08d2be55
--- /dev/null
+++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/posix_shm_segment.rs
@@ -0,0 +1,50 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{num::NonZeroUsize, sync::atomic::AtomicPtr}; + +use zenoh_result::ZResult; + +use crate::{ + api::{ + client::shm_segment::ShmSegment, + common::types::{ChunkID, SegmentID}, + }, + posix_shm::array::ArrayInSHM, +}; + +const POSIX_SHM_SEGMENT_PREFIX: &str = "posix_shm_provider_segment"; + +#[derive(Debug)] +pub(crate) struct PosixShmSegment { + pub(crate) segment: ArrayInSHM, +} + +impl PosixShmSegment { + pub(crate) fn create(alloc_size: NonZeroUsize) -> ZResult { + let segment = ArrayInSHM::create(alloc_size.get(), POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } + + pub(crate) fn open(id: SegmentID) -> ZResult { + let segment = ArrayInSHM::open(id, POSIX_SHM_SEGMENT_PREFIX)?; + Ok(Self { segment }) + } +} + +impl ShmSegment for PosixShmSegment { + fn map(&self, chunk: ChunkID) -> ZResult> { + unsafe { Ok(AtomicPtr::new(self.segment.elem_mut(chunk))) } + } +} diff --git a/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs new file mode 100644 index 0000000000..cff39f921a --- /dev/null +++ b/commons/zenoh-shm/src/api/protocol_implementations/posix/protocol_id.rs @@ -0,0 +1,19 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use crate::api::common::types::ProtocolID; + +/// Protocol identifier to use when creating ShmProvider +#[zenoh_macros::unstable_doc] +pub const POSIX_PROTOCOL_ID: ProtocolID = 0; diff --git a/commons/zenoh-shm/src/api/provider/chunk.rs b/commons/zenoh-shm/src/api/provider/chunk.rs new file mode 100644 index 0000000000..fe7d0d5cb6 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/chunk.rs @@ -0,0 +1,53 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{num::NonZeroUsize, sync::atomic::AtomicPtr}; + +use crate::api::common::types::{ChunkID, SegmentID}; + +/// Uniquely identifies the particular chunk within particular segment +#[zenoh_macros::unstable_doc] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ChunkDescriptor { + pub segment: SegmentID, + pub chunk: ChunkID, + pub len: NonZeroUsize, +} + +impl ChunkDescriptor { + /// Create a new Chunk Descriptor + #[zenoh_macros::unstable_doc] + pub fn new(segment: SegmentID, chunk: ChunkID, len: NonZeroUsize) -> Self { + Self { + segment, + chunk, + len, + } + } +} + +/// A recently-allocated chunk. 
+#[zenoh_macros::unstable_doc] +pub struct AllocatedChunk { + pub descriptor: ChunkDescriptor, + pub data: AtomicPtr, +} + +impl AllocatedChunk { + /// Create a new Allocated Chunk + #[zenoh_macros::unstable_doc] + pub fn new(descriptor: ChunkDescriptor, data: AtomicPtr) -> Self { + Self { descriptor, data } + } +} diff --git a/commons/zenoh-shm/src/api/provider/mod.rs b/commons/zenoh-shm/src/api/provider/mod.rs new file mode 100644 index 0000000000..2d25e37c3d --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/mod.rs @@ -0,0 +1,18 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod chunk; +pub mod shm_provider; +pub mod shm_provider_backend; +pub mod types; diff --git a/commons/zenoh-shm/src/api/provider/shm_provider.rs b/commons/zenoh-shm/src/api/provider/shm_provider.rs new file mode 100644 index 0000000000..bab1588e0c --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/shm_provider.rs @@ -0,0 +1,1007 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::VecDeque, + future::{Future, IntoFuture}, + marker::PhantomData, + num::NonZeroUsize, + pin::Pin, + sync::{atomic::Ordering, Arc, Mutex}, + time::Duration, +}; + +use async_trait::async_trait; +use zenoh_core::{Resolvable, Wait}; +use zenoh_result::ZResult; + +use super::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shm_provider_backend::ShmProviderBackend, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, MemoryLayout, + ZAllocError, ZLayoutAllocError, ZLayoutError, + }, +}; +use crate::{ + api::{buffer::zshmmut::ZShmMut, common::types::ProtocolID}, + header::{ + allocated_descriptor::AllocatedHeaderDescriptor, descriptor::HeaderDescriptor, + storage::GLOBAL_HEADER_STORAGE, + }, + watchdog::{ + allocated_watchdog::AllocatedWatchdog, + confirmator::{ConfirmedDescriptor, GLOBAL_CONFIRMATOR}, + descriptor::Descriptor, + storage::GLOBAL_STORAGE, + validator::GLOBAL_VALIDATOR, + }, + ShmBufInfo, ShmBufInner, +}; + +#[derive(Debug)] +struct BusyChunk { + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + _watchdog: AllocatedWatchdog, +} + +impl BusyChunk { + fn new( + descriptor: ChunkDescriptor, + header: AllocatedHeaderDescriptor, + watchdog: AllocatedWatchdog, + ) -> Self { + Self { + descriptor, + header, + _watchdog: watchdog, + } + } +} + +struct AllocData<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + size: usize, + alignment: AllocAlignment, + provider: &'a ShmProvider, +} + +#[zenoh_macros::unstable_doc] +pub struct AllocLayoutSizedBuilder<'a, IDSource, Backend>(AllocData<'a, IDSource, Backend>) +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend; + +impl<'a, IDSource, Backend> 
AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + fn new(provider: &'a ShmProvider, size: usize) -> Self { + Self(AllocData { + provider, + size, + alignment: AllocAlignment::default(), + }) + } + + /// Set alignment + #[zenoh_macros::unstable_doc] + pub fn with_alignment(self, alignment: AllocAlignment) -> Self { + Self(AllocData { + provider: self.0.provider, + size: self.0.size, + alignment, + }) + } + + /// Try to build an allocation layout + #[zenoh_macros::unstable_doc] + pub fn into_layout(self) -> Result, ZLayoutError> { + AllocLayout::new(self.0) + } + + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, Policy> { + AllocBuilder2 { + data: self.0, + _phantom: PhantomData, + } + } +} + +#[zenoh_macros::unstable_doc] +impl<'a, IDSource, Backend> Resolvable for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend> Wait for AllocLayoutSizedBuilder<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + fn wait(self) -> ::To { + let builder = AllocBuilder2::<'a, IDSource, Backend, JustAlloc> { + data: self.0, + _phantom: PhantomData, + }; + builder.wait() + } +} + +/// A layout for allocations. +/// This is a pre-calculated layout suitable for making series of similar allocations +/// adopted for particular ShmProvider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + size: NonZeroUsize, + provider_layout: MemoryLayout, + provider: &'a ShmProvider, +} + +impl<'a, IDSource, Backend> AllocLayout<'a, IDSource, Backend> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + /// Allocate the new buffer with this layout + #[zenoh_macros::unstable_doc] + pub fn alloc(&'a self) -> AllocBuilder<'a, IDSource, Backend> { + AllocBuilder { + layout: self, + _phantom: PhantomData, + } + } + + fn new(data: AllocData<'a, IDSource, Backend>) -> Result { + // NOTE: Depending on internal implementation, provider's backend might relayout + // the allocations for bigger alignment (ex. 4-byte aligned allocation to 8-bytes aligned) + + // Create layout for specified arguments + let layout = MemoryLayout::new(data.size, data.alignment) + .map_err(|_| ZLayoutError::IncorrectLayoutArgs)?; + let size = layout.size(); + + // Obtain provider's layout for our layout + let provider_layout = data + .provider + .backend + .layout_for(layout) + .map_err(|_| ZLayoutError::ProviderIncompatibleLayout)?; + + Ok(Self { + size, + provider_layout, + provider: data.provider, + }) + } +} + +/// Trait for deallocation policies. 
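// --- Editor's sketch (not part of the diff): the intended chaining of the
// sized builder above — derive a reusable AllocLayout once, then allocate from
// it repeatedly. `provider` stands for any already-built ShmProvider; the
// function name and sizes are illustrative only.
fn layout_usage<IDSource: ProtocolIDSource, Backend: ShmProviderBackend>(
    provider: &ShmProvider<IDSource, Backend>,
) -> Result<(), ZLayoutError> {
    let layout = provider
        .alloc(512)
        .with_alignment(AllocAlignment::new(3)?) // 8-byte alignment, see types.rs
        .into_layout()?;
    // every alloc() on the layout reuses the pre-validated backend layout
    let _buf = layout.alloc().wait();
    Ok(())
}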
+#[zenoh_macros::unstable_doc] +pub trait ForceDeallocPolicy { + fn dealloc( + provider: &ShmProvider, + ) -> bool; +} + +/// Try to dealloc optimal (currently eldest+1) chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocOptimal; +impl ForceDeallocPolicy for DeallocOptimal { + fn dealloc( + provider: &ShmProvider, + ) -> bool { + let mut guard = provider.busy_list.lock().unwrap(); + let chunk_to_dealloc = match guard.remove(1) { + Some(val) => val, + None => match guard.pop_front() { + Some(val) => val, + None => return false, + }, + }; + drop(guard); + + provider.backend.free(&chunk_to_dealloc.descriptor); + true + } +} + +/// Try to dealloc youngest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocYoungest; +impl ForceDeallocPolicy for DeallocYoungest { + fn dealloc( + provider: &ShmProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_back() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Try to dealloc eldest chunk +#[zenoh_macros::unstable_doc] +pub struct DeallocEldest; +impl ForceDeallocPolicy for DeallocEldest { + fn dealloc( + provider: &ShmProvider, + ) -> bool { + match provider.busy_list.lock().unwrap().pop_front() { + Some(val) => { + provider.backend.free(&val.descriptor); + true + } + None => false, + } + } +} + +/// Trait for allocation policies +#[zenoh_macros::unstable_doc] +pub trait AllocPolicy { + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult; +} + +/// Trait for async allocation policies +#[zenoh_macros::unstable_doc] +#[async_trait] +pub trait AsyncAllocPolicy: Send { + async fn alloc_async( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult; +} + +/// Just try to allocate +#[zenoh_macros::unstable_doc] +pub struct JustAlloc; +impl AllocPolicy for JustAlloc { + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + provider.backend.alloc(layout) + } +} + +/// Garbage collection policy. +/// Try to reclaim old buffers if allocation failed and allocate again +/// if the largest reclaimed chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for GarbageCollect +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::OutOfMemory) = result { + // try to alloc again only if GC managed to reclaim big enough chunk + if provider.garbage_collect() >= layout.size().get() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Defragmenting policy. 
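// --- Editor's sketch (not part of the diff): allocation policies compose
// purely at the type level, so "try to allocate, garbage-collect on OOM, then
// retry" is spelled as a nested type. The alias name is hypothetical, and both
// GarbageCollect parameters are written out explicitly rather than relying on
// its (extraction-stripped) defaults.
type GcThenRetry = GarbageCollect<JustAlloc, JustAlloc>;
// usage, given a layout built as sketched earlier:
//     layout.alloc().with_policy::<GcThenRetry>().wait()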
+/// Try to defragment if allocation failed and allocate again +/// if the largest defragmented chuk is not smaller than the one required +#[zenoh_macros::unstable_doc] +pub struct Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, +} +impl AllocPolicy for Defragment +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + let result = InnerPolicy::alloc(layout, provider); + if let Err(ZAllocError::NeedDefragment) = result { + // try to alloc again only if big enough chunk was defragmented + if provider.defragment() >= layout.size().get() { + return AltPolicy::alloc(layout, provider); + } + } + result + } +} + +/// Deallocating policy. +/// Forcely deallocate up to N buffers until allocation succeeds. +#[zenoh_macros::unstable_doc] +pub struct Deallocate< + const N: usize, + InnerPolicy = JustAlloc, + AltPolicy = InnerPolicy, + DeallocatePolicy = DeallocOptimal, +> where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + _phantom: PhantomData, + _phantom2: PhantomData, + _phantom3: PhantomData, +} +impl AllocPolicy + for Deallocate +where + InnerPolicy: AllocPolicy, + AltPolicy: AllocPolicy, + DeallocatePolicy: ForceDeallocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + let mut result = InnerPolicy::alloc(layout, provider); + for _ in 0..N { + match result { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + if !DeallocatePolicy::dealloc(provider) { + return result; + } + } + _ => { + return result; + } + } + result = AltPolicy::alloc(layout, provider); + } + result + } +} + +/// Blocking allocation policy. +/// This policy will block until the allocation succeeds. +/// Both sync and async modes available. +#[zenoh_macros::unstable_doc] +pub struct BlockOn +where + InnerPolicy: AllocPolicy, +{ + _phantom: PhantomData, +} + +#[async_trait] +impl AsyncAllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy + Send, +{ + async fn alloc_async( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! + tokio::time::sleep(Duration::from_millis(1)).await; + } + other_result => { + return other_result; + } + } + } + } +} +impl AllocPolicy for BlockOn +where + InnerPolicy: AllocPolicy, +{ + fn alloc( + layout: &MemoryLayout, + provider: &ShmProvider, + ) -> ChunkAllocResult { + loop { + match InnerPolicy::alloc(layout, provider) { + Err(ZAllocError::NeedDefragment) | Err(ZAllocError::OutOfMemory) => { + // TODO: implement provider's async signalling instead of this! 
+ std::thread::sleep(Duration::from_millis(1)); + } + other_result => { + return other_result; + } + } + } + } +} + +// TODO: allocator API +/*pub struct ShmAllocator< + 'a, + Policy: AllocPolicy, + IDSource, + Backend: ShmProviderBackend, +> { + provider: &'a ShmProvider, + allocations: lockfree::map::Map, ShmBufInner>, + _phantom: PhantomData, +} + +impl<'a, Policy: AllocPolicy, IDSource, Backend: ShmProviderBackend> + ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate(&self, layout: std::alloc::Layout) -> BufAllocResult { + self.provider + .alloc_layout() + .size(layout.size()) + .alignment(AllocAlignment::new(layout.align() as u32)) + .res()? + .alloc() + .res() + } +} + +unsafe impl<'a, Policy: AllocPolicy, IDSource, Backend: ShmProviderBackend> + allocator_api2::alloc::Allocator for ShmAllocator<'a, Policy, IDSource, Backend> +{ + fn allocate( + &self, + layout: std::alloc::Layout, + ) -> Result, allocator_api2::alloc::AllocError> { + let allocation = self + .allocate(layout) + .map_err(|_| allocator_api2::alloc::AllocError)?; + + let inner = allocation.buf.load(Ordering::Relaxed); + let ptr = NonNull::new(inner).ok_or(allocator_api2::alloc::AllocError)?; + let sl = unsafe { std::slice::from_raw_parts(inner, 2) }; + let res = NonNull::from(sl); + + self.allocations.insert(ptr, allocation); + Ok(res) + } + + unsafe fn deallocate(&self, ptr: std::ptr::NonNull, _layout: std::alloc::Layout) { + let _ = self.allocations.remove(&ptr); + } +}*/ + +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder2< + 'a, + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, + Policy = JustAlloc, +> { + data: AllocData<'a, IDSource, Backend>, + _phantom: PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder2<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder2 { + data: self.data, + _phantom: PhantomData, + } + } +} + +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + type To = BufLayoutAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, + Policy: AllocPolicy, +{ + fn wait(self) -> ::To { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + + layout + .alloc() + .with_policy::() + .wait() + .map_err(ZLayoutAllocError::Alloc) + } +} + +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder2<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + type Output = ::To; + type IntoFuture = Pin::Output> + 'a + Send>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + let layout = AllocLayout::new(self.data).map_err(ZLayoutAllocError::Layout)?; + layout + .alloc() + .with_policy::() + .await + .map_err(ZLayoutAllocError::Alloc) + } + .into_future(), + ) + } +} + +/// Builder for allocations +#[zenoh_macros::unstable_doc] +pub struct AllocBuilder< + 'a, + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, + Policy = JustAlloc, +> { + layout: &'a AllocLayout<'a, IDSource, Backend>, + _phantom: 
PhantomData, +} + +// Generic impl +impl<'a, IDSource, Backend, Policy> AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + /// Set the allocation policy + #[zenoh_macros::unstable_doc] + pub fn with_policy(self) -> AllocBuilder<'a, IDSource, Backend, OtherPolicy> { + AllocBuilder { + layout: self.layout, + _phantom: PhantomData, + } + } +} + +impl<'a, IDSource, Backend, Policy> Resolvable for AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + type To = BufAllocResult; +} + +// Sync alloc policy +impl<'a, IDSource, Backend, Policy> Wait for AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, + Policy: AllocPolicy, +{ + fn wait(self) -> ::To { + self.layout + .provider + .alloc_inner::(self.layout.size, &self.layout.provider_layout) + } +} + +// Async alloc policy +impl<'a, IDSource, Backend, Policy> IntoFuture for AllocBuilder<'a, IDSource, Backend, Policy> +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend + Sync, + Policy: AsyncAllocPolicy, +{ + type Output = ::To; + type IntoFuture = Pin::To> + 'a + Send>>; + + fn into_future(self) -> Self::IntoFuture { + Box::pin( + async move { + self.layout + .provider + .alloc_inner_async::(self.layout.size, &self.layout.provider_layout) + .await + } + .into_future(), + ) + } +} + +#[zenoh_macros::unstable_doc] +pub struct ShmProviderBuilder; +impl ShmProviderBuilder { + /// Get the builder to construct ShmProvider + #[zenoh_macros::unstable_doc] + pub fn builder() -> Self { + Self + } + + /// Set compile-time-evaluated protocol ID (preferred) + #[zenoh_macros::unstable_doc] + pub fn protocol_id(self) -> ShmProviderBuilderID> { + ShmProviderBuilderID::> { + id: StaticProtocolID, + } + } + + /// Set runtime-evaluated protocol ID + #[zenoh_macros::unstable_doc] + pub fn dynamic_protocol_id(self, id: ProtocolID) -> ShmProviderBuilderID { + ShmProviderBuilderID:: { + id: DynamicProtocolID::new(id), + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct ShmProviderBuilderID { + id: IDSource, +} +impl ShmProviderBuilderID { + /// Set the backend + #[zenoh_macros::unstable_doc] + pub fn backend( + self, + backend: Backend, + ) -> ShmProviderBuilderBackendID { + ShmProviderBuilderBackendID { + backend, + id: self.id, + } + } +} + +#[zenoh_macros::unstable_doc] +pub struct ShmProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + backend: Backend, + id: IDSource, +} +impl ShmProviderBuilderBackendID +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + /// build ShmProvider + #[zenoh_macros::unstable_doc] + pub fn res(self) -> ShmProvider { + ShmProvider::new(self.backend, self.id) + } +} + +/// Trait to create ProtocolID sources for ShmProvider +#[zenoh_macros::unstable_doc] +pub trait ProtocolIDSource: Send + Sync { + fn id(&self) -> ProtocolID; +} + +/// Static ProtocolID source. This is a recommended API to set ProtocolID +/// when creating ShmProvider as the ID value is statically evaluated +/// at compile-time and can be optimized. +#[zenoh_macros::unstable_doc] +#[derive(Default)] +pub struct StaticProtocolID; +impl ProtocolIDSource for StaticProtocolID { + fn id(&self) -> ProtocolID { + ID + } +} + +/// Dynamic ProtocolID source. This is an alternative API to set ProtocolID +/// when creating ShmProvider for cases where ProtocolID is unknown +/// at compile-time. 
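// --- Editor's sketch (not part of the diff): end-to-end provider construction
// through the builder above, plus one blocking allocation. The backend value
// is taken as a parameter because its concrete constructor lives elsewhere in
// this PR; POSIX_PROTOCOL_ID and the policy types come from this crate.
fn build_and_alloc<Backend: ShmProviderBackend + Sync>(backend: Backend) {
    let provider = ShmProviderBuilder::builder()
        .protocol_id::<POSIX_PROTOCOL_ID>() // compile-time ID, preferred form
        .backend(backend)
        .res();

    // sync flavor: block the calling thread until memory becomes available
    let _buf = provider.alloc(1024).with_policy::<BlockOn<JustAlloc>>().wait();

    // async flavor (inside an async fn) — the same chain is awaitable because
    // BlockOn also implements AsyncAllocPolicy:
    //     let buf = provider.alloc(1024).with_policy::<BlockOn<JustAlloc>>().await;
}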
+#[zenoh_macros::unstable_doc] +pub struct DynamicProtocolID { + id: ProtocolID, +} +impl DynamicProtocolID { + #[zenoh_macros::unstable_doc] + pub fn new(id: ProtocolID) -> Self { + Self { id } + } +} +impl ProtocolIDSource for DynamicProtocolID { + fn id(&self) -> ProtocolID { + self.id + } +} +unsafe impl Send for DynamicProtocolID {} +unsafe impl Sync for DynamicProtocolID {} + +/// A generalized interface for shared memory data sources +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct ShmProvider +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + backend: Backend, + busy_list: Mutex>, + id: IDSource, +} + +impl ShmProvider +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + /// Rich interface for making allocations + #[zenoh_macros::unstable_doc] + pub fn alloc(&self, size: usize) -> AllocLayoutSizedBuilder { + AllocLayoutSizedBuilder::new(self, size) + } + + /// Defragment memory + #[zenoh_macros::unstable_doc] + pub fn defragment(&self) -> usize { + self.backend.defragment() + } + + /// Map externally-allocated chunk into ZShmMut. + /// This method is designed to be used with push data sources. + /// Remember that chunk's len may be >= len! + #[zenoh_macros::unstable_doc] + pub fn map(&self, chunk: AllocatedChunk, len: usize) -> ZResult { + let len = len.try_into()?; + + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // wrap everything to ShmBufInner + let wrapped = self.wrap( + chunk, + len, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) + } + + /// Try to collect free chunks. + /// Returns the size of largest collected chunk + #[zenoh_macros::unstable_doc] + pub fn garbage_collect(&self) -> usize { + fn is_free_chunk(chunk: &BusyChunk) -> bool { + let header = chunk.header.descriptor.header(); + if header.refcount.load(Ordering::SeqCst) != 0 { + return header.watchdog_invalidated.load(Ordering::SeqCst); + } + true + } + + tracing::trace!("Running Garbage Collector"); + + let mut largest = 0usize; + let mut guard = self.busy_list.lock().unwrap(); + guard.retain(|maybe_free| { + if is_free_chunk(maybe_free) { + tracing::trace!("Garbage Collecting Chunk: {:?}", maybe_free); + self.backend.free(&maybe_free.descriptor); + largest = largest.max(maybe_free.descriptor.len.get()); + return false; + } + true + }); + drop(guard); + + largest + } + + /// Bytes available for use + #[zenoh_macros::unstable_doc] + pub fn available(&self) -> usize { + self.backend.available() + } +} + +// PRIVATE impls +impl ShmProvider +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend, +{ + fn new(backend: Backend, id: IDSource) -> Self { + Self { + backend, + busy_list: Mutex::new(VecDeque::default()), + id, + } + } + + fn alloc_inner(&self, size: NonZeroUsize, layout: &MemoryLayout) -> BufAllocResult + where + Policy: AllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) 
+ let chunk = Policy::alloc(layout, self)?; + + // wrap allocated chunk to ShmBufInner + let wrapped = self.wrap( + chunk, + size, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) + } + + fn alloc_resources() -> ZResult<( + AllocatedHeaderDescriptor, + AllocatedWatchdog, + ConfirmedDescriptor, + )> { + // allocate shared header + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; + + // allocate watchdog + let allocated_watchdog = GLOBAL_STORAGE.read().allocate_watchdog()?; + + // add watchdog to confirmator + let confirmed_watchdog = GLOBAL_CONFIRMATOR + .read() + .add_owned(&allocated_watchdog.descriptor)?; + + Ok((allocated_header, allocated_watchdog, confirmed_watchdog)) + } + + fn wrap( + &self, + chunk: AllocatedChunk, + len: NonZeroUsize, + allocated_header: AllocatedHeaderDescriptor, + allocated_watchdog: AllocatedWatchdog, + confirmed_watchdog: ConfirmedDescriptor, + ) -> ShmBufInner { + let header = allocated_header.descriptor.clone(); + let descriptor = Descriptor::from(&allocated_watchdog.descriptor); + + // add watchdog to validator + let c_header = header.clone(); + GLOBAL_VALIDATOR.read().add( + allocated_watchdog.descriptor.clone(), + Box::new(move || { + c_header + .header() + .watchdog_invalidated + .store(true, Ordering::SeqCst); + }), + ); + + // Create buffer's info + let info = ShmBufInfo::new( + chunk.descriptor.clone(), + self.id.id(), + len, + descriptor, + HeaderDescriptor::from(&header), + header.header().generation.load(Ordering::SeqCst), + ); + + // Create buffer + let shmb = ShmBufInner { + header, + buf: chunk.data, + info, + watchdog: Arc::new(confirmed_watchdog), + }; + + // Create and store busy chunk + self.busy_list.lock().unwrap().push_back(BusyChunk::new( + chunk.descriptor, + allocated_header, + allocated_watchdog, + )); + + shmb + } +} + +// PRIVATE impls for Sync backend +impl ShmProvider +where + IDSource: ProtocolIDSource, + Backend: ShmProviderBackend + Sync, +{ + async fn alloc_inner_async( + &self, + size: NonZeroUsize, + backend_layout: &MemoryLayout, + ) -> BufAllocResult + where + Policy: AsyncAllocPolicy, + { + // allocate resources for SHM buffer + let (allocated_header, allocated_watchdog, confirmed_watchdog) = Self::alloc_resources()?; + + // allocate data chunk + // Perform actions depending on the Policy + // NOTE: it is necessary to properly map this chunk OR free it if mapping fails! + // Don't loose this chunk as it leads to memory leak at the backend side! + // NOTE: self.backend.alloc(len) returns chunk with len >= required len, + // and it is necessary to handle that properly and pass this len to corresponding free(...) 
+ let chunk = Policy::alloc_async(backend_layout, self).await?; + + // wrap allocated chunk to ShmBufInner + let wrapped = self.wrap( + chunk, + size, + allocated_header, + allocated_watchdog, + confirmed_watchdog, + ); + Ok(unsafe { ZShmMut::new_unchecked(wrapped) }) + } +} diff --git a/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs new file mode 100644 index 0000000000..51795f5880 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/shm_provider_backend.rs @@ -0,0 +1,49 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::{ + chunk::ChunkDescriptor, + types::{ChunkAllocResult, MemoryLayout, ZLayoutError}, +}; + +/// The provider backend trait +/// Implement this interface to create a Zenoh-compatible shared memory provider +#[zenoh_macros::unstable_doc] +pub trait ShmProviderBackend { + /// Allocate the chunk of desired size. + /// If successful, the result's chunk size will be >= len + #[zenoh_macros::unstable_doc] + fn alloc(&self, layout: &MemoryLayout) -> ChunkAllocResult; + + /// Deallocate the chunk. + /// It is guaranteed that chunk's descriptor will correspond to the one returned from alloc(...) + #[zenoh_macros::unstable_doc] + fn free(&self, chunk: &ChunkDescriptor); + + /// Defragment the memory. + /// Should return the size of largest defragmented chunk + #[zenoh_macros::unstable_doc] + fn defragment(&self) -> usize; + + /// Bytes available for use + #[zenoh_macros::unstable_doc] + fn available(&self) -> usize; + + /// Check and calculate suitable layout for layout. + /// Depending on the implementation, backend may relayout allocations for bigger layouts. + /// This method is used to: + /// - validate, if the provided layout can be used with this backend + /// - adopt the layout for backend capabilities + #[zenoh_macros::unstable_doc] + fn layout_for(&self, layout: MemoryLayout) -> Result; +} diff --git a/commons/zenoh-shm/src/api/provider/types.rs b/commons/zenoh-shm/src/api/provider/types.rs new file mode 100644 index 0000000000..71d3753e26 --- /dev/null +++ b/commons/zenoh-shm/src/api/provider/types.rs @@ -0,0 +1,247 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, num::NonZeroUsize}; + +use super::chunk::AllocatedChunk; +use crate::api::buffer::zshmmut::ZShmMut; + +/// Allocation errors +/// +/// NeedDefragment: defragmentation needed +/// OutOfMemory: the provider is out of memory +/// Other: other error +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZAllocError { + NeedDefragment, + OutOfMemory, + Other(zenoh_result::Error), +} + +impl From for ZAllocError { + fn from(value: zenoh_result::Error) -> Self { + Self::Other(value) + } +} + +/// alignment in powers of 2: 0 == 1-byte alignment, 1 == 2byte, 2 == 4byte, 3 == 8byte etc +#[zenoh_macros::unstable_doc] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct AllocAlignment { + pow: u8, +} + +impl Display for AllocAlignment { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("[{}]", self.get_alignment_value())) + } +} + +impl Default for AllocAlignment { + fn default() -> Self { + Self { + pow: std::mem::align_of::().ilog2() as _, + } + } +} + +impl AllocAlignment { + /// Try to create a new AllocAlignment from alignment representation in powers of 2. + /// + /// # Errors + /// + /// This function will return an error if provided alignment power cannot fit into usize. + #[zenoh_macros::unstable_doc] + pub const fn new(pow: u8) -> Result { + if pow < usize::BITS as u8 { + Ok(Self { pow }) + } else { + Err(ZLayoutError::IncorrectLayoutArgs) + } + } + + /// Get alignment in normal units (bytes) + #[zenoh_macros::unstable_doc] + pub fn get_alignment_value(&self) -> NonZeroUsize { + // SAFETY: this is safe because we limit pow in new based on usize size + unsafe { NonZeroUsize::new_unchecked(1usize << self.pow) } + } + + /// Align size according to inner alignment. + /// This call may extend the size (see the example) + /// # Examples + /// + /// ``` + /// use zenoh_shm::api::provider::types::AllocAlignment; + /// + /// let alignment = AllocAlignment::new(2).unwrap(); // 4-byte alignment + /// let initial_size = 7.try_into().unwrap(); + /// let aligned_size = alignment.align_size(initial_size); + /// assert_eq!(aligned_size.get(), 8); + /// ``` + #[zenoh_macros::unstable_doc] + pub fn align_size(&self, size: NonZeroUsize) -> NonZeroUsize { + // Notations: + // - size to align S + // - usize::BITS B + // - pow P where 0 ≤ P < B + // - alignment value A = 2^P + // - return R = min{x | x ≥ S, x % A = 0} + // + // Example 1: A = 4 = (00100)₂, S = 4 = (00100)₂ ⇒ R = 4 = (00100)₂ + // Example 2: A = 4 = (00100)₂, S = 7 = (00111)₂ ⇒ R = 8 = (01000)₂ + // Example 3: A = 4 = (00100)₂, S = 8 = (01000)₂ ⇒ R = 8 = (01000)₂ + // Example 4: A = 4 = (00100)₂, S = 9 = (01001)₂ ⇒ R = 12 = (01100)₂ + // + // Algorithm: For any x = (bₙ, ⋯, b₂, b₁)₂ in binary representation, + // 1. x % A = 0 ⇔ ∀i < P, bᵢ = 0 + // 2. f(x) ≜ x & !(A-1) leads to ∀i < P, bᵢ = 0, hence f(x) % A = 0 + // (i.e. f zeros all bits before the P-th bit) + // 3. R = min{x | x ≥ S, x % A = 0} is equivalent to find the unique R where S ≤ R < S+A and R % A = 0 + // 4. 
x-A < f(x) ≤ x ⇒ S-1 < f(S+A-1) ≤ S+A-1 ⇒ S ≤ f(S+A-1) < S+A + // + // Hence R = f(S+A-1) = (S+(A-1)) & !(A-1) is the desired value + + // Compute A - 1 = 2^P - 1 + let a_minus_1 = self.get_alignment_value().get() - 1; + + // Overflow check: ensure S ≤ 2^B - 2^P = (2^B - 1) - (A - 1) + // so that R < S+A ≤ 2^B and hence it's a valid usize + let bound = usize::MAX - a_minus_1; + assert!( + size.get() <= bound, + "The given size {} exceeded the maximum {}", + size.get(), + bound + ); + + // Overflow never occurs due to the check above + let r = (size.get() + a_minus_1) & !a_minus_1; + + // SAFETY: R ≥ 0 since R ≥ S ≥ 0 + unsafe { NonZeroUsize::new_unchecked(r) } + } +} + +/// Memory layout representation: alignment and size aligned for this alignment +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub struct MemoryLayout { + size: NonZeroUsize, + alignment: AllocAlignment, +} + +impl Display for MemoryLayout { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!( + "[size={},alignment={}]", + self.size, self.alignment + )) + } +} + +impl MemoryLayout { + /// Try to create a new memory layout. + /// + /// # Errors + /// + /// This function will return an error if zero size have passed or if the provided size is not the multiply of the alignment. + #[zenoh_macros::unstable_doc] + pub fn new(size: T, alignment: AllocAlignment) -> Result + where + T: TryInto, + { + let Ok(size) = size.try_into() else { + return Err(ZLayoutError::IncorrectLayoutArgs); + }; + + // size of an allocation must be a miltiple of it's alignment! + match size.get() % alignment.get_alignment_value() { + 0 => Ok(Self { size, alignment }), + _ => Err(ZLayoutError::IncorrectLayoutArgs), + } + } + + #[zenoh_macros::unstable_doc] + pub fn size(&self) -> NonZeroUsize { + self.size + } + + #[zenoh_macros::unstable_doc] + pub fn alignment(&self) -> AllocAlignment { + self.alignment + } + + /// Realign the layout for new alignment. The alignment must be >= of the existing one. 
+ /// # Examples + /// + /// ``` + /// use zenoh_shm::api::provider::types::AllocAlignment; + /// use zenoh_shm::api::provider::types::MemoryLayout; + /// + /// // 8 bytes with 4-byte alignment + /// let layout4b = MemoryLayout::new(8, AllocAlignment::new(2).unwrap()).unwrap(); + /// + /// // Try to realign with 2-byte alignment + /// let layout2b = layout4b.extend(AllocAlignment::new(1).unwrap()); + /// assert!(layout2b.is_err()); // fails because new alignment must be >= old + /// + /// // Try to realign with 8-byte alignment + /// let layout8b = layout4b.extend(AllocAlignment::new(3).unwrap()); + /// assert!(layout8b.is_ok()); // ok + /// ``` + #[zenoh_macros::unstable_doc] + pub fn extend(&self, new_alignment: AllocAlignment) -> Result { + if self.alignment <= new_alignment { + let new_size = new_alignment.align_size(self.size); + return MemoryLayout::new(new_size, new_alignment); + } + Err(ZLayoutError::IncorrectLayoutArgs) + } +} + +/// Layouting errors +/// +/// IncorrectLayoutArgs: layout arguments are incorrect +/// ProviderIncompatibleLayout: layout incompatible with provider +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutError { + IncorrectLayoutArgs, + ProviderIncompatibleLayout, +} + +/// SHM chunk allocation result +#[zenoh_macros::unstable_doc] +pub type ChunkAllocResult = Result; + +/// SHM buffer allocation result +#[zenoh_macros::unstable_doc] +pub type BufAllocResult = Result; + +/// Layouting and allocation errors +/// +/// Alloc: allocation error +/// Layout: layouting error +#[zenoh_macros::unstable_doc] +#[derive(Debug)] +pub enum ZLayoutAllocError { + Alloc(ZAllocError), + Layout(ZLayoutError), +} + +/// SHM buffer layouting and allocation result +#[zenoh_macros::unstable_doc] +pub type BufLayoutAllocResult = Result; diff --git a/commons/zenoh-shm/src/cleanup.rs b/commons/zenoh-shm/src/cleanup.rs new file mode 100644 index 0000000000..5649732bf6 --- /dev/null +++ b/commons/zenoh-shm/src/cleanup.rs @@ -0,0 +1,47 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use static_init::dynamic; + +/// A global cleanup, that is guaranteed to be dropped at normal program exit and that will +/// execute all registered cleanup routines at this moment +#[dynamic(lazy, drop)] +pub(crate) static mut CLEANUP: Cleanup = Cleanup::new(); + +/// An RAII object that calls all registered routines upon destruction +pub(crate) struct Cleanup { + cleanups: lockfree::queue::Queue>>, +} + +impl Cleanup { + fn new() -> Self { + Self { + cleanups: Default::default(), + } + } + + pub(crate) fn register_cleanup(&self, cleanup_fn: Box) { + self.cleanups.push(Some(cleanup_fn)); + } +} + +impl Drop for Cleanup { + fn drop(&mut self) { + while let Some(cleanup) = self.cleanups.pop() { + if let Some(f) = cleanup { + f(); + } + } + } +} diff --git a/commons/zenoh-shm/src/header/allocated_descriptor.rs b/commons/zenoh-shm/src/header/allocated_descriptor.rs new file mode 100644 index 0000000000..6cf1d1d011 --- /dev/null +++ b/commons/zenoh-shm/src/header/allocated_descriptor.rs @@ -0,0 +1,28 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedHeaderDescriptor, storage::GLOBAL_HEADER_STORAGE}; + +#[derive(Debug)] +pub struct AllocatedHeaderDescriptor { + pub descriptor: OwnedHeaderDescriptor, +} + +impl Drop for AllocatedHeaderDescriptor { + fn drop(&mut self) { + GLOBAL_HEADER_STORAGE + .read() + .reclaim_header(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/header/chunk_header.rs b/commons/zenoh-shm/src/header/chunk_header.rs new file mode 100644 index 0000000000..c5eb11bb7c --- /dev/null +++ b/commons/zenoh-shm/src/header/chunk_header.rs @@ -0,0 +1,28 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::{AtomicBool, AtomicU32}; + +// Chunk header +#[stabby::stabby] +#[derive(Debug)] +pub struct ChunkHeaderType { + /* + TODO: We don't really need 32 bits here, but access to 16-bit felds with 1 byte alignment is less performant on most of the platforms. 
+ We need to bench and select reasonable integer sizes here once we have an implementation to bench + */ + pub refcount: AtomicU32, + pub watchdog_invalidated: AtomicBool, + pub generation: AtomicU32, +} diff --git a/commons/zenoh-shm/src/header/descriptor.rs b/commons/zenoh-shm/src/header/descriptor.rs new file mode 100644 index 0000000000..7700eb90c6 --- /dev/null +++ b/commons/zenoh-shm/src/header/descriptor.rs @@ -0,0 +1,63 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use super::{chunk_header::ChunkHeaderType, segment::HeaderSegment}; + +pub type HeaderSegmentID = u16; +pub type HeaderIndex = u16; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct HeaderDescriptor { + pub id: HeaderSegmentID, + pub index: HeaderIndex, +} + +impl From<&OwnedHeaderDescriptor> for HeaderDescriptor { + fn from(item: &OwnedHeaderDescriptor) -> Self { + let id = item.segment.array.id(); + let index = unsafe { item.segment.array.index(item.header) }; + + Self { id, index } + } +} + +#[derive(Clone)] +pub struct OwnedHeaderDescriptor { + segment: Arc, + header: *const ChunkHeaderType, +} + +unsafe impl Send for OwnedHeaderDescriptor {} +unsafe impl Sync for OwnedHeaderDescriptor {} + +impl OwnedHeaderDescriptor { + pub(crate) fn new(segment: Arc, header: *const ChunkHeaderType) -> Self { + Self { segment, header } + } + + #[inline(always)] + pub fn header(&self) -> &ChunkHeaderType { + unsafe { &(*self.header) } + } +} + +impl std::fmt::Debug for OwnedHeaderDescriptor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OwnedHeaderDescriptor") + .field("header", &self.header) + .finish() + } +} diff --git a/commons/zenoh-shm/src/header/mod.rs b/commons/zenoh-shm/src/header/mod.rs new file mode 100644 index 0000000000..84acc86e87 --- /dev/null +++ b/commons/zenoh-shm/src/header/mod.rs @@ -0,0 +1,23 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(storage); +tested_crate_module!(subscription); + +pub(crate) mod allocated_descriptor; +pub(crate) mod chunk_header; + +mod segment; diff --git a/commons/zenoh-shm/src/header/segment.rs b/commons/zenoh-shm/src/header/segment.rs new file mode 100644 index 0000000000..ab2353c35d --- /dev/null +++ b/commons/zenoh-shm/src/header/segment.rs @@ -0,0 +1,39 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_result::ZResult; + +use super::{ + chunk_header::ChunkHeaderType, + descriptor::{HeaderIndex, HeaderSegmentID}, +}; +use crate::posix_shm::array::ArrayInSHM; + +const HEADER_SEGMENT_PREFIX: &str = "header"; + +pub struct HeaderSegment { + pub array: ArrayInSHM, +} + +impl HeaderSegment { + pub fn create(header_count: usize) -> ZResult { + let array = ArrayInSHM::create(header_count, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: HeaderSegmentID) -> ZResult { + let array = ArrayInSHM::open(id, HEADER_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/header/storage.rs b/commons/zenoh-shm/src/header/storage.rs new file mode 100644 index 0000000000..db556937d0 --- /dev/null +++ b/commons/zenoh-shm/src/header/storage.rs @@ -0,0 +1,86 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + collections::LinkedList, + sync::{Arc, Mutex}, +}; + +use static_init::dynamic; +use zenoh_result::{zerror, ZResult}; + +use super::{ + allocated_descriptor::AllocatedHeaderDescriptor, + descriptor::{HeaderIndex, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +#[dynamic(lazy, drop)] +pub static mut GLOBAL_HEADER_STORAGE: HeaderStorage = HeaderStorage::new(32768usize).unwrap(); + +pub struct HeaderStorage { + available: Arc>>, +} + +impl HeaderStorage { + fn new(initial_header_count: usize) -> ZResult { + let initial_segment = Arc::new(HeaderSegment::create(initial_header_count)?); + let mut initially_available = LinkedList::::default(); + + for index in 0..initial_header_count { + let header = unsafe { initial_segment.array.elem(index as HeaderIndex) }; + let descriptor = OwnedHeaderDescriptor::new(initial_segment.clone(), header); + + // init generation (this is not really necessary, but we do) + descriptor + .header() + .generation + .store(0, std::sync::atomic::Ordering::SeqCst); + + initially_available.push_back(descriptor); + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_header(&self) -> ZResult { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_front(); + drop(guard); + + let descriptor = popped.ok_or_else(|| zerror!("no free headers available"))?; + + //initialize header fields + let header = descriptor.header(); + header + .refcount + .store(1, std::sync::atomic::Ordering::SeqCst); + header + .watchdog_invalidated + .store(false, std::sync::atomic::Ordering::SeqCst); + + Ok(AllocatedHeaderDescriptor { descriptor }) + } + + pub fn reclaim_header(&self, header: OwnedHeaderDescriptor) { + // header deallocated - increment it's generation to invalidate any existing references + header + .header() + .generation + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + let mut guard = self.available.lock().unwrap(); + guard.push_front(header); + } +} diff --git a/commons/zenoh-shm/src/header/subscription.rs b/commons/zenoh-shm/src/header/subscription.rs new file mode 100644 index 0000000000..6f92960aaa --- /dev/null +++ 
b/commons/zenoh-shm/src/header/subscription.rs @@ -0,0 +1,60 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +use static_init::dynamic; +use zenoh_result::{zerror, ZResult}; + +use super::{ + descriptor::{HeaderDescriptor, HeaderSegmentID, OwnedHeaderDescriptor}, + segment::HeaderSegment, +}; + +#[dynamic(lazy, drop)] +pub static mut GLOBAL_HEADER_SUBSCRIPTION: Subscription = Subscription::new(); + +pub struct Subscription { + linked_table: Mutex>>, +} + +impl Subscription { + fn new() -> Self { + Self { + linked_table: Mutex::default(), + } + } + + pub fn link(&self, descriptor: &HeaderDescriptor) -> ZResult { + let mut guard = self.linked_table.lock().map_err(|e| zerror!("{e}"))?; + // ensure segment + let segment = match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + let segment = Arc::new(HeaderSegment::open(descriptor.id)?); + vacant.insert(segment.clone()); + segment + } + std::collections::btree_map::Entry::Occupied(occupied) => occupied.get().clone(), + }; + drop(guard); + + // construct owned descriptor + // SAFETY: HeaderDescriptor source guarantees that descriptor.index is valid for segment + let header = unsafe { segment.array.elem(descriptor.index) }; + let owned_descriptor = OwnedHeaderDescriptor::new(segment, header); + Ok(owned_descriptor) + } +} diff --git a/commons/zenoh-shm/src/lib.rs b/commons/zenoh-shm/src/lib.rs index a75e174488..2d8173c2f9 100644 --- a/commons/zenoh-shm/src/lib.rs +++ b/commons/zenoh-shm/src/lib.rs @@ -11,145 +11,154 @@ // Contributors: // ZettaScale Zenoh Team, // -use shared_memory::{Shmem, ShmemConf, ShmemError}; use std::{ any::Any, - cmp, - collections::{binary_heap::BinaryHeap, HashMap}, - fmt, mem, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicPtr, Ordering}, + Arc, + }, }; -use zenoh_buffers::ZSliceBuffer; -use zenoh_result::{zerror, ShmError, ZResult}; - -const MIN_FREE_CHUNK_SIZE: usize = 1_024; -const ACCOUNTED_OVERHEAD: usize = 4_096; -const ZENOH_SHM_PREFIX: &str = "zenoh_shm_zid"; - -// Chunk header -type ChunkHeaderType = AtomicUsize; -const CHUNK_HEADER_SIZE: usize = std::mem::size_of::(); - -fn align_addr_at(addr: usize, align: usize) -> usize { - match addr % align { - 0 => addr, - r => addr + (align - r), - } -} -#[derive(Eq, Copy, Clone, Debug)] -struct Chunk { - base_addr: *mut u8, - offset: usize, - size: usize, -} - -impl Ord for Chunk { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.size.cmp(&other.size) - } -} - -impl PartialOrd for Chunk { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for Chunk { - fn eq(&self, other: &Self) -> bool { - self.size == other.size - } -} +use api::{common::types::ProtocolID, provider::chunk::ChunkDescriptor}; +use header::descriptor::{HeaderDescriptor, OwnedHeaderDescriptor}; +use watchdog::{confirmator::ConfirmedDescriptor, descriptor::Descriptor}; +use zenoh_buffers::ZSliceBuffer; -/// Information about a [`SharedMemoryBuf`]. +#[macro_export] +macro_rules! 
tested_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + mod $module; + }; +} + +#[macro_export] +macro_rules! tested_crate_module { + ($module:ident) => { + #[cfg(feature = "test")] + pub mod $module; + #[cfg(not(feature = "test"))] + pub(crate) mod $module; + }; +} + +pub mod api; +mod cleanup; +pub mod header; +pub mod posix_shm; +pub mod reader; +pub mod watchdog; + +/// Information about a [`ShmBufInner`]. /// -/// This that can be serialized and can be used to retrieve the [`SharedMemoryBuf`] in a remote process. +/// This that can be serialized and can be used to retrieve the [`ShmBufInner`] in a remote process. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct SharedMemoryBufInfo { - /// The index of the beginning of the buffer in the shm segment. - pub offset: usize, - /// The length of the buffer. - pub length: usize, - /// The identifier of the shm manager that manages the shm segment this buffer points to. - pub shm_manager: String, - /// The kind of buffer. - pub kind: u8, -} - -impl SharedMemoryBufInfo { - pub fn new(offset: usize, length: usize, manager: String, kind: u8) -> SharedMemoryBufInfo { - SharedMemoryBufInfo { - offset, - length, - shm_manager: manager, - kind, +pub struct ShmBufInfo { + /// The data chunk descriptor + pub data_descriptor: ChunkDescriptor, + /// Protocol identifier for particular SHM implementation + pub shm_protocol: ProtocolID, + /// Actual data length + /// NOTE: data_descriptor's len is >= of this len and describes the actual memory length + /// dedicated in shared memory segment for this particular buffer. + pub data_len: NonZeroUsize, + + /// The watchdog descriptor + pub watchdog_descriptor: Descriptor, + /// The header descriptor + pub header_descriptor: HeaderDescriptor, + /// The generation of the buffer + pub generation: u32, +} + +impl ShmBufInfo { + pub fn new( + data_descriptor: ChunkDescriptor, + shm_protocol: ProtocolID, + data_len: NonZeroUsize, + watchdog_descriptor: Descriptor, + header_descriptor: HeaderDescriptor, + generation: u32, + ) -> ShmBufInfo { + ShmBufInfo { + data_descriptor, + shm_protocol, + data_len, + watchdog_descriptor, + header_descriptor, + generation, } } } /// A zenoh buffer in shared memory. 
#[non_exhaustive] -pub struct SharedMemoryBuf { - pub rc_ptr: AtomicPtr, - pub buf: AtomicPtr, - pub len: usize, - pub info: SharedMemoryBufInfo, +pub struct ShmBufInner { + pub(crate) header: OwnedHeaderDescriptor, + pub(crate) buf: AtomicPtr, + pub info: ShmBufInfo, + pub(crate) watchdog: Arc, +} + +impl PartialEq for ShmBufInner { + fn eq(&self, other: &Self) -> bool { + // currently there is no API to resize an SHM buffer, but it is intended in the future, + // so I add size comparison here to avoid future bugs :) + self.buf.load(Ordering::Relaxed) == other.buf.load(Ordering::Relaxed) + && self.info.data_len == other.info.data_len + } } +impl Eq for ShmBufInner {} -impl std::fmt::Debug for SharedMemoryBuf { +impl std::fmt::Debug for ShmBufInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let ptr = self.rc_ptr.load(Ordering::SeqCst); - let rc = unsafe { (*ptr).load(Ordering::SeqCst) }; - f.debug_struct("SharedMemoryBuf") - .field("rc", &rc) + f.debug_struct("ShmBufInner") + .field("header", &self.header) .field("buf", &self.buf) - .field("len", &self.len) .field("info", &self.info) .finish() } } -impl SharedMemoryBuf { - pub fn len(&self) -> usize { - self.len - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 +impl ShmBufInner { + pub fn len(&self) -> NonZeroUsize { + self.info.data_len } - pub fn get_kind(&self) -> u8 { - self.info.kind + fn is_valid(&self) -> bool { + self.header.header().generation.load(Ordering::SeqCst) == self.info.generation } - pub fn set_kind(&mut self, v: u8) { - self.info.kind = v + fn is_unique(&self) -> bool { + self.ref_count() == 1 } - pub fn owner(&self) -> String { - self.info.shm_manager.clone() + pub fn ref_count(&self) -> u32 { + self.header.header().refcount.load(Ordering::SeqCst) } - pub fn ref_count(&self) -> usize { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).load(Ordering::SeqCst) } - } - - pub fn inc_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_add(1, Ordering::SeqCst) }; + /// Increments buffer's reference count + /// + /// # Safety + /// You should understand what you are doing, as overestimation + /// of the reference counter can lead to memory being stalled until + /// recovered by watchdog subsystem or forcely deallocated + pub unsafe fn inc_ref_count(&self) { + self.header.header().refcount.fetch_add(1, Ordering::SeqCst); } - pub fn dec_ref_count(&self) { - let rc = self.rc_ptr.load(Ordering::SeqCst); - unsafe { (*rc).fetch_sub(1, Ordering::SeqCst) }; + // PRIVATE: + fn as_slice(&self) -> &[u8] { + tracing::trace!("ShmBufInner::as_slice() == len = {:?}", self.info.data_len); + let bp = self.buf.load(Ordering::SeqCst); + unsafe { std::slice::from_raw_parts(bp, self.info.data_len.get()) } } - pub fn as_slice(&self) -> &[u8] { - tracing::trace!("SharedMemoryBuf::as_slice() == len = {:?}", self.len); - let bp = self.buf.load(Ordering::SeqCst); - unsafe { std::slice::from_raw_parts(bp, self.len) } + unsafe fn dec_ref_count(&self) { + self.header.header().refcount.fetch_sub(1, Ordering::SeqCst); } /// Gets a mutable slice. @@ -163,364 +172,59 @@ impl SharedMemoryBuf { /// /// In short, whilst this operation is marked as unsafe, you are safe if you can /// guarantee that your in applications only one process at the time will actually write. 
- pub unsafe fn as_mut_slice(&mut self) -> &mut [u8] { + unsafe fn as_mut_slice_inner(&mut self) -> &mut [u8] { let bp = self.buf.load(Ordering::SeqCst); - std::slice::from_raw_parts_mut(bp, self.len) + std::slice::from_raw_parts_mut(bp, self.info.data_len.get()) } } -impl Drop for SharedMemoryBuf { +impl Drop for ShmBufInner { fn drop(&mut self) { - self.dec_ref_count(); + // # Safety + // obviouly, we need to decrement refcount when dropping ShmBufInner instance + unsafe { self.dec_ref_count() }; } } -impl Clone for SharedMemoryBuf { +impl Clone for ShmBufInner { fn clone(&self) -> Self { - self.inc_ref_count(); - let rc = self.rc_ptr.load(Ordering::SeqCst); + // # Safety + // obviouly, we need to increment refcount when cloning ShmBufInner instance + unsafe { self.inc_ref_count() }; let bp = self.buf.load(Ordering::SeqCst); - SharedMemoryBuf { - rc_ptr: AtomicPtr::new(rc), + ShmBufInner { + header: self.header.clone(), buf: AtomicPtr::new(bp), - len: self.len, info: self.info.clone(), + watchdog: self.watchdog.clone(), } } } -/*************************************/ -/* SHARED MEMORY READER */ -/*************************************/ -pub struct SharedMemoryReader { - segments: HashMap, -} - -unsafe impl Send for SharedMemoryReader {} -unsafe impl Sync for SharedMemoryReader {} - -impl SharedMemoryReader { - pub fn new() -> Self { - Self { - segments: HashMap::new(), - } - } - - pub fn connect_map_to_shm(&mut self, info: &SharedMemoryBufInfo) -> ZResult<()> { - match ShmemConf::new().flink(&info.shm_manager).open() { - Ok(shm) => { - self.segments.insert(info.shm_manager.clone(), shm); - Ok(()) - } - Err(e) => { - let e = zerror!( - "Unable to bind shared memory segment {}: {:?}", - info.shm_manager, - e - ); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn try_read_shmbuf(&self, info: &SharedMemoryBufInfo) -> ZResult { - // Try read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. - match self.segments.get(&info.shm_manager) { - Some(shm) => { - let base_ptr = shm.as_ptr(); - let rc = unsafe { base_ptr.add(info.offset) as *mut ChunkHeaderType }; - let rc_ptr = AtomicPtr::::new(rc); - let buf = unsafe { base_ptr.add(info.offset + CHUNK_HEADER_SIZE) }; - let shmb = SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::new(buf), - len: info.length - CHUNK_HEADER_SIZE, - info: info.clone(), - }; - Ok(shmb) - } - None => { - let e = zerror!("Unable to find shared memory segment: {}", info.shm_manager); - tracing::trace!("{}", e); - Err(ShmError(e).into()) - } - } - } - - pub fn read_shmbuf(&mut self, info: &SharedMemoryBufInfo) -> ZResult { - // Read does not increment the reference count as it is assumed - // that the sender of this buffer has incremented for us. - self.try_read_shmbuf(info).or_else(|_| { - self.connect_map_to_shm(info)?; - self.try_read_shmbuf(info) - }) - } -} - -impl Default for SharedMemoryReader { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for SharedMemoryReader { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryReader").finish()?; - f.debug_list().entries(self.segments.keys()).finish() - } -} - -/// A shared memory segment manager. -/// -/// Allows to access a shared memory segment and reserve some parts of this segment for writing. 
-pub struct SharedMemoryManager { - segment_path: String, - size: usize, - available: usize, - own_segment: Shmem, - free_list: BinaryHeap, - busy_list: Vec, - alignment: usize, -} - -unsafe impl Send for SharedMemoryManager {} - -impl SharedMemoryManager { - /// Creates a new SharedMemoryManager managing allocations of a region of the - /// given size. - pub fn make(id: String, size: usize) -> ZResult { - let mut temp_dir = std::env::temp_dir(); - let file_name: String = format!("{ZENOH_SHM_PREFIX}_{id}"); - temp_dir.push(file_name); - let path: String = temp_dir - .to_str() - .ok_or_else(|| ShmError(zerror!("Unable to parse tmp directory: {:?}", temp_dir)))? - .to_string(); - tracing::trace!("Creating file at: {}", path); - let real_size = size + ACCOUNTED_OVERHEAD; - let shmem = match ShmemConf::new() - .size(real_size) - .flink(path.clone()) - .create() - { - Ok(m) => m, - Err(ShmemError::LinkExists) => { - return Err(ShmError(zerror!( - "Unable to open SharedMemoryManager: SharedMemory already exists" - )) - .into()) - } - Err(e) => { - return Err(ShmError(zerror!("Unable to open SharedMemoryManager: {}", e)).into()) - } - }; - let base_ptr = shmem.as_ptr(); - - let mut free_list = BinaryHeap::new(); - let chunk = Chunk { - base_addr: base_ptr, - offset: 0, - size: real_size, - }; - free_list.push(chunk); - let busy_list = vec![]; - let shm = SharedMemoryManager { - segment_path: path, - size, - available: real_size, - own_segment: shmem, - free_list, - busy_list, - alignment: mem::align_of::(), - }; - tracing::trace!( - "Created SharedMemoryManager for {:?}", - shm.own_segment.as_ptr() - ); - Ok(shm) - } - - fn free_chunk_map_to_shmbuf(&self, chunk: &Chunk) -> SharedMemoryBuf { - let info = SharedMemoryBufInfo { - offset: chunk.offset, - length: chunk.size, - shm_manager: self.segment_path.clone(), - kind: 0, - }; - let rc = chunk.base_addr as *mut ChunkHeaderType; - unsafe { (*rc).store(1, Ordering::SeqCst) }; - let rc_ptr = AtomicPtr::::new(rc); - SharedMemoryBuf { - rc_ptr, - buf: AtomicPtr::::new(unsafe { chunk.base_addr.add(CHUNK_HEADER_SIZE) }), - len: chunk.size - CHUNK_HEADER_SIZE, - info, - } - } - - pub fn alloc(&mut self, len: usize) -> ZResult { - tracing::trace!("SharedMemoryManager::alloc({})", len); - // Always allocate a size that will keep the proper alignment requirements - let required_len = align_addr_at(len + CHUNK_HEADER_SIZE, self.alignment); - if self.available < required_len { - self.garbage_collect(); - } - if self.available >= required_len { - // The strategy taken is the same for some Unix System V implementations -- as described in the - // famous Bach's book -- in essence keep an ordered list of free slot and always look for the - // biggest as that will give the biggest left-over. 
- match self.free_list.pop() { - Some(mut chunk) if chunk.size >= required_len => { - self.available -= required_len; - tracing::trace!("Allocator selected Chunk ({:?})", &chunk); - if chunk.size - required_len >= MIN_FREE_CHUNK_SIZE { - let free_chunk = Chunk { - base_addr: unsafe { chunk.base_addr.add(required_len) }, - offset: chunk.offset + required_len, - size: chunk.size - required_len, - }; - tracing::trace!( - "The allocation will leave a Free Chunk: {:?}", - &free_chunk - ); - self.free_list.push(free_chunk); - } - chunk.size = required_len; - let shm_buf = self.free_chunk_map_to_shmbuf(&chunk); - tracing::trace!("The allocated Chunk is ({:?})", &chunk); - tracing::trace!("Allocated Shared Memory Buffer: {:?}", &shm_buf); - self.busy_list.push(chunk); - Ok(shm_buf) - } - Some(c) => { - self.free_list.push(c); - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - Err(e.into()) - } - None => { - let e = zerror!("SharedMemoryManager::alloc({}) cannot find any available chunk\nSharedMemoryManager::free_list = {:?}", len, self.free_list); - tracing::trace!("{}", e); - Err(e.into()) - } - } - } else { - let e = zerror!( "SharedMemoryManager does not have sufficient free memory to allocate {} bytes, try de-fragmenting!", len); - tracing::warn!("{}", e); - Err(e.into()) - } - } - - fn is_free_chunk(chunk: &Chunk) -> bool { - let rc_ptr = chunk.base_addr as *mut ChunkHeaderType; - let rc = unsafe { (*rc_ptr).load(Ordering::SeqCst) }; - rc == 0 - } - - fn try_merge_adjacent_chunks(a: &Chunk, b: &Chunk) -> Option<Chunk> { - let end_addr = unsafe { a.base_addr.add(a.size) }; - if end_addr == b.base_addr { - Some(Chunk { - base_addr: a.base_addr, - size: a.size + b.size, - offset: a.offset, - }) - } else { - None - } - } - // Returns the amount of memory that it was able to de-fragment - pub fn defragment(&mut self) -> usize { - if self.free_list.len() > 1 { - let mut fbs: Vec<Chunk> = self.free_list.drain().collect(); - fbs.sort_by(|x, y| x.offset.partial_cmp(&y.offset).unwrap()); - let mut current = fbs.remove(0); - let mut defrag_mem = 0; - let mut i = 0; - let n = fbs.len(); - for chunk in fbs.iter() { - i += 1; - let next = *chunk; - match SharedMemoryManager::try_merge_adjacent_chunks(&current, &next) { - Some(c) => { - current = c; - defrag_mem += current.size; - if i == n { - self.free_list.push(current) - } - } - None => { - self.free_list.push(current); - if i == n { - self.free_list.push(next); - } else { - current = next; - } - } - } - } - defrag_mem - } else { - 0 - } - } - - /// Returns the amount of memory freed - pub fn garbage_collect(&mut self) -> usize { - tracing::trace!("Running Garbage Collector"); - - let mut freed = 0; - let (free, busy) = self - .busy_list - .iter() - .partition(|&c| SharedMemoryManager::is_free_chunk(c)); - self.busy_list = busy; - - for f in free { - freed += f.size; - tracing::trace!("Garbage Collecting Chunk: {:?}", f); - self.free_list.push(f) - } - self.available += freed; - freed - } -} - -impl fmt::Debug for SharedMemoryManager { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("SharedMemoryManager") - .field("segment_path", &self.segment_path) - .field("size", &self.size) - .field("available", &self.available) - .field("free_list.len", &self.free_list.len()) - .field("busy_list.len", &self.busy_list.len()) - .finish() - } -} - // Buffer impls -// - SharedMemoryBuf -impl AsRef<[u8]> for SharedMemoryBuf { +// - ShmBufInner +impl
AsRef<[u8]> for ShmBufInner { fn as_ref(&self) -> &[u8] { self.as_slice() } } -impl AsMut<[u8]> for SharedMemoryBuf { +impl AsMut<[u8]> for ShmBufInner { fn as_mut(&mut self) -> &mut [u8] { - unsafe { self.as_mut_slice() } + unsafe { self.as_mut_slice_inner() } } } -impl ZSliceBuffer for SharedMemoryBuf { +impl ZSliceBuffer for ShmBufInner { fn as_slice(&self) -> &[u8] { self.as_ref() } - fn as_mut_slice(&mut self) -> &mut [u8] { - self.as_mut() - } + fn as_any(&self) -> &dyn Any { self } + + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } diff --git a/commons/zenoh-shm/src/posix_shm/array.rs b/commons/zenoh-shm/src/posix_shm/array.rs new file mode 100644 index 0000000000..d092c579b5 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/array.rs @@ -0,0 +1,124 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Display, marker::PhantomData, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; + +use stabby::IStable; +use zenoh_result::{bail, ZResult}; + +use super::segment::Segment; + +/// An SHM segment that is intended to be an array of elements of a certain type +#[derive(Debug)] +pub struct ArrayInSHM<ID, Elem, ElemIndex> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ + inner: Segment<ID>, + _phantom: PhantomData<(Elem, ElemIndex)>, +} + +unsafe impl<ID, Elem, ElemIndex> Sync for ArrayInSHM<ID, Elem, ElemIndex> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ +} +unsafe impl<ID, Elem, ElemIndex> Send for ArrayInSHM<ID, Elem, ElemIndex> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ +} + +impl<ID, Elem, ElemIndex> ArrayInSHM<ID, Elem, ElemIndex> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive<usize>, + Elem: IStable, + isize: AsPrimitive<ElemIndex>, +{ + // Perform a compile time check that Elem is not a ZST, in such a way that `elem_count` cannot panic. + const _S: () = if size_of::<Elem>() == 0 { + panic!("Elem is a ZST. ZSTs are not allowed as ArrayInSHM generic"); + }; + + pub fn create(elem_count: usize, file_prefix: &str) -> ZResult<Self> { + if elem_count == 0 { + bail!("Unable to create SHM array segment of 0 elements") + } + + let max: usize = ElemIndex::max_value().as_(); + if elem_count - 1 > max { + bail!("Unable to create SHM array segment of {elem_count} elements: out of range for ElemIndex!") + } + + let alloc_size = elem_count * size_of::<Elem>(); + let inner = Segment::create(alloc_size, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn open(id: ID, file_prefix: &str) -> ZResult<Self> { + let inner = Segment::open(id, file_prefix)?; + Ok(Self { + inner, + _phantom: PhantomData, + }) + } + + pub fn id(&self) -> ID { + self.inner.id() + } + + pub fn elem_count(&self) -> usize { + self.inner.len() / size_of::<Elem>() + } + + /// # Safety + /// Retrieves a const element by its index. This is safe if the index doesn't go out of the underlying array's bounds.
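 + /// A hypothetical use, for illustration only: `let value = unsafe { &*array.elem(index) };`, sound only while `index` stays in bounds.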
 + /// An additional assert to check the index validity is added for the "test" feature + pub unsafe fn elem(&self, index: ElemIndex) -> *const Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::<Elem>()); + (self.inner.as_ptr() as *const Elem).add(index.as_()) + } + + /// # Safety + /// Retrieves a mut element by its index. This is safe if the index doesn't go out of the underlying array's bounds. + /// An additional assert to check the index validity is added for the "test" feature + pub unsafe fn elem_mut(&self, index: ElemIndex) -> *mut Elem { + #[cfg(feature = "test")] + assert!(self.inner.len() > index.as_() * size_of::<Elem>()); + (self.inner.as_ptr() as *mut Elem).add(index.as_()) + } + + /// # Safety + /// Calculates an element's index. This is safe if the element belongs to the underlying array. + /// An additional assert is added for the "test" feature + pub unsafe fn index(&self, elem: *const Elem) -> ElemIndex { + let index = elem.offset_from(self.inner.as_ptr() as *const Elem); + #[cfg(feature = "test")] + { + assert!(index >= 0); + assert!(self.inner.len() > index as usize * size_of::<Elem>()); + } + index.as_() + } +} diff --git a/commons/zenoh-shm/src/posix_shm/mod.rs b/commons/zenoh-shm/src/posix_shm/mod.rs new file mode 100644 index 0000000000..a63b1c9e6d --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/mod.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod array; +tested_crate_module!(segment); diff --git a/commons/zenoh-shm/src/posix_shm/segment.rs b/commons/zenoh-shm/src/posix_shm/segment.rs new file mode 100644 index 0000000000..6a34506029 --- /dev/null +++ b/commons/zenoh-shm/src/posix_shm/segment.rs @@ -0,0 +1,133 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::fmt::{Debug, Display}; + +use rand::Rng; +use shared_memory::{Shmem, ShmemConf, ShmemError}; +use zenoh_result::{bail, zerror, ZResult}; + +use crate::cleanup::CLEANUP; + +const SEGMENT_DEDICATE_TRIES: usize = 100; +const ECMA: crc::Crc<u64> = crc::Crc::<u64>::new(&crc::CRC_64_ECMA_182); + +/// Segment of shared memory identified by an ID +pub struct Segment<ID> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ + shmem: Shmem, + id: ID, +} + +impl<ID> Debug for Segment<ID> +where + ID: Debug, + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Segment") + .field("shmem", &self.shmem.as_ptr()) + .field("id", &self.id) + .finish() + } +} + +impl<ID> Segment<ID> +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ + // Automatically generate a free id and create a new segment identified by this id + pub fn create(alloc_size: usize, id_prefix: &str) -> ZResult<Self> { + for _ in 0..SEGMENT_DEDICATE_TRIES { + // Generate random id + let id: ID = rand::thread_rng().gen(); + let os_id = Self::os_id(id.clone(), id_prefix); + + // Register cleanup routine to make sure the Segment will be unlinked on exit + let c_os_id = os_id.clone(); + CLEANUP.read().register_cleanup(Box::new(move || { + if let Ok(mut shmem) = ShmemConf::new().os_id(c_os_id).open() { + shmem.set_owner(true); + drop(shmem); + } + })); + + // Try to create a new segment identified by prefix and generated id. + // If creation fails because a segment already exists for this id, + // the creation attempt will be repeated with another id + match ShmemConf::new().size(alloc_size).os_id(os_id).create() { + Ok(shmem) => { + tracing::debug!( + "Created SHM segment, size: {alloc_size}, prefix: {id_prefix}, id: {id}" + ); + return Ok(Segment { shmem, id }); + } + Err(ShmemError::LinkExists) => {} + Err(ShmemError::MappingIdExists) => {} + Err(e) => bail!("Unable to create POSIX shm segment: {}", e), + } + } + bail!("Unable to dedicate POSIX shm segment file after {SEGMENT_DEDICATE_TRIES} tries!"); + } + + // Open an existing segment identified by id + pub fn open(id: ID, id_prefix: &str) -> ZResult<Self> { + let shmem = ShmemConf::new() + .os_id(Self::os_id(id.clone(), id_prefix)) + .open() + .map_err(|e| { + zerror!( + "Error opening POSIX shm segment id {id}, prefix: {id_prefix}: {}", + e + ) + })?; + + tracing::debug!("Opened SHM segment, prefix: {id_prefix}, id: {id}"); + + Ok(Self { shmem, id }) + } + + fn os_id(id: ID, id_prefix: &str) -> String { + let os_id_str = format!("{id_prefix}_{id}"); + let crc_os_id_str = ECMA.checksum(os_id_str.as_bytes()); + format!("{:x}", crc_os_id_str) + } + + pub fn as_ptr(&self) -> *mut u8 { + self.shmem.as_ptr() + } + + /// Returns the length of this [`Segment`]. + /// NOTE: on some platforms (at least Windows) the returned len will be the actual length of the shm segment + /// (the requested len rounded up to the nearest multiple of the page size), while on others (at least Linux and macOS) it + /// returns the value requested upon segment creation + pub fn len(&self) -> usize { + self.shmem.len() + } + + // TODO: a dead code warning occurs because of the `tested_crate_module!()` macro when feature `test` is not enabled.
Better to fix that + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn id(&self) -> ID { + self.id.clone() + } +} diff --git a/commons/zenoh-shm/src/reader.rs b/commons/zenoh-shm/src/reader.rs new file mode 100644 index 0000000000..a62e8a147f --- /dev/null +++ b/commons/zenoh-shm/src/reader.rs @@ -0,0 +1,149 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::HashMap, ops::Deref, sync::Arc}; + +use zenoh_core::{bail, zerror}; +use zenoh_result::ZResult; + +use crate::{ + api::{ + client::shm_segment::ShmSegment, + client_storage::ShmClientStorage, + common::types::{ProtocolID, SegmentID}, + }, + header::subscription::GLOBAL_HEADER_SUBSCRIPTION, + watchdog::confirmator::GLOBAL_CONFIRMATOR, + ShmBufInfo, ShmBufInner, +}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct ShmReader { + client_storage: Arc<ShmClientStorage>, +} + +impl Deref for ShmReader { + type Target = ShmClientStorage; + + fn deref(&self) -> &Self::Target { + &self.client_storage + } +} + +impl ShmReader { + pub fn new(client_storage: Arc<ShmClientStorage>) -> Self { + Self { client_storage } + } + + pub fn read_shmbuf(&self, info: &ShmBufInfo) -> ZResult<ShmBufInner> { + // Read does not increment the reference count as it is assumed + // that the sender of this buffer has incremented it for us. + + // attach to the watchdog before doing other things + let watchdog = Arc::new(GLOBAL_CONFIRMATOR.read().add(&info.watchdog_descriptor)?); + + let segment = self.ensure_segment(info)?; + let shmb = ShmBufInner { + header: GLOBAL_HEADER_SUBSCRIPTION + .read() + .link(&info.header_descriptor)?, + buf: segment.map(info.data_descriptor.chunk)?, + info: info.clone(), + watchdog, + }; + + // Validate buffer + match shmb.is_valid() { + true => Ok(shmb), + false => bail!("Buffer is invalidated"), + } + } + + fn ensure_segment(&self, info: &ShmBufInfo) -> ZResult<Arc<dyn ShmSegment>> { + let id = GlobalDataSegmentID::new(info.shm_protocol, info.data_descriptor.segment); + + // fastest path: try to get access to an already mounted SHM segment + // the read lock allows concurrent execution of multiple requests + let r_guard = self.segments.read().unwrap(); + if let Some(val) = r_guard.get(&id) { + return Ok(val.clone()); + } + // fastest path failed: need to mount a new segment + + // drop the read lock because we're going to obtain the write lock below + drop(r_guard); + + // find an appropriate client + let client = self + .clients + .get_clients() + .get(&id.protocol) + .ok_or_else(|| zerror!("Unsupported SHM protocol: {}", id.protocol))?; + + // obtain write lock...
 + let mut w_guard = self.segments.write().unwrap(); + + // many concurrent threads may be racing to mount this particular segment, so we must check again if the segment exists + match w_guard.entry(id) { + // (rare case) segment already mounted + std::collections::hash_map::Entry::Occupied(occupied) => Ok(occupied.get().clone()), + + // (common case) mount a new segment and add it to the map + std::collections::hash_map::Entry::Vacant(vacant) => { + let new_segment = client.attach(info.data_descriptor.segment)?; + Ok(vacant.insert(new_segment).clone()) + } + } + } +} + +#[derive(Debug)] +pub(crate) struct ClientStorage<Inner> +where + Inner: Sized, +{ + clients: HashMap<ProtocolID, Inner>, +} + +impl<Inner: Sized> ClientStorage<Inner> { + pub(crate) fn new(clients: HashMap<ProtocolID, Inner>) -> Self { + Self { clients } + } + + pub(crate) fn get_clients(&self) -> &HashMap<ProtocolID, Inner> { + &self.clients + } +} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Send if the contained type is Send +unsafe impl<Inner: Sized + Send> Send for ClientStorage<Inner> {} + +/// # Safety +/// Only immutable access to internal container is allowed, +/// so we are Sync if the contained type is Sync +unsafe impl<Inner: Sized + Sync> Sync for ClientStorage<Inner> {} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct GlobalDataSegmentID { + protocol: ProtocolID, + segment: SegmentID, +} + +impl GlobalDataSegmentID { + fn new(protocol: ProtocolID, segment: SegmentID) -> Self { + Self { protocol, segment } + } +} diff --git a/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs new file mode 100644 index 0000000000..6293b157d3 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/allocated_watchdog.rs @@ -0,0 +1,35 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use super::{descriptor::OwnedDescriptor, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR}; + +#[derive(Debug)] +pub struct AllocatedWatchdog { + pub descriptor: OwnedDescriptor, +} + +impl AllocatedWatchdog { + pub(crate) fn new(descriptor: OwnedDescriptor) -> Self { + // reset descriptor on allocation + descriptor.validate(); + Self { descriptor } + } +} + +impl Drop for AllocatedWatchdog { + fn drop(&mut self) { + GLOBAL_VALIDATOR.read().remove(self.descriptor.clone()); + GLOBAL_STORAGE.read().free_watchdog(self.descriptor.clone()); + } +} diff --git a/commons/zenoh-shm/src/watchdog/confirmator.rs b/commons/zenoh-shm/src/watchdog/confirmator.rs new file mode 100644 index 0000000000..1a9ac0f04f --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/confirmator.rs @@ -0,0 +1,191 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, + time::Duration, +}; + +use static_init::dynamic; +use zenoh_result::{zerror, ZResult}; + +use super::{ + descriptor::{Descriptor, OwnedDescriptor, SegmentID}, + periodic_task::PeriodicTask, + segment::Segment, +}; + +#[dynamic(lazy, drop)] +pub static mut GLOBAL_CONFIRMATOR: WatchdogConfirmator = + WatchdogConfirmator::new(Duration::from_millis(50)); + +pub struct ConfirmedDescriptor { + pub owned: OwnedDescriptor, + confirmed: Arc<ConfirmedSegment>, +} + +impl Drop for ConfirmedDescriptor { + fn drop(&mut self) { + self.confirmed.remove(self.owned.clone()); + } +} + +impl ConfirmedDescriptor { + fn new(owned: OwnedDescriptor, confirmed: Arc<ConfirmedSegment>) -> Self { + owned.confirm(); + confirmed.add(owned.clone()); + Self { owned, confirmed } + } +} + +#[derive(PartialEq)] +enum Transaction { + Add, + Remove, +} + +struct ConfirmedSegment { + segment: Arc<Segment>, + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ConfirmedSegment { + fn new(segment: Arc<Segment>) -> Self { + Self { + segment, + transactions: lockfree::queue::Queue::default(), + } + } + + fn add(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Add, descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, watchdogs: &mut BTreeMap<OwnedDescriptor, usize>) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + // collect transactions + match watchdogs.entry(descriptor) { + std::collections::btree_map::Entry::Vacant(vacant) => { + #[cfg(feature = "test")] + assert!(transaction == Transaction::Add); + vacant.insert(1); + } + std::collections::btree_map::Entry::Occupied(mut occupied) => match transaction { + Transaction::Add => { + *occupied.get_mut() += 1; + } + Transaction::Remove => { + if *occupied.get() == 1 { + occupied.remove(); + } else { + *occupied.get_mut() -= 1; + } + } + }, + } + } + } +} +unsafe impl Send for ConfirmedSegment {} +unsafe impl Sync for ConfirmedSegment {} + +// TODO: optimize confirmation by packing descriptors AND linked table together +// TODO: think about linked table cleanup +pub struct WatchdogConfirmator { + confirmed: RwLock<BTreeMap<SegmentID, Arc<ConfirmedSegment>>>, + segment_transactions: Arc<lockfree::queue::Queue<Arc<ConfirmedSegment>>>, + _task: PeriodicTask, +} + +impl WatchdogConfirmator { + fn new(interval: Duration) -> Self { + let segment_transactions = Arc::<lockfree::queue::Queue<Arc<ConfirmedSegment>>>::default(); + + let c_segment_transactions = segment_transactions.clone(); + let mut segments: Vec<(Arc<ConfirmedSegment>, BTreeMap<OwnedDescriptor, usize>)> = vec![]; + let task = PeriodicTask::new("Watchdog Confirmator".to_owned(), interval, move || { + // add new segments + while let Some(new_segment) = c_segment_transactions.as_ref().pop() { + segments.push((new_segment, BTreeMap::default())); + } + + // collect all existing transactions + for (segment, watchdogs) in &mut segments { + segment.collect_transactions(watchdogs); + } + + // confirm all tracked watchdogs + for (_, watchdogs) in &segments { + for watchdog in watchdogs { + watchdog.0.confirm(); + } + } + }); + + Self { + confirmed: RwLock::default(), + segment_transactions, + _task: task, + } + } + + pub fn add_owned(&self, descriptor: &OwnedDescriptor) -> ZResult<ConfirmedDescriptor> { + self.add(&Descriptor::from(descriptor)) + } + + pub fn add(&self, descriptor: &Descriptor) -> ZResult<ConfirmedDescriptor> { + let guard = self.confirmed.read().map_err(|e| zerror!("{e}"))?; + if let Some(segment) = guard.get(&descriptor.id) { + return
self.link(descriptor, segment); + } + drop(guard); + + let segment = Arc::new(Segment::open(descriptor.id)?); + let confirmed_segment = Arc::new(ConfirmedSegment::new(segment)); + let confirmed_descriptor = self.link(descriptor, &confirmed_segment); + + let mut guard = self.confirmed.write().map_err(|e| zerror!("{e}"))?; + match guard.entry(descriptor.id) { + std::collections::btree_map::Entry::Vacant(vacant) => { + vacant.insert(confirmed_segment.clone()); + self.segment_transactions.push(confirmed_segment); + confirmed_descriptor + } + std::collections::btree_map::Entry::Occupied(occupied) => { + self.link(descriptor, occupied.get()) + } + } + } + + fn link( + &self, + descriptor: &Descriptor, + segment: &Arc<ConfirmedSegment>, + ) -> ZResult<ConfirmedDescriptor> { + let index = descriptor.index_and_bitpos >> 6; + let bitpos = descriptor.index_and_bitpos & 0x3f; + + let atomic = unsafe { segment.segment.array.elem(index) }; + let mask = 1u64 << bitpos; + + let owned = OwnedDescriptor::new(segment.segment.clone(), atomic, mask); + let confirmed = ConfirmedDescriptor::new(owned, segment.clone()); + Ok(confirmed) + } +} diff --git a/commons/zenoh-shm/src/watchdog/descriptor.rs b/commons/zenoh-shm/src/watchdog/descriptor.rs new file mode 100644 index 0000000000..38fddd61e8 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/descriptor.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + hash::Hash, + sync::{atomic::AtomicU64, Arc}, +}; + +use super::segment::Segment; + +pub type SegmentID = u32; + +#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord, Debug)] +pub struct Descriptor { + pub id: SegmentID, + pub index_and_bitpos: u32, +} + +impl From<&OwnedDescriptor> for Descriptor { + fn from(item: &OwnedDescriptor) -> Self { + let bitpos = { + // TODO: can be optimized + let mut v = item.mask; + let mut bitpos = 0u32; + while v > 1 { + bitpos += 1; + v >>= 1; + } + bitpos + }; + let index = unsafe { item.segment.array.index(item.atomic) }; + let index_and_bitpos = (index << 6) | bitpos; + Descriptor { + id: item.segment.array.id(), + index_and_bitpos, + } + } +} + +#[derive(Clone, Debug)] +pub struct OwnedDescriptor { + segment: Arc<Segment>, + pub atomic: *const AtomicU64, + pub mask: u64, +} + +unsafe impl Send for OwnedDescriptor {} +unsafe impl Sync for OwnedDescriptor {} + +impl Hash for OwnedDescriptor { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + self.atomic.hash(state); + self.mask.hash(state); + } +} + +impl OwnedDescriptor { + pub(crate) fn new(segment: Arc<Segment>, atomic: *const AtomicU64, mask: u64) -> Self { + Self { + segment, + atomic, + mask, + } + } + + pub fn confirm(&self) { + unsafe { + (*self.atomic).fetch_or(self.mask, std::sync::atomic::Ordering::SeqCst); + }; + } + + pub(crate) fn validate(&self) -> u64 { + unsafe { + (*self.atomic).fetch_and(!self.mask, std::sync::atomic::Ordering::SeqCst) & self.mask + } + } + + #[cfg(feature = "test")] + pub fn test_validate(&self) -> u64 { + self.validate() + } +} + +impl Ord for OwnedDescriptor { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.atomic.cmp(&other.atomic) { + core::cmp::Ordering::Equal => {} + ord => return ord, + }
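 + // the atomic pointers are equal, so fall back to ordering by the bit mask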
 self.mask.cmp(&other.mask) + } +} + +impl PartialOrd for OwnedDescriptor { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl PartialEq for OwnedDescriptor { + fn eq(&self, other: &Self) -> bool { + self.atomic == other.atomic && self.mask == other.mask + } +} +impl Eq for OwnedDescriptor {} diff --git a/commons/zenoh-shm/src/watchdog/mod.rs b/commons/zenoh-shm/src/watchdog/mod.rs new file mode 100644 index 0000000000..55267a5442 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/mod.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub mod descriptor; + +tested_crate_module!(periodic_task); +tested_crate_module!(storage); +tested_crate_module!(validator); +tested_crate_module!(confirmator); + +pub(crate) mod allocated_watchdog; + +mod segment; diff --git a/commons/zenoh-shm/src/watchdog/periodic_task.rs b/commons/zenoh-shm/src/watchdog/periodic_task.rs new file mode 100644 index 0000000000..f68203df54 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/periodic_task.rs @@ -0,0 +1,101 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use thread_priority::ThreadBuilder; +#[cfg(unix)] +use thread_priority::{ + set_current_thread_priority, RealtimeThreadSchedulePolicy, ThreadPriority, ThreadPriorityValue, + ThreadSchedulePolicy::Realtime, +}; + +pub struct PeriodicTask { + running: Arc<AtomicBool>, +} + +impl Drop for PeriodicTask { + fn drop(&mut self) { + self.running.store(false, Ordering::Relaxed); + } +} + +impl PeriodicTask { + pub fn new<F>(name: String, interval: Duration, mut f: F) -> Self + where + F: FnMut() + Send + 'static, + { + let running = Arc::new(AtomicBool::new(true)); + + let c_running = running.clone(); + + #[cfg(unix)] + let builder = ThreadBuilder::default() + .name(name) + .policy(Realtime(RealtimeThreadSchedulePolicy::Fifo)) + .priority(ThreadPriority::Min); + + // TODO: deal with windows realtime scheduling + #[cfg(windows)] + let builder = ThreadBuilder::default().name(name); + + let _ = builder.spawn(move |result| { + if let Err(e) = result { + #[cfg(windows)] + tracing::warn!("{:?}: error setting scheduling priority for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + #[cfg(unix)] + { + tracing::warn!("{:?}: error setting realtime FIFO scheduling policy for thread: {:?}, will run with the default one...", std::thread::current().name(), e); + for priority in (ThreadPriorityValue::MIN..ThreadPriorityValue::MAX).rev() { + if let Ok(p) = priority.try_into() { + if set_current_thread_priority(ThreadPriority::Crossplatform(p)).is_ok() { + tracing::warn!("{:?}: will use priority {}", std::thread::current().name(), priority); + break; + } + } + } + } + } + + // TODO: need mlock here! + + while c_running.load(Ordering::Relaxed) { + let cycle_start = std::time::Instant::now(); + + f(); + + // sleep for next iteration + let elapsed = cycle_start.elapsed(); + if elapsed < interval { + let sleep_interval = interval - elapsed; + std::thread::sleep(sleep_interval); + } else { + let err = format!("{:?}: timer overrun", std::thread::current().name()); + #[cfg(not(feature = "test"))] + tracing::error!("{err}"); + #[cfg(feature = "test")] + panic!("{err}"); + } + } + }); + + Self { running } + } +} diff --git a/commons/zenoh-shm/src/watchdog/segment.rs b/commons/zenoh-shm/src/watchdog/segment.rs new file mode 100644 index 0000000000..5943a10153 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/segment.rs @@ -0,0 +1,40 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::atomic::AtomicU64; + +use zenoh_result::ZResult; + +use super::descriptor::SegmentID; +use crate::posix_shm::array::ArrayInSHM; + +const WATCHDOG_SEGMENT_PREFIX: &str = "watchdog"; + +#[derive(Debug)] +pub struct Segment { + pub array: ArrayInSHM<SegmentID, AtomicU64, u32>, +} + +impl Segment { + pub fn create(watchdog_count: usize) -> ZResult<Self> { + let elem_count = (watchdog_count + 63) / 64; + let array = ArrayInSHM::create(elem_count, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } + + pub fn open(id: SegmentID) -> ZResult<Self> { + let array = ArrayInSHM::open(id, WATCHDOG_SEGMENT_PREFIX)?; + Ok(Self { array }) + } +} diff --git a/commons/zenoh-shm/src/watchdog/storage.rs b/commons/zenoh-shm/src/watchdog/storage.rs new file mode 100644 index 0000000000..ff9772961c --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/storage.rs @@ -0,0 +1,75 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + collections::BTreeSet, + sync::{Arc, Mutex}, +}; + +use static_init::dynamic; +use zenoh_result::{zerror, ZResult}; + +use super::{allocated_watchdog::AllocatedWatchdog, descriptor::OwnedDescriptor, segment::Segment}; + +#[dynamic(lazy, drop)] +pub static mut GLOBAL_STORAGE: WatchdogStorage = WatchdogStorage::new(32768usize).unwrap(); + +pub struct WatchdogStorage { + available: Arc<Mutex<BTreeSet<OwnedDescriptor>>>, +} + +// TODO: expand and shrink Storage when needed +// OR +// support multiple descriptor assignment (allow multiple buffers to be assigned to the same watchdog) +impl WatchdogStorage { + pub fn new(initial_watchdog_count: usize) -> ZResult<Self> { + let segment = Arc::new(Segment::create(initial_watchdog_count)?); + + let mut initially_available = BTreeSet::default(); + let subsegments = segment.array.elem_count(); + for subsegment in 0..subsegments { + let atomic = unsafe { segment.array.elem(subsegment as u32) }; + + for bit in 0..64 { + let mask = 1u64 << bit; + let descriptor = OwnedDescriptor::new(segment.clone(), atomic, mask); + let _new_insert = initially_available.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } + + Ok(Self { + available: Arc::new(Mutex::new(initially_available)), + }) + } + + pub fn allocate_watchdog(&self) -> ZResult<AllocatedWatchdog> { + let mut guard = self.available.lock().map_err(|e| zerror!("{e}"))?; + let popped = guard.pop_first(); + drop(guard); + + let allocated = + AllocatedWatchdog::new(popped.ok_or_else(|| zerror!("no free watchdogs available"))?); + + Ok(allocated) + } + + pub(crate) fn free_watchdog(&self, descriptor: OwnedDescriptor) { + if let Ok(mut guard) = self.available.lock() { + let _new_insert = guard.insert(descriptor); + #[cfg(feature = "test")] + assert!(_new_insert); + } + } +} diff --git a/commons/zenoh-shm/src/watchdog/validator.rs b/commons/zenoh-shm/src/watchdog/validator.rs new file mode 100644 index 0000000000..5becefb547 --- /dev/null +++ b/commons/zenoh-shm/src/watchdog/validator.rs @@ -0,0 +1,101 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the
Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use static_init::dynamic; + +use super::{descriptor::OwnedDescriptor, periodic_task::PeriodicTask}; + +pub(super) type InvalidateCallback = Box<dyn Fn() + Send>; + +#[dynamic(lazy, drop)] +pub static mut GLOBAL_VALIDATOR: WatchdogValidator = + WatchdogValidator::new(Duration::from_millis(100)); + +enum Transaction { + Add(InvalidateCallback), + Remove, +} + +#[derive(Default)] +struct ValidatedStorage { + transactions: lockfree::queue::Queue<(Transaction, OwnedDescriptor)>, +} + +impl ValidatedStorage { + fn add(&self, descriptor: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.transactions + .push((Transaction::Add(on_invalidated), descriptor)); + } + + fn remove(&self, descriptor: OwnedDescriptor) { + self.transactions.push((Transaction::Remove, descriptor)); + } + + fn collect_transactions(&self, storage: &mut BTreeMap<OwnedDescriptor, InvalidateCallback>) { + while let Some((transaction, descriptor)) = self.transactions.pop() { + match transaction { + Transaction::Add(on_invalidated) => { + let _old = storage.insert(descriptor, on_invalidated); + #[cfg(feature = "test")] + assert!(_old.is_none()); + } + Transaction::Remove => { + let _ = storage.remove(&descriptor); + } + } + } + } +} + +// TODO: optimize validation by packing descriptors +pub struct WatchdogValidator { + storage: Arc<ValidatedStorage>, + _task: PeriodicTask, +} + +impl WatchdogValidator { + pub fn new(interval: Duration) -> Self { + let storage = Arc::new(ValidatedStorage::default()); + + let c_storage = storage.clone(); + let mut watchdogs = BTreeMap::default(); + let task = PeriodicTask::new("Watchdog Validator".to_owned(), interval, move || { + c_storage.collect_transactions(&mut watchdogs); + + watchdogs.retain(|watchdog, on_invalidated| { + let old_val = watchdog.validate(); + if old_val == 0 { + on_invalidated(); + return false; + } + true + }); + }); + + Self { + storage, + _task: task, + } + } + + pub fn add(&self, watchdog: OwnedDescriptor, on_invalidated: InvalidateCallback) { + self.storage.add(watchdog, on_invalidated); + } + + pub fn remove(&self, watchdog: OwnedDescriptor) { + self.storage.remove(watchdog); + } +} diff --git a/commons/zenoh-shm/tests/common/mod.rs b/commons/zenoh-shm/tests/common/mod.rs new file mode 100644 index 0000000000..e1d4222b0e --- /dev/null +++ b/commons/zenoh-shm/tests/common/mod.rs @@ -0,0 +1,106 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + sync::{atomic::AtomicBool, Arc}, + thread::JoinHandle, +}; + +use zenoh_result::ZResult; + +pub const TEST_SEGMENT_PREFIX: &str = "test"; + +pub fn validate_memory(mem1: &mut [u8], mem2: &[u8]) { + assert!(mem1.len() == mem2.len()); + for cycle in 0..255u8 { + // sequentially fill segment1 with values, checking that segment2 sees these changes + for i in 0..mem1.len() { + mem1[i] = cycle; + assert!(mem2[i] == cycle); + } + + // check that the whole segment2 has the proper values + for i in mem2 { + assert!(*i == cycle); + } + } +} + +pub fn execute_concurrent<TaskFun>(concurrent_tasks: usize, iterations: usize, task_fun: TaskFun) +where + TaskFun: Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static, +{ + let mut tasks = vec![]; + for task_index in 0..concurrent_tasks { + let c_task_fun = task_fun.clone(); + let task_handle = std::thread::spawn(move || { + for iteration in 0..iterations { + if let Err(e) = c_task_fun(task_index, iteration) { + panic!("task {task_index}: iteration {iteration}: {e}") + } + } + }); + tasks.push(task_handle); + } + for task in tasks { + task.join().expect("Error joining thread!"); + } +} + +pub fn load_fn( + working: Arc<AtomicBool>, +) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + move |_task_index: usize, _iteration: usize| -> ZResult<()> { + while working.load(std::sync::atomic::Ordering::SeqCst) {} + Ok(()) + } +} + +pub struct CpuLoad { + handle: Option<JoinHandle<()>>, + flag: Arc<AtomicBool>, +} + +impl Drop for CpuLoad { + fn drop(&mut self) { + self.flag.store(false, std::sync::atomic::Ordering::SeqCst); + let _ = self.handle.take().unwrap().join(); + } +} + +impl CpuLoad { + pub fn excessive() -> Self { + Self::new(1000) + } + + #[cfg(feature = "test")] + pub fn optimal_high() -> Self { + Self::new(num_cpus::get()) + } + + pub fn low() -> Self { + Self::new(1) + } + + fn new(thread_count: usize) -> Self { + let flag = Arc::new(AtomicBool::new(true)); + + let c_flag = flag.clone(); + let handle = Some(std::thread::spawn(move || { + execute_concurrent(thread_count, 1, load_fn(c_flag)); + })); + + Self { handle, flag } + } +} diff --git a/commons/zenoh-shm/tests/header.rs b/commons/zenoh-shm/tests/header.rs new file mode 100644 index 0000000000..747757a3b2 --- /dev/null +++ b/commons/zenoh-shm/tests/header.rs @@ -0,0 +1,130 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#![cfg(feature = "test")] +use std::sync::atomic::Ordering::Relaxed; + +use rand::Rng; +use zenoh_result::ZResult; +use zenoh_shm::header::{ + descriptor::HeaderDescriptor, storage::GLOBAL_HEADER_STORAGE, + subscription::GLOBAL_HEADER_SUBSCRIPTION, +}; + +pub mod common; +use common::execute_concurrent; + +fn header_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| -> ZResult<()> { + let _allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; + Ok(()) + } +} + +#[test] +fn header_alloc() { + execute_concurrent(1, 1000, header_alloc_fn()); +} + +#[test] +fn header_alloc_concurrent() { + execute_concurrent(100, 1000, header_alloc_fn()); +} + +fn header_link_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static { + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link() { + execute_concurrent(1, 1000, header_link_fn()); +} + +#[test] +fn header_link_concurrent() { + execute_concurrent(100, 1000, header_link_fn()); +} + +fn header_link_failure_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static +{ + |_task_index: usize, _iteration: usize| { + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + drop(allocated_header); + + // Some comments on this behaviour... + // Even though the allocated_header is dropped, its SHM segment still exists in GLOBAL_HEADER_STORAGE, + // so there is no way to detect that the header is "deallocated" and the code below succeeds. The invalidation + // functionality is implemented at a higher level by means of a generation mechanism, and it protects against both header + // and watchdog link-to-deallocated issues.
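 + // (for illustration: a hypothetical consumer would compare a buffer's stored generation against the header's current generation and reject such a stale link)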
This generation mechanism depends on the behaviour below, so + // everything is fair :) + let _linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; + Ok(()) + } +} + +#[test] +fn header_link_failure() { + execute_concurrent(1, 1000, header_link_failure_fn()); +} + +#[test] +fn header_link_failure_concurrent() { + execute_concurrent(100, 1000, header_link_failure_fn()); +} + +fn header_check_memory_fn(parallel_tasks: usize, iterations: usize) { + let task_fun = |_task_index: usize, _iteration: usize| -> ZResult<()> { + let allocated_header = GLOBAL_HEADER_STORAGE.read().allocate_header()?; + let descr = HeaderDescriptor::from(&allocated_header.descriptor); + let linked_header = GLOBAL_HEADER_SUBSCRIPTION.read().link(&descr)?; + + let mut rng = rand::thread_rng(); + let allocated = allocated_header.descriptor.header(); + let linked = linked_header.header(); + for _ in 0..100 { + let gen = rng.gen(); + allocated.generation.store(gen, Relaxed); + assert_eq!(gen, linked.generation.load(Relaxed)); + + let rc = rng.gen(); + allocated.refcount.store(rc, Relaxed); + assert_eq!(rc, linked.refcount.load(Relaxed)); + + let watchdog_inv = rng.gen(); + allocated.watchdog_invalidated.store(watchdog_inv, Relaxed); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + + assert_eq!(gen, linked.generation.load(Relaxed)); + assert_eq!(rc, linked.refcount.load(Relaxed)); + assert_eq!(watchdog_inv, linked.watchdog_invalidated.load(Relaxed)); + } + Ok(()) + }; + execute_concurrent(parallel_tasks, iterations, task_fun); +} + +#[test] +fn header_check_memory() { + header_check_memory_fn(1, 1000); +} + +#[test] +fn header_check_memory_concurrent() { + header_check_memory_fn(100, 100); +} diff --git a/commons/zenoh-shm/tests/periodic_task.rs b/commons/zenoh-shm/tests/periodic_task.rs new file mode 100644 index 0000000000..7465c9bcd3 --- /dev/null +++ b/commons/zenoh-shm/tests/periodic_task.rs @@ -0,0 +1,172 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#![cfg(feature = "test")] +use std::{ + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; + +use zenoh_shm::watchdog::periodic_task::PeriodicTask; + +pub mod common; +use common::CpuLoad; + +const TASK_PERIOD: Duration = Duration::from_millis(50); +const TASK_DELTA: Duration = Duration::from_millis(5); +const TEST_TASK: Duration = Duration::from_millis(10); + +fn intensive_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + let start = Instant::now(); + while start.elapsed() < duration { + for _i in 0..100 {} + } + } +} + +fn blocking_payload(duration: Duration) -> impl Fn() + Send + 'static { + move || { + std::thread::sleep(duration); + } +} + +fn check_duration(duration: &Duration) { + let min = TASK_PERIOD - TASK_DELTA; + let max = TASK_PERIOD + TASK_DELTA; + + assert!(min <= *duration && *duration <= max); +} + +fn make_task<F>(task_payload: F) -> (PeriodicTask, Arc<Mutex<Vec<Duration>>>) +where + F: Fn() + Send + 'static, +{ + let intervals = Arc::new(Mutex::new(vec![])); + + let c_intervals = intervals.clone(); + let mut start: Option<Instant> = None; + let task = PeriodicTask::new("test".to_owned(), TASK_PERIOD, move || { + if let Some(val) = &start { + let elapsed = val.elapsed(); + c_intervals.lock().unwrap().push(elapsed); + } + start = Some(Instant::now()); + task_payload(); + }); + + (task, intervals) +} + +#[test] +#[ignore] +fn periodic_task_create() { + let (_task, _intervals) = make_task(|| {}); +} + +fn check_task<F>(task_payload: F) +where + F: Fn() + Send + 'static, +{ + let n = 100; + let (task, intervals) = make_task(task_payload); + + std::thread::sleep(TASK_PERIOD * n); + drop(task); + + let guard = intervals.lock().unwrap(); + for duration in &*guard { + check_duration(duration); + } +} + +#[test] +#[ignore] +fn periodic_task_lightweight() { + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_blocking() { + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_intensive() { + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_lightweight() { + let _load = CpuLoad::low(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_low_load_blocking() { + let _load = CpuLoad::low(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_low_load_intensive() { + let _load = CpuLoad::low(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_lightweight() { + let _load = CpuLoad::optimal_high(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_blocking() { + let _load = CpuLoad::optimal_high(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_optimal_high_load_intensive() { + let _load = CpuLoad::optimal_high(); + check_task(intensive_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_excessive_load_lightweight() { + let _load = CpuLoad::excessive(); + check_task(|| {}); +} + +#[test] +#[ignore] +fn periodic_task_excessive_load_blocking() { + let _load = CpuLoad::excessive(); + check_task(blocking_payload(TEST_TASK)); +} + +#[test] +#[ignore] +fn periodic_task_excessive_load_intensive() { + let _load = CpuLoad::excessive(); + check_task(intensive_payload(TEST_TASK)); +} diff --git a/commons/zenoh-shm/tests/posix_array.rs b/commons/zenoh-shm/tests/posix_array.rs new file mode 100644 index 0000000000..83fdad88fb ---
/dev/null +++ b/commons/zenoh-shm/tests/posix_array.rs @@ -0,0 +1,161 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{fmt::Debug, mem::size_of}; + +use num_traits::{AsPrimitive, PrimInt, Unsigned}; +use zenoh_shm::posix_shm::array::ArrayInSHM; + +pub mod common; +use common::TEST_SEGMENT_PREFIX; + +type TestSegmentID = u32; + +#[derive(Debug)] +#[stabby::stabby] +struct TestElem { + value: u32, +} + +impl TestElem { + fn fill(&mut self, counter: &mut u32) { + self.value = *counter; + *counter += 1; + } + + fn validate(&self, counter: &mut u32) { + assert_eq!(self.value, *counter); + *counter += 1; + } +} + +fn validate_array<ElemIndex>( + created_array: &mut ArrayInSHM<TestSegmentID, TestElem, ElemIndex>, + opened_array: &ArrayInSHM<TestSegmentID, TestElem, ElemIndex>, + expected_elem_count: usize, +) where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive<usize>, + isize: AsPrimitive<ElemIndex>, + usize: AsPrimitive<ElemIndex>, +{ + assert!(created_array.elem_count() == expected_elem_count); + assert!(opened_array.elem_count() >= expected_elem_count); + + let mut fill_ctr = 0; + let mut validate_ctr = 0; + + // first of all, fill and validate elements sequentially + for i in 0..expected_elem_count { + unsafe { + let elem1 = &mut *created_array.elem_mut(i.as_()); + let elem2 = &*opened_array.elem(i.as_()); + + elem1.fill(&mut fill_ctr); + elem2.validate(&mut validate_ctr); + } + } + + // then fill all the elements... + for i in 0..expected_elem_count { + unsafe { + let elem1 = &mut *created_array.elem_mut(i.as_()); + elem1.fill(&mut fill_ctr); + } + } + + // ...and validate all the elements + for i in 0..expected_elem_count { + unsafe { + let elem2 = &*opened_array.elem(i.as_()); + elem2.validate(&mut validate_ctr); + } + } +} + +fn test_array<ElemIndex>() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive<usize>, + isize: AsPrimitive<ElemIndex>, + usize: AsPrimitive<ElemIndex>, +{ + // Estimate elem count to test + // NOTE: for index sizes <= 16 bit we use the whole index range to test, + // and for bigger indexes we use a limited index range + let elem_count = { + match size_of::<ElemIndex>() > size_of::<u16>() { + true => 100, + false => ElemIndex::max_value().as_() + 1, + } + }; + + let mut new_arr: ArrayInSHM<TestSegmentID, TestElem, ElemIndex> = + ArrayInSHM::create(elem_count, TEST_SEGMENT_PREFIX).expect("error creating new array!"); + + let opened_arr: ArrayInSHM<_, TestElem, ElemIndex> = + ArrayInSHM::open(new_arr.id(), TEST_SEGMENT_PREFIX).expect("error opening existing array!"); + + validate_array(&mut new_arr, &opened_arr, elem_count); +} + +/// MEMORY CHECKS /// + +#[test] +fn arr_u8_index_memory_test() { + test_array::<u8>(); +} + +#[test] +fn arr_u16_index_memory_test() { + test_array::<u16>(); +} + +#[test] +fn arr_u32_index_memory_test() { + test_array::<u32>(); +} + +/// ELEM COUNT CHECKS /// + +fn test_invalid_elem_index<ElemIndex>() +where + ElemIndex: Unsigned + PrimInt + 'static + AsPrimitive<usize> + Debug, + isize: AsPrimitive<ElemIndex>, + usize: AsPrimitive<ElemIndex>, +{ + let invalid_elem_count = ElemIndex::max_value().as_() + 2; + + let _ = ArrayInSHM::<TestSegmentID, TestElem, ElemIndex>::create( + invalid_elem_count, + TEST_SEGMENT_PREFIX, + ) + .expect_err( + format!("must fail: element count {invalid_elem_count} is out of range for ElemIndex!") + .as_str(), + ); +} + +#[test] +fn arr_u8_index_invalid_elem_count() {
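 + // for u8 the maximum addressable element count is 256, so max_value + 2 = 257 must be rejected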
 test_invalid_elem_index::<u8>(); +} + +#[test] +fn arr_u16_index_invalid_elem_count() { + test_invalid_elem_index::<u16>(); +} + +#[test] +fn arr_u32_index_invalid_elem_count() { + test_invalid_elem_index::<u32>(); +} diff --git a/commons/zenoh-shm/tests/posix_segment.rs b/commons/zenoh-shm/tests/posix_segment.rs new file mode 100644 index 0000000000..879fccf298 --- /dev/null +++ b/commons/zenoh-shm/tests/posix_segment.rs @@ -0,0 +1,142 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#![cfg(feature = "test")] +use std::{fmt::Display, slice}; + +use zenoh_shm::posix_shm::segment::Segment; + +pub mod common; +use common::{validate_memory, TEST_SEGMENT_PREFIX}; + +fn validate_segment<ID>( + created_segment: &Segment<ID>, + opened_segment: &Segment<ID>, + expected_elem_count: usize, +) where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Clone + Display, +{ + assert!(created_segment.len() == expected_elem_count); + assert!(opened_segment.len() >= expected_elem_count); + + let ptr1 = created_segment.as_ptr(); + let ptr2 = opened_segment.as_ptr(); + + let slice1 = unsafe { slice::from_raw_parts_mut(ptr1, expected_elem_count) }; + let slice2 = unsafe { slice::from_raw_parts(ptr2, expected_elem_count) }; + + validate_memory(slice1, slice2); +} + +fn test_segment<ID>() +where + rand::distributions::Standard: rand::distributions::Distribution<ID>, + ID: Copy + Clone + Display, +{ + let elem_count = 900; + + let created_segment: Segment<ID> = + Segment::create(elem_count, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let opened_segment_instance_1 = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&created_segment, &opened_segment_instance_1, elem_count); + + let opened_segment_instance_2 = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); + + validate_segment(&created_segment, &opened_segment_instance_1, elem_count); + validate_segment(&created_segment, &opened_segment_instance_2, elem_count); + + drop(opened_segment_instance_1); + validate_segment(&created_segment, &opened_segment_instance_2, elem_count); +} + +/// UNSIGNED /// + +#[test] +fn segment_u8_id() { + test_segment::<u8>() +} + +#[test] +fn segment_u16_id() { + test_segment::<u16>() +} + +#[test] +fn segment_u32_id() { + test_segment::<u32>() +} + +#[test] +fn segment_u64_id() { + test_segment::<u64>() +} + +#[test] +fn segment_u128_id() { + test_segment::<u128>() +} + +/// SIGNED /// + +#[test] +fn segment_i8_id() { + test_segment::<i8>() +} + +#[test] +fn segment_i16_id() { + test_segment::<i16>() +} + +#[test] +fn segment_i32_id() { + test_segment::<i32>() +} + +#[test] +fn segment_i64_id() { + test_segment::<i64>() +} + +#[test] +fn segment_i128_id() { + test_segment::<i128>() +} + +/// Behaviour checks /// + +#[test] +fn segment_open() { + let created_segment: Segment<u8> = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + + let _opened_segment = Segment::open(created_segment.id(), TEST_SEGMENT_PREFIX) + .expect("error opening existing segment!"); +} + +#[test] +fn segment_open_error() { + let id = { + let created_segment:
Segment<u8> = + Segment::create(900, TEST_SEGMENT_PREFIX).expect("error creating new segment"); + created_segment.id() + }; + + let _opened_segment = Segment::open(id, TEST_SEGMENT_PREFIX) + .expect_err("must fail: opened a non-existing segment!"); +} diff --git a/commons/zenoh-shm/tests/posix_shm_provider.rs b/commons/zenoh-shm/tests/posix_shm_provider.rs new file mode 100644 index 0000000000..60104be6cf --- /dev/null +++ b/commons/zenoh-shm/tests/posix_shm_provider.rs @@ -0,0 +1,116 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh_shm::api::{ + client::shm_client::ShmClient, + protocol_implementations::posix::{ + posix_shm_client::PosixShmClient, posix_shm_provider_backend::PosixShmProviderBackend, + }, + provider::{ + shm_provider_backend::ShmProviderBackend, + types::{AllocAlignment, MemoryLayout}, + }, +}; + +static BUFFER_NUM: usize = 100; +static BUFFER_SIZE: usize = 1024; + +#[test] +fn posix_shm_provider_create() { + let _backend = PosixShmProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixShmProviderBackend!"); +} + +#[test] +fn posix_shm_provider_alloc() { + let backend = PosixShmProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixShmProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let _buf = backend + .alloc(&layout) + .expect("PosixShmProviderBackend: error allocating buffer"); +} + +#[test] +fn posix_shm_provider_open() { + let backend = PosixShmProviderBackend::builder() + .with_size(1024) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixShmProviderBackend!"); + + let layout = MemoryLayout::new(100, AllocAlignment::default()).unwrap(); + + let buf = backend + .alloc(&layout) + .expect("PosixShmProviderBackend: error allocating buffer"); + + let client = PosixShmClient {}; + + let _segment = client + .attach(buf.descriptor.segment) + .expect("Error attaching to segment"); +} + +#[test] +fn posix_shm_provider_allocator() { + let backend = PosixShmProviderBackend::builder() + .with_size(BUFFER_SIZE * BUFFER_NUM) + .expect("Error creating Layout!") + .res() + .expect("Error creating PosixShmProviderBackend!"); + + let layout = MemoryLayout::new(BUFFER_SIZE, AllocAlignment::default()).unwrap(); + + // exhaust memory by allocating it all + let mut buffers = vec![]; + for _ in 0..BUFFER_NUM { + let buf = backend + .alloc(&layout) + .expect("PosixShmProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + for _ in 0..BUFFER_NUM { + // there is nothing to allocate at this point + assert_eq!(backend.available(), 0); + assert!(backend.alloc(&layout).is_err()); + + // free buffer + let to_free = buffers.pop().unwrap().descriptor; + backend.free(&to_free); + + // allocate new one + let buf = backend + .alloc(&layout) + .expect("PosixShmProviderBackend: error allocating buffer"); + buffers.push(buf); + } + + // free everything + while let Some(buffer) = buffers.pop() { + backend.free(&buffer.descriptor); + } + + // confirm that allocator is free
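 + // (all buffers were freed above, so the full capacity should be reported as available again)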
+    assert_eq!(backend.available(), BUFFER_NUM * BUFFER_SIZE);
+}
diff --git a/commons/zenoh-shm/tests/watchdog.rs b/commons/zenoh-shm/tests/watchdog.rs
new file mode 100644
index 0000000000..bc4a75dfa9
--- /dev/null
+++ b/commons/zenoh-shm/tests/watchdog.rs
@@ -0,0 +1,318 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+#![cfg(feature = "test")]
+use std::{
+    sync::{atomic::AtomicBool, Arc},
+    time::Duration,
+};
+
+use zenoh_result::{bail, ZResult};
+use zenoh_shm::watchdog::{
+    confirmator::GLOBAL_CONFIRMATOR, storage::GLOBAL_STORAGE, validator::GLOBAL_VALIDATOR,
+};
+
+pub mod common;
+use common::{execute_concurrent, CpuLoad};
+
+const VALIDATION_PERIOD: Duration = Duration::from_millis(100);
+const CONFIRMATION_PERIOD: Duration = Duration::from_millis(50);
+
+fn watchdog_alloc_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let _allocated = GLOBAL_STORAGE.read().allocate_watchdog()?;
+        Ok(())
+    }
+}
+
+#[test]
+fn watchdog_alloc() {
+    execute_concurrent(1, 10000, watchdog_alloc_fn());
+}
+
+#[test]
+fn watchdog_alloc_concurrent() {
+    execute_concurrent(1000, 10000, watchdog_alloc_fn());
+}
+
+fn watchdog_confirmed_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let allocated = GLOBAL_STORAGE.read().allocate_watchdog()?;
+        let confirmed = GLOBAL_CONFIRMATOR.read().add_owned(&allocated.descriptor)?;
+
+        // check that the confirmed watchdog stays valid
+        for i in 0..10 {
+            std::thread::sleep(VALIDATION_PERIOD);
+            let valid = confirmed.owned.test_validate() != 0;
+            if !valid {
+                bail!("Invalid watchdog, iteration {i}");
+            }
+        }
+        Ok(())
+    }
+}
+
+#[test]
+#[ignore]
+fn watchdog_confirmed() {
+    execute_concurrent(1, 10, watchdog_confirmed_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_confirmed_concurrent() {
+    execute_concurrent(1000, 10, watchdog_confirmed_fn());
+}
+
+// TODO: a confirmation to a dangling watchdog actually writes to a potentially-existing
+// other watchdog instance from another test running in the same process and changes its behaviour,
+// so we cannot run the dangling test in parallel with anything else
+#[test]
+#[ignore]
+fn watchdog_confirmed_dangling() {
+    let allocated = GLOBAL_STORAGE
+        .read()
+        .allocate_watchdog()
+        .expect("error allocating watchdog!");
+    let confirmed = GLOBAL_CONFIRMATOR
+        .read()
+        .add_owned(&allocated.descriptor)
+        .expect("error adding watchdog to confirmator!");
+    drop(allocated);
+
+    // confirm the dangling (not allocated) watchdog
+    for _ in 0..10 {
+        std::thread::sleep(VALIDATION_PERIOD);
+        confirmed.owned.confirm();
+    }
+}
+
+fn watchdog_validated_fn() -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let allocated = GLOBAL_STORAGE.read().allocate_watchdog()?;
+        let confirmed = GLOBAL_CONFIRMATOR.read().add_owned(&allocated.descriptor)?;
+
+        let valid = Arc::new(AtomicBool::new(true));
+        {
+            let c_valid = valid.clone();
+            GLOBAL_VALIDATOR.read().add(
+                allocated.descriptor.clone(),
+                Box::new(move || {
+                    c_valid.store(false, std::sync::atomic::Ordering::SeqCst);
+                }),
+            );
+        }
+
+        // check that the watchdog stays valid as it is confirmed
+        for i in 0..10 {
+            std::thread::sleep(VALIDATION_PERIOD);
+            if !valid.load(std::sync::atomic::Ordering::SeqCst) {
+                bail!("Invalid watchdog, iteration {i}");
+            }
+        }
+
+        // Worst-case timings:
+        // validation:       |___________|___________|___________|___________|
+        // confirmation:   __|_____|_____|_____|_____|
+        // drop(confirmed):                          ^
+        // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2
+
+        // check that the watchdog becomes invalid once we stop its confirmation
+        drop(confirmed);
+        std::thread::sleep(VALIDATION_PERIOD * 3 + CONFIRMATION_PERIOD);
+        assert!(!valid.load(std::sync::atomic::Ordering::SeqCst));
+
+        Ok(())
+    }
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated() {
+    execute_concurrent(1, 10, watchdog_validated_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_concurrent() {
+    execute_concurrent(1000, 10, watchdog_validated_fn());
+}
+
+fn watchdog_validated_invalid_without_confirmator_fn(
+) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let allocated = GLOBAL_STORAGE
+            .read()
+            .allocate_watchdog()
+            .expect("error allocating watchdog!");
+
+        let valid = Arc::new(AtomicBool::new(true));
+        {
+            let c_valid = valid.clone();
+            GLOBAL_VALIDATOR.read().add(
+                allocated.descriptor.clone(),
+                Box::new(move || {
+                    c_valid.store(false, std::sync::atomic::Ordering::SeqCst);
+                }),
+            );
+        }
+
+        assert!(allocated.descriptor.test_validate() == 0);
+
+        // check that the watchdog becomes invalid because we do not confirm it
+        std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD);
+        assert!(!valid.load(std::sync::atomic::Ordering::SeqCst));
+        Ok(())
+    }
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_invalid_without_confirmator() {
+    execute_concurrent(1, 10, watchdog_validated_invalid_without_confirmator_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_invalid_without_confirmator_concurrent() {
+    execute_concurrent(
+        1000,
+        10,
+        watchdog_validated_invalid_without_confirmator_fn(),
+    );
+}
+
+fn watchdog_validated_additional_confirmation_fn(
+) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let allocated = GLOBAL_STORAGE
+            .read()
+            .allocate_watchdog()
+            .expect("error allocating watchdog!");
+        let confirmed = GLOBAL_CONFIRMATOR
+            .read()
+            .add_owned(&allocated.descriptor)
+            .expect("error adding watchdog to confirmator!");
+
+        let allow_invalid = Arc::new(AtomicBool::new(false));
+        {
+            let c_allow_invalid = allow_invalid.clone();
+            GLOBAL_VALIDATOR.read().add(
+                allocated.descriptor.clone(),
+                Box::new(move || {
+                    assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst));
+                    c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst);
+                }),
+            );
+        }
+
+        // make additional confirmations
+        for _ in 0..100 {
+            std::thread::sleep(VALIDATION_PERIOD / 10);
+            confirmed.owned.confirm();
+        }
+
+        // check that the watchdog stays valid after we stop the additional confirmations
+        std::thread::sleep(VALIDATION_PERIOD * 10);
+
+        // Worst-case timings:
+        // validation:       |___________|___________|___________|___________|
+        // confirmation:   __|_____|_____|_____|_____|
+        // drop(confirmed):                          ^
+        // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2
+
+        // check that the watchdog becomes invalid once we stop its regular confirmation
+        drop(confirmed);
+        allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst);
+        std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD);
+        // check that the invalidation event happened!
+        assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst));
+        Ok(())
+    }
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_additional_confirmation() {
+    execute_concurrent(1, 10, watchdog_validated_additional_confirmation_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_additional_confirmation_concurrent() {
+    execute_concurrent(1000, 10, watchdog_validated_additional_confirmation_fn());
+}
+
+fn watchdog_validated_overloaded_system_fn(
+) -> impl Fn(usize, usize) -> ZResult<()> + Clone + Send + Sync + 'static {
+    |_task_index: usize, _iteration: usize| -> ZResult<()> {
+        let allocated = GLOBAL_STORAGE
+            .read()
+            .allocate_watchdog()
+            .expect("error allocating watchdog!");
+        let confirmed = GLOBAL_CONFIRMATOR
+            .read()
+            .add_owned(&allocated.descriptor)
+            .expect("error adding watchdog to confirmator!");
+
+        let allow_invalid = Arc::new(AtomicBool::new(false));
+        {
+            let c_allow_invalid = allow_invalid.clone();
+            GLOBAL_VALIDATOR.read().add(
+                allocated.descriptor.clone(),
+                Box::new(move || {
+                    assert!(c_allow_invalid.load(std::sync::atomic::Ordering::SeqCst));
+                    c_allow_invalid.store(false, std::sync::atomic::Ordering::SeqCst);
+                }),
+            );
+        }
+
+        // check that the watchdog stays valid
+        std::thread::sleep(VALIDATION_PERIOD * 10);
+
+        // Worst-case timings:
+        // validation:       |___________|___________|___________|___________|
+        // confirmation:   __|_____|_____|_____|_____|
+        // drop(confirmed):                          ^
+        // It means that the worst-case latency for the watchdog to become invalid is VALIDATION_PERIOD*2
+
+        // check that the watchdog becomes invalid once we stop its regular confirmation
+        drop(confirmed);
+        allow_invalid.store(true, std::sync::atomic::Ordering::SeqCst);
+        std::thread::sleep(VALIDATION_PERIOD * 2 + CONFIRMATION_PERIOD);
+        // check that the invalidation event happened!
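+        // (In numbers, using the constants at the top of this file:
+        // VALIDATION_PERIOD = 100ms and CONFIRMATION_PERIOD = 50ms, so the
+        // worst case sketched in the diagram above is 2 * 100ms = 200ms,
+        // and the 250ms sleep leaves one confirmation period of margin.)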
+        assert!(!allow_invalid.load(std::sync::atomic::Ordering::SeqCst));
+        Ok(())
+    }
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_low_load() {
+    let _load = CpuLoad::low();
+    execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_high_load() {
+    let _load = CpuLoad::optimal_high();
+    execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn());
+}
+
+#[test]
+#[ignore]
+fn watchdog_validated_overloaded_system() {
+    let _load = CpuLoad::excessive();
+    execute_concurrent(1000, 10, watchdog_validated_overloaded_system_fn());
+}
diff --git a/commons/zenoh-sync/src/condition.rs b/commons/zenoh-sync/src/condition.rs
index ba615d8888..f824976b87 100644
--- a/commons/zenoh-sync/src/condition.rs
+++ b/commons/zenoh-sync/src/condition.rs
@@ -11,8 +11,9 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use event_listener::{Event, EventListener};
 use std::{pin::Pin, sync::MutexGuard};
+
+use event_listener::{Event, EventListener};
 use tokio::sync::MutexGuard as AsyncMutexGuard;
 
 pub type ConditionWaiter = Pin<Box<EventListener>>;
@@ -54,7 +55,7 @@ impl Condition {
     pub fn waiter<T>(&self, guard: MutexGuard<'_, T>) -> ConditionWaiter {
         let listener = self.event.listen();
         drop(guard);
-        listener
+        Box::pin(listener)
     }
 
     /// Notifies one pending listener
diff --git a/commons/zenoh-sync/src/event.rs b/commons/zenoh-sync/src/event.rs
new file mode 100644
index 0000000000..f1aa5b5b69
--- /dev/null
+++ b/commons/zenoh-sync/src/event.rs
@@ -0,0 +1,622 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{
+    fmt,
+    sync::{
+        atomic::{AtomicU16, AtomicU8, Ordering},
+        Arc,
+    },
+    time::{Duration, Instant},
+};
+
+use event_listener::{Event as EventLib, Listener};
+
+// Error types
+const WAIT_ERR_STR: &str = "No notifier available";
+pub struct WaitError;
+
+impl fmt::Display for WaitError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Debug for WaitError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(WAIT_ERR_STR)
+    }
+}
+
+impl std::error::Error for WaitError {}
+
+#[repr(u8)]
+pub enum WaitDeadlineError {
+    Deadline,
+    WaitError,
+}
+
+impl fmt::Display for WaitDeadlineError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Debug for WaitDeadlineError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Deadline => f.write_str("Deadline reached"),
+            Self::WaitError => f.write_str(WAIT_ERR_STR),
+        }
+    }
+}
+
+impl std::error::Error for WaitDeadlineError {}
+
+#[repr(u8)]
+pub enum WaitTimeoutError {
+    Timeout,
+    WaitError,
+}
+
+impl fmt::Display for WaitTimeoutError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Debug for WaitTimeoutError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Timeout => f.write_str("Timeout expired"),
+            Self::WaitError => f.write_str(WAIT_ERR_STR),
+        }
+    }
+}
+
+impl std::error::Error for WaitTimeoutError {}
+
+const NOTIFY_ERR_STR: &str = "No waiter available";
+pub struct NotifyError;
+
+impl fmt::Display for NotifyError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl fmt::Debug for NotifyError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(NOTIFY_ERR_STR)
+    }
+}
+
+impl std::error::Error for NotifyError {}
+
+// Inner
+struct EventInner {
+    event: EventLib,
+    flag: AtomicU8,
+    notifiers: AtomicU16,
+    waiters: AtomicU16,
+}
+
+const UNSET: u8 = 0;
+const OK: u8 = 1;
+const ERR: u8 = 1 << 1;
+
+#[repr(u8)]
+enum EventCheck {
+    Unset = UNSET,
+    Ok = OK,
+    Err = ERR,
+}
+
+#[repr(u8)]
+enum EventSet {
+    Ok = OK,
+    Err = ERR,
+}
+
+impl EventInner {
+    fn check(&self) -> EventCheck {
+        let f = self.flag.fetch_and(!OK, Ordering::SeqCst);
+        if f & ERR != 0 {
+            return EventCheck::Err;
+        }
+        if f == OK {
+            return EventCheck::Ok;
+        }
+        EventCheck::Unset
+    }
+
+    fn set(&self) -> EventSet {
+        let f = self.flag.fetch_or(OK, Ordering::SeqCst);
+        if f & ERR != 0 {
+            return EventSet::Err;
+        }
+        EventSet::Ok
+    }
+
+    fn err(&self) {
+        self.flag.store(ERR, Ordering::SeqCst);
+    }
+}
+
+/// Creates a new lock-free event variable. Every time a [`Notifier`] calls [`Notifier::notify`],
+/// one [`Waiter`] will be woken up. If no waiter is waiting when `notify` is called, the
+/// notification will not be lost: the next waiter will return immediately when calling `wait`.
+pub fn new() -> (Notifier, Waiter) {
+    let inner = Arc::new(EventInner {
+        event: EventLib::new(),
+        flag: AtomicU8::new(UNSET),
+        notifiers: AtomicU16::new(1),
+        waiters: AtomicU16::new(1),
+    });
+    (Notifier(inner.clone()), Waiter(inner))
+}
+
+/// A [`Notifier`] is used to notify and wake up one and only one [`Waiter`].
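+///
+/// A minimal usage sketch (the `zenoh_sync::event` module path is assumed from
+/// this crate's re-exports; not part of the public examples):
+///
+/// ```ignore
+/// let (notifier, waiter) = zenoh_sync::event::new();
+/// notifier.notify().unwrap(); // latched even though nobody is waiting yet
+/// waiter.wait().unwrap(); // returns immediately thanks to the latched flag
+/// ```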
+#[repr(transparent)]
+pub struct Notifier(Arc<EventInner>);
+
+impl Notifier {
+    /// Notifies one pending listener
+    #[inline]
+    pub fn notify(&self) -> Result<(), NotifyError> {
+        // Set the flag.
+        match self.0.set() {
+            EventSet::Ok => {
+                self.0.event.notify_additional_relaxed(1);
+                Ok(())
+            }
+            EventSet::Err => Err(NotifyError),
+        }
+    }
+}
+
+impl Clone for Notifier {
+    fn clone(&self) -> Self {
+        let n = self.0.notifiers.fetch_add(1, Ordering::SeqCst);
+        // Panic on overflow
+        assert!(n != 0);
+        Self(self.0.clone())
+    }
+}
+
+impl Drop for Notifier {
+    fn drop(&mut self) {
+        let n = self.0.notifiers.fetch_sub(1, Ordering::SeqCst);
+        if n == 1 {
+            // The last Notifier has been dropped, close the event and notify everyone
+            self.0.err();
+            self.0.event.notify(usize::MAX);
+        }
+    }
+}
+
+#[repr(transparent)]
+pub struct Waiter(Arc<EventInner>);
+
+impl Waiter {
+    /// Waits for the condition to be notified
+    #[inline]
+    pub async fn wait_async(&self) -> Result<(), WaitError> {
+        // Wait until the flag is set.
+        loop {
+            // Check the flag.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitError),
+            }
+
+            // Start listening for events.
+            let listener = self.0.event.listen();
+
+            // Check the flag again after creating the listener.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitError),
+            }
+
+            // Wait for a notification and continue the loop.
+            listener.await;
+        }
+
+        Ok(())
+    }
+
+    /// Waits for the condition to be notified
+    #[inline]
+    pub fn wait(&self) -> Result<(), WaitError> {
+        // Wait until the flag is set.
+        loop {
+            // Check the flag.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitError),
+            }
+
+            // Start listening for events.
+            let listener = self.0.event.listen();
+
+            // Check the flag again after creating the listener.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitError),
+            }
+
+            // Wait for a notification and continue the loop.
+            listener.wait();
+        }
+
+        Ok(())
+    }
+
+    /// Waits for the condition to be notified or returns an error when the deadline is reached
+    #[inline]
+    pub fn wait_deadline(&self, deadline: Instant) -> Result<(), WaitDeadlineError> {
+        // Wait until the flag is set.
+        loop {
+            // Check the flag.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitDeadlineError::WaitError),
+            }
+
+            // Start listening for events.
+            let listener = self.0.event.listen();
+
+            // Check the flag again after creating the listener.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitDeadlineError::WaitError),
+            }
+
+            // Wait for a notification and continue the loop.
+            if listener.wait_deadline(deadline).is_none() {
+                return Err(WaitDeadlineError::Deadline);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Waits for the condition to be notified or returns an error when the timeout is expired
+    #[inline]
+    pub fn wait_timeout(&self, timeout: Duration) -> Result<(), WaitTimeoutError> {
+        // Wait until the flag is set.
+        loop {
+            // Check the flag.
+            match self.0.check() {
+                EventCheck::Ok => break,
+                EventCheck::Unset => {}
+                EventCheck::Err => return Err(WaitTimeoutError::WaitError),
+            }
+
+            // Start listening for events.
+            let listener = self.0.event.listen();
+
+            // Check the flag again after creating the listener.
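+            // (This re-check is what makes the flag/listener pair race-free: a
+            // notification landing between the first check and `listen()` would
+            // only be observed after an extra wakeup or a spurious timeout, so
+            // the flag is inspected once more before going to sleep.)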
+ match self.0.check() { + EventCheck::Ok => break, + EventCheck::Unset => {} + EventCheck::Err => return Err(WaitTimeoutError::WaitError), + } + + // Wait for a notification and continue the loop. + if listener.wait_timeout(timeout).is_none() { + return Err(WaitTimeoutError::Timeout); + } + } + + Ok(()) + } +} + +impl Clone for Waiter { + fn clone(&self) -> Self { + let n = self.0.waiters.fetch_add(1, Ordering::Relaxed); + // Panic on overflow + assert!(n != 0); + Self(self.0.clone()) + } +} + +impl Drop for Waiter { + fn drop(&mut self) { + let n = self.0.waiters.fetch_sub(1, Ordering::SeqCst); + if n == 1 { + // The last Waiter has been dropped, close the event + self.0.err(); + } + } +} + +mod tests { + #[test] + fn event_timeout() { + use std::{ + sync::{Arc, Barrier}, + time::Duration, + }; + + use crate::WaitTimeoutError; + + let barrier = Arc::new(Barrier::new(2)); + let (notifier, waiter) = super::new(); + let tslot = Duration::from_secs(1); + + let bs = barrier.clone(); + let s = std::thread::spawn(move || { + // 1 - Wait one notification + match waiter.wait_timeout(tslot) { + Ok(()) => {} + Err(WaitTimeoutError::Timeout) => panic!("Timeout {:#?}", tslot), + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 2 - Being notified twice but waiting only once + bs.wait(); + + match waiter.wait_timeout(tslot) { + Ok(()) => {} + Err(WaitTimeoutError::Timeout) => panic!("Timeout {:#?}", tslot), + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + match waiter.wait_timeout(tslot) { + Ok(()) => panic!("Event Ok but it should be Timeout"), + Err(WaitTimeoutError::Timeout) => {} + Err(WaitTimeoutError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 3 - Notifier has been dropped + bs.wait(); + + waiter.wait().unwrap_err(); + + bs.wait(); + }); + + let bp = barrier.clone(); + let p = std::thread::spawn(move || { + // 1 - Notify once + notifier.notify().unwrap(); + + bp.wait(); + + // 2 - Notify twice + notifier.notify().unwrap(); + notifier.notify().unwrap(); + + bp.wait(); + bp.wait(); + + // 3 - Drop notifier yielding an error in the waiter + drop(notifier); + + bp.wait(); + bp.wait(); + }); + + s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_deadline() { + use std::{ + sync::{Arc, Barrier}, + time::{Duration, Instant}, + }; + + use crate::WaitDeadlineError; + + let barrier = Arc::new(Barrier::new(2)); + let (notifier, waiter) = super::new(); + let tslot = Duration::from_secs(1); + + let bs = barrier.clone(); + let s = std::thread::spawn(move || { + // 1 - Wait one notification + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => {} + Err(WaitDeadlineError::Deadline) => panic!("Timeout {:#?}", tslot), + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 2 - Being notified twice but waiting only once + bs.wait(); + + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => {} + Err(WaitDeadlineError::Deadline) => panic!("Timeout {:#?}", tslot), + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + match waiter.wait_deadline(Instant::now() + tslot) { + Ok(()) => panic!("Event Ok but it should be Timeout"), + Err(WaitDeadlineError::Deadline) => {} + Err(WaitDeadlineError::WaitError) => panic!("Event closed"), + } + + bs.wait(); + + // 3 - Notifier has been dropped + bs.wait(); + + waiter.wait().unwrap_err(); + + bs.wait(); + }); + + let bp = barrier.clone(); + let p = std::thread::spawn(move || { + // 1 - Notify once + 
notifier.notify().unwrap(); + + bp.wait(); + + // 2 - Notify twice + notifier.notify().unwrap(); + notifier.notify().unwrap(); + + bp.wait(); + bp.wait(); + + // 3 - Drop notifier yielding an error in the waiter + drop(notifier); + + bp.wait(); + bp.wait(); + }); + + s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_loop() { + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, Barrier, + }, + time::{Duration, Instant}, + }; + + const N: usize = 1_000; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + + let (notifier, waiter) = super::new(); + let barrier = Arc::new(Barrier::new(2)); + + let bs = barrier.clone(); + let s = std::thread::spawn(move || { + for _ in 0..N { + waiter.wait().unwrap(); + COUNTER.fetch_add(1, Ordering::Relaxed); + bs.wait(); + } + }); + let p = std::thread::spawn(move || { + for _ in 0..N { + notifier.notify().unwrap(); + barrier.wait(); + } + }); + + let start = Instant::now(); + let tout = Duration::from_secs(60); + loop { + let n = COUNTER.load(Ordering::Relaxed); + if n == N { + break; + } + if start.elapsed() > tout { + panic!("Timeout {:#?}. Counter: {n}/{N}", tout); + } + + std::thread::sleep(Duration::from_millis(100)); + } + + s.join().unwrap(); + p.join().unwrap(); + } + + #[test] + fn event_multiple() { + use std::{ + sync::atomic::{AtomicUsize, Ordering}, + time::{Duration, Instant}, + }; + + const N: usize = 1_000; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + + let (notifier, waiter) = super::new(); + + let w1 = waiter.clone(); + let s1 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.fetch_add(1, Ordering::Relaxed) < N - 2 { + w1.wait().unwrap(); + n += 1; + } + println!("S1: {}", n); + }); + let s2 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.fetch_add(1, Ordering::Relaxed) < N - 2 { + waiter.wait().unwrap(); + n += 1; + } + println!("S2: {}", n); + }); + + let n1 = notifier.clone(); + let p1 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.load(Ordering::Relaxed) < N { + n1.notify().unwrap(); + n += 1; + std::thread::sleep(Duration::from_millis(1)); + } + println!("P1: {}", n); + }); + let p2 = std::thread::spawn(move || { + let mut n = 0; + while COUNTER.load(Ordering::Relaxed) < N { + notifier.notify().unwrap(); + n += 1; + std::thread::sleep(Duration::from_millis(1)); + } + println!("P2: {}", n); + }); + + std::thread::spawn(move || { + let start = Instant::now(); + let tout = Duration::from_secs(60); + loop { + let n = COUNTER.load(Ordering::Relaxed); + if n == N { + break; + } + if start.elapsed() > tout { + panic!("Timeout {:#?}. Counter: {n}/{N}", tout); + } + + std::thread::sleep(Duration::from_millis(100)); + } + }); + + p1.join().unwrap(); + p2.join().unwrap(); + + s1.join().unwrap(); + s2.join().unwrap(); + } +} diff --git a/commons/zenoh-sync/src/fifo_queue.rs b/commons/zenoh-sync/src/fifo_queue.rs index e0ce57cb36..44bc2a5b17 100644 --- a/commons/zenoh-sync/src/fifo_queue.rs +++ b/commons/zenoh-sync/src/fifo_queue.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::Condition; use tokio::sync::Mutex; use zenoh_collections::RingBuffer; use zenoh_core::zasynclock; +use crate::Condition; + pub struct FifoQueue { not_empty: Condition, not_full: Condition, diff --git a/commons/zenoh-sync/src/lib.rs b/commons/zenoh-sync/src/lib.rs index 419246dc9d..8289b29fbb 100644 --- a/commons/zenoh-sync/src/lib.rs +++ b/commons/zenoh-sync/src/lib.rs @@ -17,10 +17,16 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html)
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{Context, Poll},
+};
+
 use futures::FutureExt;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
+
+pub mod event;
+pub use event::*;
 
 pub mod fifo_queue;
 pub use fifo_queue::*;
diff --git a/commons/zenoh-sync/src/lifo_queue.rs b/commons/zenoh-sync/src/lifo_queue.rs
index f29614d4b2..9fe541da36 100644
--- a/commons/zenoh-sync/src/lifo_queue.rs
+++ b/commons/zenoh-sync/src/lifo_queue.rs
@@ -12,6 +12,7 @@
 //   ZettaScale Zenoh Team,
 //
 use std::sync::{Condvar, Mutex};
+
 use zenoh_collections::StackBuffer;
 use zenoh_core::zlock;
 
diff --git a/commons/zenoh-sync/src/mvar.rs b/commons/zenoh-sync/src/mvar.rs
index 1b4a90e1e2..f818b44071 100644
--- a/commons/zenoh-sync/src/mvar.rs
+++ b/commons/zenoh-sync/src/mvar.rs
@@ -11,11 +11,13 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::Condition;
 use std::sync::atomic::{AtomicUsize, Ordering};
+
 use tokio::sync::Mutex;
 use zenoh_core::zasynclock;
 
+use crate::Condition;
+
 pub struct Mvar<T> {
     inner: Mutex<Option<T>>,
     cond_put: Condition,
@@ -96,9 +98,9 @@ mod tests {
 
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
     async fn mvar() -> ZResult<()> {
+        use std::{sync::Arc, time::Duration};
+
         use super::Mvar;
-        use std::sync::Arc;
-        use std::time::Duration;
 
         const TIMEOUT: Duration = Duration::from_secs(60);
 
diff --git a/commons/zenoh-sync/src/object_pool.rs b/commons/zenoh-sync/src/object_pool.rs
index 83b673c449..ee6eed881b 100644
--- a/commons/zenoh-sync/src/object_pool.rs
+++ b/commons/zenoh-sync/src/object_pool.rs
@@ -11,15 +11,17 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::LifoQueue;
 use std::{
     any::Any,
     fmt,
     ops::{Deref, DerefMut, Drop},
     sync::{Arc, Weak},
 };
+
 use zenoh_buffers::ZSliceBuffer;
 
+use super::LifoQueue;
+
 /// Provides a pool of pre-allocated objects that are automatically reinserted into
 /// the pool when dropped.
 pub struct RecyclingObjectPool<T, F>
@@ -141,10 +143,12 @@ impl ZSliceBuffer for RecyclingObject<Box<[u8]>> {
     fn as_slice(&self) -> &[u8] {
         self.as_ref()
     }
-    fn as_mut_slice(&mut self) -> &mut [u8] {
-        self.as_mut()
-    }
+
     fn as_any(&self) -> &dyn Any {
         self
     }
+
+    fn as_any_mut(&mut self) -> &mut dyn Any {
+        self
+    }
 }
diff --git a/commons/zenoh-sync/src/signal.rs b/commons/zenoh-sync/src/signal.rs
index 74dd3e5199..053f5a13aa 100644
--- a/commons/zenoh-sync/src/signal.rs
+++ b/commons/zenoh-sync/src/signal.rs
@@ -11,8 +11,11 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use std::sync::atomic::{AtomicBool, Ordering::*};
-use std::sync::Arc;
+use std::sync::{
+    atomic::{AtomicBool, Ordering::*},
+    Arc,
+};
+
 use tokio::sync::Semaphore;
 
 #[derive(Debug, Clone)]
@@ -68,9 +71,10 @@ impl Default for Signal {
 
 #[cfg(test)]
 mod tests {
-    use super::*;
     use std::time::Duration;
 
+    use super::*;
+
     #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
     async fn signal_test() {
         let signal = Signal::new();
diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs
index a733a3de13..2a06b56b5c 100644
--- a/commons/zenoh-task/src/lib.rs
+++ b/commons/zenoh-task/src/lib.rs
@@ -18,13 +18,12 @@
 //!
 //!
[Click here for Zenoh's documentation](../zenoh/index.html) +use std::{future::Future, time::Duration}; + use futures::future::FutureExt; -use std::future::Future; -use std::time::Duration; use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; -use zenoh_core::{ResolveFuture, SyncResolve}; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_core::{ResolveFuture, Wait}; use zenoh_runtime::ZRuntime; #[derive(Clone)] @@ -111,7 +110,7 @@ impl TaskController { /// The call blocks until all tasks yield or timeout duration expires. /// Returns 0 in case of success, number of non terminated tasks otherwise. pub fn terminate_all(&self, timeout: Duration) -> usize { - ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).wait() } /// Async version of [`TaskController::terminate_all()`]. @@ -182,7 +181,7 @@ impl TerminatableTask { /// Attempts to terminate the task. /// Returns true if task completed / aborted within timeout duration, false otherwise. pub fn terminate(&mut self, timeout: Duration) -> bool { - ResolveFuture::new(async move { self.terminate_async(timeout).await }).res_sync() + ResolveFuture::new(async move { self.terminate_async(timeout).await }).wait() } /// Async version of [`TerminatableTask::terminate()`]. diff --git a/commons/zenoh-util/Cargo.toml b/commons/zenoh-util/Cargo.toml index 48bbb17d04..38694dc1b5 100644 --- a/commons/zenoh-util/Cargo.toml +++ b/commons/zenoh-util/Cargo.toml @@ -32,12 +32,9 @@ description = "Internal crate for zenoh." maintenance = { status = "actively-developed" } [features] -std = [] test = [] -default = ["std"] [dependencies] -async-std = { workspace = true, features = ["default", "unstable"] } tokio = { workspace = true, features = ["time", "net"] } async-trait = { workspace = true } flume = { workspace = true } @@ -45,11 +42,14 @@ home = { workspace = true } humantime = { workspace = true } lazy_static = { workspace = true } libloading = { workspace = true } -tracing = {workspace = true} -tracing-subscriber = {workspace = true} +tracing = { workspace = true } +tracing-subscriber = { workspace = true } shellexpand = { workspace = true } zenoh-core = { workspace = true } zenoh-result = { workspace = true, features = ["default"] } +const_format = { workspace = true } +serde = { workspace = true, features = ["default"] } +serde_json = { workspace = true } [target.'cfg(windows)'.dependencies] winapi = { workspace = true } diff --git a/commons/zenoh-util/src/std_only/ffi/mod.rs b/commons/zenoh-util/src/ffi/mod.rs similarity index 100% rename from commons/zenoh-util/src/std_only/ffi/mod.rs rename to commons/zenoh-util/src/ffi/mod.rs diff --git a/commons/zenoh-util/src/std_only/ffi/win.rs b/commons/zenoh-util/src/ffi/win.rs similarity index 96% rename from commons/zenoh-util/src/std_only/ffi/win.rs rename to commons/zenoh-util/src/ffi/win.rs index 3a15871c20..7f0bbd986a 100644 --- a/commons/zenoh-util/src/std_only/ffi/win.rs +++ b/commons/zenoh-util/src/ffi/win.rs @@ -11,9 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::io; -use std::mem; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::{ + io, mem, + net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, +}; + use winapi::shared::{ws2def, ws2ipdef}; #[allow(clippy::many_single_char_names)] diff --git a/commons/zenoh-util/src/lib.rs 
b/commons/zenoh-util/src/lib.rs index 7e02096ebb..a6cf03e5fb 100644 --- a/commons/zenoh-util/src/lib.rs +++ b/commons/zenoh-util/src/lib.rs @@ -17,28 +17,56 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -#![cfg_attr(not(feature = "std"), no_std)] -extern crate alloc; -#[cfg_attr(feature = "std", macro_use)] -extern crate lazy_static; +use lazy_static::lazy_static; +pub mod ffi; +mod lib_loader; +pub mod lib_search_dirs; +pub mod net; +pub mod time_range; + +pub use lib_loader::*; +pub mod timer; +pub use timer::*; +pub mod log; +pub use lib_search_dirs::*; +pub use log::*; + +/// The "ZENOH_HOME" environment variable name +pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; + +const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; + +/// Return the path to the ${ZENOH_HOME} directory (~/.zenoh by default). +pub fn zenoh_home() -> &'static std::path::Path { + use std::path::PathBuf; + lazy_static! { + static ref ROOT: PathBuf = { + if let Some(dir) = std::env::var_os(ZENOH_HOME_ENV_VAR) { + PathBuf::from(dir) + } else { + match home::home_dir() { + Some(mut dir) => { + dir.push(DEFAULT_ZENOH_HOME_DIRNAME); + dir + } + None => PathBuf::from(DEFAULT_ZENOH_HOME_DIRNAME), + } + } + }; + } + ROOT.as_path() +} + +#[doc(hidden)] +pub use const_format::concatcp as __concatcp; #[macro_export] macro_rules! concat_enabled_features { - (prefix = $prefix:literal, features = [$($feature:literal),*]) => { + (prefix = $prefix:literal, features = [$($feature:literal),* $(,)?]) => { { - use const_format::concatcp; - concatcp!("" $(, - if cfg!(feature = $feature) { concatcp!(" ", concatcp!($prefix, "/", $feature)) } else { "" } - )*) + $crate::__concatcp!($( + if cfg!(feature = $feature) { $crate::__concatcp!(" ", $prefix, "/", $feature) } else { "" } + ),*) } }; } - -#[deprecated = "This module is now a separate crate. Use the `zenoh_core` crate directly for shorter compile-times. You may disable this re-export by disabling `zenoh-util`'s default features."] -pub use zenoh_core as core; - -#[cfg(feature = "std")] -mod std_only; - -#[cfg(feature = "std")] -pub use std_only::*; diff --git a/commons/zenoh-util/src/std_only/lib_loader.rs b/commons/zenoh-util/src/lib_loader.rs similarity index 73% rename from commons/zenoh-util/src/std_only/lib_loader.rs rename to commons/zenoh-util/src/lib_loader.rs index 4f3621e1cc..082bb04839 100644 --- a/commons/zenoh-util/src/std_only/lib_loader.rs +++ b/commons/zenoh-util/src/lib_loader.rs @@ -11,81 +11,60 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + env::consts::{DLL_PREFIX, DLL_SUFFIX}, + ffi::OsString, + ops::Deref, + path::PathBuf, +}; + use libloading::Library; -use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; -use std::ffi::OsString; -use std::ops::Deref; -use std::path::PathBuf; use tracing::{debug, warn}; -use zenoh_core::zconfigurable; +use zenoh_core::{zconfigurable, zerror}; use zenoh_result::{bail, ZResult}; +use crate::LibSearchDirs; + zconfigurable! { /// The libraries prefix for the current platform (usually: `"lib"`) pub static ref LIB_PREFIX: String = DLL_PREFIX.to_string(); /// The libraries suffix for the current platform (`".dll"` or `".so"` or `".dylib"`...) 
 pub static ref LIB_SUFFIX: String = DLL_SUFFIX.to_string();
-    /// The default list of paths where to search for libraries to load
-    pub static ref LIB_DEFAULT_SEARCH_PATHS: String = ".:~/.zenoh/lib:/opt/homebrew/lib:/usr/local/lib:/usr/lib".to_string();
 }
 
 /// LibLoader allows search for libraries and to load them.
 #[derive(Clone, Debug)]
 pub struct LibLoader {
-    search_paths: Vec<PathBuf>,
+    search_paths: Option<Vec<PathBuf>>,
 }
 
 impl LibLoader {
     /// Return an empty `LibLoader`.
     pub fn empty() -> LibLoader {
-        LibLoader {
-            search_paths: Vec::new(),
-        }
-    }
-
-    /// Returns the list of search paths used by `LibLoader::default()`
-    pub fn default_search_paths() -> &'static str {
-        &LIB_DEFAULT_SEARCH_PATHS
+        LibLoader { search_paths: None }
     }
 
     /// Creates a new [LibLoader] with a set of paths where the libraries will be searched for.
     /// If `exe_parent_dir` is true, the parent directory of the current executable is also added
     /// to the set of paths for search.
-    pub fn new<S>(search_dirs: &[S], exe_parent_dir: bool) -> LibLoader
-    where
-        S: AsRef<str>,
-    {
-        let mut search_paths: Vec<PathBuf> = vec![];
-        for s in search_dirs {
-            match shellexpand::full(s) {
-                Ok(cow_str) => match PathBuf::from(&*cow_str).canonicalize() {
-                    Ok(path) => search_paths.push(path),
-                    Err(err) => debug!("Cannot search for libraries in {}: {}", cow_str, err),
-                },
-                Err(err) => warn!("Cannot search for libraries in '{}': {} ", s.as_ref(), err),
-            }
-        }
-        Self::_new(search_paths, exe_parent_dir)
-    }
-    fn _new(mut search_paths: Vec<PathBuf>, exe_parent_dir: bool) -> Self {
-        if exe_parent_dir {
-            match std::env::current_exe() {
-                Ok(path) => match path.parent() {
-                    Some(p) => if p.is_dir() {
-                        search_paths.push(p.canonicalize().unwrap())
-                    },
-                    None => warn!("Can't search for plugins in executable parent directory: no parent directory for {}.", path.to_string_lossy()),
-                },
-                Err(e) => warn!("Can't search for plugins in executable parent directory: {}.", e),
+    pub fn new(dirs: LibSearchDirs) -> LibLoader {
+        let mut search_paths = Vec::new();
+
+        for path in dirs.into_iter() {
+            match path {
+                Ok(path) => search_paths.push(path),
+                Err(err) => tracing::error!("{err}"),
             }
         }
-        LibLoader { search_paths }
+
+        LibLoader {
+            search_paths: Some(search_paths),
+        }
     }
 
     /// Return the list of search paths used by this [LibLoader]
-    pub fn search_paths(&self) -> &[PathBuf] {
-        &self.search_paths
+    pub fn search_paths(&self) -> Option<&[PathBuf]> {
+        self.search_paths.as_deref()
     }
 
     /// Load a library from the specified path.
@@ -115,7 +94,7 @@ impl LibLoader {
     ///
     /// This function calls [libloading::Library::new()](https://docs.rs/libloading/0.7.0/libloading/struct.Library.html#method.new)
     /// which is unsafe.
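+    // A hypothetical call sketch (the plugin name below is illustrative only,
+    // not part of this API):
+    //
+    //     let loader = LibLoader::new(LibSearchDirs::from_paths(&[".", "~/.zenoh/lib"]));
+    //     if let Some((_lib, path)) = unsafe { loader.search_and_load("zenoh_plugin_example")? } {
+    //         tracing::debug!("plugin found at {}", path.display());
+    //     }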
-    pub unsafe fn search_and_load(&self, name: &str) -> ZResult<(Library, PathBuf)> {
+    pub unsafe fn search_and_load(&self, name: &str) -> ZResult<Option<(Library, PathBuf)>> {
         let filename = format!("{}{}{}", *LIB_PREFIX, name, *LIB_SUFFIX);
         let filename_ostr = OsString::from(&filename);
         tracing::debug!(
@@ -123,13 +102,16 @@ impl LibLoader {
             filename,
             self.search_paths
         );
-        for dir in &self.search_paths {
+        let Some(search_paths) = self.search_paths() else {
+            return Ok(None);
+        };
+        for dir in search_paths {
             match dir.read_dir() {
                 Ok(read_dir) => {
                     for entry in read_dir.flatten() {
                         if entry.file_name() == filename_ostr {
                             let path = entry.path();
-                            return Ok((Library::new(path.clone())?, path));
+                            return Ok(Some((Library::new(path.clone())?, path)));
                         }
                     }
                 }
@@ -139,7 +121,7 @@ impl LibLoader {
                 ),
             }
         }
-        bail!("Library file '{}' not found", filename)
+        Err(zerror!("Library file '{}' not found", filename).into())
     }
 
     /// Search and load all libraries with filename starting with [struct@LIB_PREFIX]+`prefix` and ending with [struct@LIB_SUFFIX].
@@ -155,7 +137,7 @@ impl LibLoader {
     pub unsafe fn load_all_with_prefix(
         &self,
         prefix: Option<&str>,
-    ) -> Vec<(Library, PathBuf, String)> {
+    ) -> Option<Vec<(Library, PathBuf, String)>> {
         let lib_prefix = format!("{}{}", *LIB_PREFIX, prefix.unwrap_or(""));
         tracing::debug!(
             "Search for libraries {}*{} to load in {:?}",
@@ -163,9 +145,8 @@ impl LibLoader {
             *LIB_SUFFIX,
             self.search_paths
         );
-        let mut result = vec![];
-        for dir in &self.search_paths {
+        for dir in self.search_paths()? {
             match dir.read_dir() {
                 Ok(read_dir) => {
                     for entry in read_dir.flatten() {
@@ -196,7 +177,7 @@ impl LibLoader {
                 ),
             }
         }
-        result
+        Some(result)
     }
 
     pub fn _plugin_name(path: &std::path::Path) -> Option<&str> {
@@ -232,7 +213,6 @@ impl LibLoader {
 
 impl Default for LibLoader {
     fn default() -> Self {
-        let paths: Vec<&str> = (*LIB_DEFAULT_SEARCH_PATHS).split(':').collect();
-        LibLoader::new(&paths, true)
+        LibLoader::new(LibSearchDirs::default())
     }
 }
diff --git a/commons/zenoh-util/src/lib_search_dirs.rs b/commons/zenoh-util/src/lib_search_dirs.rs
new file mode 100644
index 0000000000..58046ff3a7
--- /dev/null
+++ b/commons/zenoh-util/src/lib_search_dirs.rs
@@ -0,0 +1,236 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{env, error::Error, fmt::Display, path::PathBuf, str::FromStr};
+
+use serde::{
+    de::{value::MapAccessDeserializer, Visitor},
+    Deserialize, Serialize,
+};
+
+#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)]
+#[serde(default)]
+pub struct LibSearchDirs(Vec<LibSearchDir>);
+
+impl LibSearchDirs {
+    pub fn from_paths<T: AsRef<str>>(paths: &[T]) -> Self {
+        Self(
+            paths
+                .iter()
+                .map(|s| LibSearchDir::Path(s.as_ref().to_string()))
+                .collect(),
+        )
+    }
+
+    pub fn from_specs<T: AsRef<str>>(paths: &[T]) -> Result<Self, serde_json::Error> {
+        let dirs = paths
+            .iter()
+            .map(|s| {
+                let de = &mut serde_json::Deserializer::from_str(s.as_ref());
+                LibSearchDir::deserialize(de)
+            })
+            .collect::<Result<Vec<_>, _>>()?;
+
+        Ok(Self(dirs))
+    }
+}
+
+#[derive(Debug)]
+pub struct InvalidLibSearchDir {
+    found: LibSearchDir,
+    source: String,
+}
+
+impl Display for InvalidLibSearchDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "invalid library search directory `{:?}`: {}",
+            self.found, self.source
+        )
+    }
+}
+
+impl Error for InvalidLibSearchDir {}
+
+pub struct IntoIter {
+    iter: std::vec::IntoIter<LibSearchDir>,
+}
+
+impl Iterator for IntoIter {
+    type Item = Result<PathBuf, InvalidLibSearchDir>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter.next().map(LibSearchDir::into_path)
+    }
+}
+
+impl IntoIterator for LibSearchDirs {
+    type Item = Result<PathBuf, InvalidLibSearchDir>;
+
+    type IntoIter = IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        IntoIter {
+            iter: self.0.into_iter(),
+        }
+    }
+}
+
+impl Default for LibSearchDirs {
+    fn default() -> Self {
+        LibSearchDirs(vec![
+            LibSearchDir::Spec(LibSearchSpec {
+                kind: LibSearchSpecKind::CurrentExeParent,
+                value: None,
+            }),
+            LibSearchDir::Path(".".to_string()),
+            LibSearchDir::Path("~/.zenoh/lib".to_string()),
+            LibSearchDir::Path("/opt/homebrew/lib".to_string()),
+            LibSearchDir::Path("/usr/local/lib".to_string()),
+            LibSearchDir::Path("/usr/lib".to_string()),
+        ])
+    }
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub enum LibSearchDir {
+    Path(String),
+    Spec(LibSearchSpec),
+}
+
+impl LibSearchDir {
+    fn into_path(self) -> Result<PathBuf, InvalidLibSearchDir> {
+        match self {
+            LibSearchDir::Path(path) => LibSearchSpec {
+                kind: LibSearchSpecKind::Path,
+                value: Some(path),
+            }
+            .into_path(),
+            LibSearchDir::Spec(spec) => spec.into_path(),
+        }
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize, Eq, Hash, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub struct LibSearchSpec {
+    kind: LibSearchSpecKind,
+    value: Option<String>,
+}
+
+impl LibSearchSpec {
+    fn into_path(self) -> Result<PathBuf, InvalidLibSearchDir> {
+        fn error_from_source<T: Display>(spec: &LibSearchSpec, err: T) -> InvalidLibSearchDir {
+            InvalidLibSearchDir {
+                found: LibSearchDir::Spec(spec.clone()),
+                source: err.to_string(),
+            }
+        }
+
+        fn error_from_str(spec: &LibSearchSpec, err: &str) -> InvalidLibSearchDir {
+            InvalidLibSearchDir {
+                found: LibSearchDir::Spec(spec.clone()),
+                source: err.to_string(),
+            }
+        }
+
+        match self.kind {
+            LibSearchSpecKind::Path => {
+                let Some(value) = &self.value else {
+                    return Err(error_from_str(
+                        &self,
+                        "`path` specs should have a `value` field",
+                    ));
+                };
+
+                let expanded =
+                    shellexpand::full(value).map_err(|err| error_from_source(&self, err))?;
+
+                let path =
+                    PathBuf::from_str(&expanded).map_err(|err| error_from_source(&self, err))?;
+
+                Ok(path)
+            }
+            LibSearchSpecKind::CurrentExeParent => {
+                let current_exe =
+                    env::current_exe().map_err(|err| error_from_source(&self, err))?;
+
+                let Some(current_exe_parent) = current_exe.parent() else {
+                    return Err(error_from_str(
+                        &self,
+                        "current executable's path has no parent directory",
+                    ));
+                };
+
+                let canonicalized = current_exe_parent
+                    .canonicalize()
+                    .map_err(|err| error_from_source(&self, err))?;
+
+                Ok(canonicalized)
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize, Eq, Hash, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub enum LibSearchSpecKind {
+    Path,
+    CurrentExeParent,
+}
+
+impl<'de> Deserialize<'de> for LibSearchDir {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_any(LibSearchSpecOrPathVisitor)
+    }
+}
+
+impl Serialize for LibSearchDir {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            LibSearchDir::Path(path) => serializer.serialize_str(path),
+            LibSearchDir::Spec(spec) => spec.serialize(serializer),
+        }
+    }
+}
+
+struct LibSearchSpecOrPathVisitor;
+
+impl<'de> Visitor<'de> for LibSearchSpecOrPathVisitor {
+    type Value = LibSearchDir;
+
+    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+        formatter.write_str("str or map with field `kind` and optionally field `value`")
+    }
+
+    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+    where
+        E: serde::de::Error,
+    {
+        Ok(LibSearchDir::Path(v.to_string()))
+    }
+
+    fn visit_map<A>(self, map: A) -> Result<Self::Value, A::Error>
+    where
+        A: serde::de::MapAccess<'de>,
+    {
+        LibSearchSpec::deserialize(MapAccessDeserializer::new(map)).map(LibSearchDir::Spec)
+    }
+}
diff --git a/commons/zenoh-util/src/log.rs b/commons/zenoh-util/src/log.rs
new file mode 100644
index 0000000000..023c77121f
--- /dev/null
+++ b/commons/zenoh-util/src/log.rs
@@ -0,0 +1,165 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{fmt, thread, thread::ThreadId};
+
+use tracing::{field::Field, span, Event, Metadata, Subscriber};
+use tracing_subscriber::{
+    layer::{Context, SubscriberExt},
+    registry::LookupSpan,
+    EnvFilter,
+};
+
+/// This is a utility function to enable the tracing formatting subscriber from
+/// the `RUST_LOG` environment variable. If `RUST_LOG` is not set, then logging is not enabled.
+///
+/// # Safety
+/// Calling this function initializes a `lazy_static` in the `tracing` crate;
+/// such a static is not deallocated prior to process exit, thus tools such as `valgrind`
+/// will report a memory leak.
+/// Refer to this issue: <https://github.com/tokio-rs/tracing/issues/2069>
+pub fn try_init_log_from_env() {
+    if let Ok(env_filter) = EnvFilter::try_from_default_env() {
+        init_env_filter(env_filter);
+    }
+}
+
+/// This is a utility function to enable the tracing formatting subscriber from
+/// the environment variable. If `RUST_LOG` is not set, then fallback directives are used.
+///
+/// # Safety
+/// Calling this function initializes a `lazy_static` in the `tracing` crate;
+/// such a static is not deallocated prior to process exit, thus tools such as `valgrind`
+/// will report a memory leak.
+/// Refer to this issue: <https://github.com/tokio-rs/tracing/issues/2069>
+pub fn init_log_from_env_or<S>(fallback: S)
+where
+    S: AsRef<str>,
+{
+    let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(fallback));
+    init_env_filter(env_filter);
+}
+
+fn init_env_filter(env_filter: EnvFilter) {
+    let subscriber = tracing_subscriber::fmt()
+        .with_env_filter(env_filter)
+        .with_thread_ids(true)
+        .with_thread_names(true)
+        .with_level(true)
+        .with_target(true);
+
+    let subscriber = subscriber.finish();
+    let _ = tracing::subscriber::set_global_default(subscriber);
+}
+
+pub struct LogRecord {
+    pub target: String,
+    pub level: tracing::Level,
+    pub file: Option<&'static str>,
+    pub line: Option<u32>,
+    pub thread_id: ThreadId,
+    pub thread_name: Option<String>,
+    pub message: Option<String>,
+    pub attributes: Vec<(&'static str, String)>,
+}
+
+#[derive(Clone)]
+struct SpanFields(Vec<(&'static str, String)>);
+
+struct Layer<Enabled, Callback> {
+    enabled: Enabled,
+    callback: Callback,
+}
+
+impl<S, E, C> tracing_subscriber::Layer<S> for Layer<E, C>
+where
+    S: Subscriber + for<'a> LookupSpan<'a>,
+    E: Fn(&Metadata) -> bool + 'static,
+    C: Fn(LogRecord) + 'static,
+{
+    fn enabled(&self, metadata: &Metadata<'_>, _: Context<'_, S>) -> bool {
+        (self.enabled)(metadata)
+    }
+
+    fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) {
+        let span = ctx.span(id).unwrap();
+        let mut extensions = span.extensions_mut();
+        let mut fields = vec![];
+        attrs.record(&mut |field: &Field, value: &dyn fmt::Debug| {
+            fields.push((field.name(), format!("{value:?}")))
+        });
+        extensions.insert(SpanFields(fields));
+    }
+
+    fn on_record(&self, id: &span::Id, values: &span::Record<'_>, ctx: Context<'_, S>) {
+        let span = ctx.span(id).unwrap();
+        let mut extensions = span.extensions_mut();
+        let fields = extensions.get_mut::<SpanFields>().unwrap();
+        values.record(&mut |field: &Field, value: &dyn fmt::Debug| {
+            fields.0.push((field.name(), format!("{value:?}")))
+        });
+    }
+
+    fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) {
+        let thread = thread::current();
+        let mut record = LogRecord {
+            target: event.metadata().target().into(),
+            level: *event.metadata().level(),
+            file: event.metadata().file(),
+            line: event.metadata().line(),
+            thread_id: thread.id(),
+            thread_name: thread.name().map(Into::into),
+            message: None,
+            attributes: vec![],
+        };
+        if let Some(scope) = ctx.event_scope(event) {
+            for span in scope.from_root() {
+                let extensions = span.extensions();
+                let fields = extensions.get::<SpanFields>().unwrap();
+                record.attributes.extend(fields.0.iter().cloned());
+            }
+        }
+        event.record(&mut |field: &Field, value: &dyn fmt::Debug| {
+            if field.name() == "message" {
+                record.message = Some(format!("{value:?}"));
+            } else {
+                record.attributes.push((field.name(), format!("{value:?}")))
+            }
+        });
+        (self.callback)(record);
+    }
+}
+
+pub fn init_log_with_callback(
+    enabled: impl Fn(&Metadata) -> bool + Send + Sync + 'static,
+    callback: impl Fn(LogRecord) + Send + Sync + 'static,
+) {
+    let subscriber = tracing_subscriber::registry().with(Layer { enabled, callback });
+    let _ = tracing::subscriber::set_global_default(subscriber);
+}
+
+#[cfg(feature = "test")]
+// Used to verify memory leaks for valgrind CI.
+// `EnvFilter` internally uses a static reference that is not cleaned up, yielding false positives in valgrind.
+// This function enables logging without calling `EnvFilter` for env configuration.
+pub fn init_log_test() { + let subscriber = tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_thread_ids(true) + .with_thread_names(true) + .with_level(true) + .with_target(true); + + let subscriber = subscriber.finish(); + let _ = tracing::subscriber::set_global_default(subscriber); +} diff --git a/commons/zenoh-util/src/std_only/net/mod.rs b/commons/zenoh-util/src/net/mod.rs similarity index 99% rename from commons/zenoh-util/src/std_only/net/mod.rs rename to commons/zenoh-util/src/net/mod.rs index 239cdd6647..65577ac61d 100644 --- a/commons/zenoh-util/src/std_only/net/mod.rs +++ b/commons/zenoh-util/src/net/mod.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::net::{IpAddr, Ipv6Addr}; + use tokio::net::{TcpSocket, UdpSocket}; use zenoh_core::zconfigurable; #[cfg(unix)] @@ -78,9 +79,10 @@ pub fn get_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -162,9 +164,10 @@ pub fn get_local_addresses(interface: Option<&str>) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; let mut result = vec![]; @@ -242,9 +245,10 @@ pub fn get_unicast_addresses_of_interface(name: &str) -> ZResult> { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut addrs = vec![]; @@ -281,9 +285,10 @@ pub fn get_index_of_interface(addr: IpAddr) -> ZResult { #[cfg(windows)] { unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_INET)?; let mut next_iface = (buffer.as_ptr() as *mut IP_ADAPTER_ADDRESSES_LH).as_ref(); @@ -324,9 +329,10 @@ pub fn get_interface_names_by_addr(addr: IpAddr) -> ZResult> { { let mut result = vec![]; unsafe { - use crate::ffi; use winapi::um::iptypes::IP_ADAPTER_ADDRESSES_LH; + use crate::ffi; + let buffer = get_adapters_addresses(winapi::shared::ws2def::AF_UNSPEC)?; if addr.is_unspecified() { diff --git a/commons/zenoh-util/src/std_only/log.rs b/commons/zenoh-util/src/std_only/log.rs deleted file mode 100644 index 07d66d9233..0000000000 --- a/commons/zenoh-util/src/std_only/log.rs +++ /dev/null @@ -1,72 +0,0 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use tracing_subscriber::EnvFilter; - -/// This is an utility function to enable the tracing formatting subscriber from -/// the `RUST_LOG` environment variable. If `RUST_LOG` is not set, then logging is not enabled. -/// -/// # Safety -/// Calling this function initializes a `lazy_static` in the `tracing` crate -/// such static is not deallocated prior to process existing, thus tools such as `valgrind` -/// will report a memory leak. 
-/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 -pub fn try_init_log_from_env() { - if let Ok(env_filter) = EnvFilter::try_from_default_env() { - init_env_filter(env_filter); - } -} - -/// This is an utility function to enable the tracing formatting subscriber from -/// the environment variable. If `RUST_LOG` is not set, then fallback directives are used. -/// -/// # Safety -/// Calling this function initializes a `lazy_static` in the `tracing` crate -/// such static is not deallocated prior to process existing, thus tools such as `valgrind` -/// will report a memory leak. -/// Refer to this issue: https://github.com/tokio-rs/tracing/issues/2069 -pub fn init_log_from_env_or(fallback: S) -where - S: AsRef, -{ - let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(fallback)); - init_env_filter(env_filter); -} - -fn init_env_filter(env_filter: EnvFilter) { - let subscriber = tracing_subscriber::fmt() - .with_env_filter(env_filter) - .with_thread_ids(true) - .with_thread_names(true) - .with_level(true) - .with_target(true); - - let subscriber = subscriber.finish(); - let _ = tracing::subscriber::set_global_default(subscriber); -} - -#[cfg(feature = "test")] -// Used to verify memory leaks for valgrind CI. -// `EnvFilter` internally uses a static reference that is not cleaned up yielding to false positive in valgrind. -// This function enables logging without calling `EnvFilter` for env configuration. -pub fn init_log_test() { - let subscriber = tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .with_thread_ids(true) - .with_thread_names(true) - .with_level(true) - .with_target(true); - - let subscriber = subscriber.finish(); - let _ = tracing::subscriber::set_global_default(subscriber); -} diff --git a/commons/zenoh-util/src/std_only/mod.rs b/commons/zenoh-util/src/std_only/mod.rs deleted file mode 100644 index bfd24b6525..0000000000 --- a/commons/zenoh-util/src/std_only/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -pub mod ffi; -mod lib_loader; -pub mod net; -pub mod time_range; -pub use lib_loader::*; -pub mod timer; -pub use timer::*; -pub mod log; -pub use log::*; - -/// The "ZENOH_HOME" environment variable name -pub const ZENOH_HOME_ENV_VAR: &str = "ZENOH_HOME"; - -const DEFAULT_ZENOH_HOME_DIRNAME: &str = ".zenoh"; - -/// Return the path to the ${ZENOH_HOME} directory (~/.zenoh by default). -pub fn zenoh_home() -> &'static std::path::Path { - use std::path::PathBuf; - lazy_static! 
{ - static ref ROOT: PathBuf = { - if let Some(dir) = std::env::var_os(ZENOH_HOME_ENV_VAR) { - PathBuf::from(dir) - } else { - match home::home_dir() { - Some(mut dir) => { - dir.push(DEFAULT_ZENOH_HOME_DIRNAME); - dir - } - None => PathBuf::from(DEFAULT_ZENOH_HOME_DIRNAME), - } - } - }; - } - ROOT.as_path() -} diff --git a/commons/zenoh-util/src/std_only/time_range.rs b/commons/zenoh-util/src/time_range.rs similarity index 99% rename from commons/zenoh-util/src/std_only/time_range.rs rename to commons/zenoh-util/src/time_range.rs index bfbb4e3303..ad91f2bd92 100644 --- a/commons/zenoh-util/src/std_only/time_range.rs +++ b/commons/zenoh-util/src/time_range.rs @@ -12,7 +12,6 @@ // ZettaScale Zenoh Team, // -use humantime::{format_rfc3339, parse_rfc3339_weak}; use std::{ convert::{TryFrom, TryInto}, fmt::Display, @@ -20,6 +19,8 @@ use std::{ str::FromStr, time::{Duration, SystemTime}, }; + +use humantime::{format_rfc3339, parse_rfc3339_weak}; use zenoh_result::{bail, zerror, ZError}; const U_TO_SECS: f64 = 0.000001; @@ -51,6 +52,7 @@ const W_TO_SECS: f64 = D_TO_SECS * 7.0; /// iteratively getting values for `[t0..t1[`, `[t1..t2[`, `[t2..t3[`... #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TimeRange(pub TimeBound, pub TimeBound); + impl TimeRange { /// Resolves the offset bounds in the range using `now` as reference. pub fn resolve_at(self, now: SystemTime) -> TimeRange { @@ -81,6 +83,7 @@ impl TimeRange { } } } + impl TimeRange { /// Returns `true` if the provided `instant` belongs to `self`. pub fn contains(&self, instant: SystemTime) -> bool { @@ -96,17 +99,20 @@ impl TimeRange { } } } + impl From> for TimeRange { fn from(value: TimeRange) -> Self { TimeRange(value.0.into(), value.1.into()) } } + impl TryFrom> for TimeRange { type Error = (); fn try_from(value: TimeRange) -> Result { Ok(TimeRange(value.0.try_into()?, value.1.try_into()?)) } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -121,6 +127,7 @@ impl Display for TimeRange { } } } + impl Display for TimeRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { @@ -195,6 +202,7 @@ pub enum TimeBound { Exclusive(T), Unbounded, } + impl From> for TimeBound { fn from(value: TimeBound) -> Self { match value { @@ -204,6 +212,7 @@ impl From> for TimeBound { } } } + impl TryFrom> for TimeBound { type Error = (); fn try_from(value: TimeBound) -> Result { @@ -214,6 +223,7 @@ impl TryFrom> for TimeBound { }) } } + impl TimeBound { /// Resolves `self` into a [`TimeBound`], using `now` as a reference for offset expressions. /// If `self` is time boundary that cannot be represented as `SystemTime` (which means it’s not inside @@ -238,11 +248,13 @@ pub enum TimeExpr { Fixed(SystemTime), Now { offset_secs: f64 }, } + impl From for TimeExpr { fn from(t: SystemTime) -> Self { Self::Fixed(t) } } + impl TryFrom for SystemTime { type Error = (); fn try_from(value: TimeExpr) -> Result { @@ -252,6 +264,7 @@ impl TryFrom for SystemTime { } } } + impl TimeExpr { /// Resolves `self` into a [`SystemTime`], using `now` as a reference for offset expressions. 
/// @@ -295,6 +308,7 @@ impl TimeExpr { } } } + impl Display for TimeExpr { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { diff --git a/commons/zenoh-util/src/std_only/timer.rs b/commons/zenoh-util/src/timer.rs similarity index 88% rename from commons/zenoh-util/src/std_only/timer.rs rename to commons/zenoh-util/src/timer.rs index e6eefd9335..ab52c0c996 100644 --- a/commons/zenoh-util/src/std_only/timer.rs +++ b/commons/zenoh-util/src/timer.rs @@ -11,16 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::prelude::*; -use async_std::sync::Mutex; -use async_std::task; +use std::{ + cmp::Ordering as ComparisonOrdering, + collections::BinaryHeap, + sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, Weak, + }, + time::{Duration, Instant}, +}; + use async_trait::async_trait; use flume::{bounded, Receiver, RecvError, Sender}; -use std::cmp::Ordering as ComparisonOrdering; -use std::collections::BinaryHeap; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use std::sync::{Arc, Weak}; -use std::time::{Duration, Instant}; +use tokio::{runtime::Handle, select, sync::Mutex, task, time}; use zenoh_core::zconfigurable; zconfigurable! { @@ -117,7 +120,7 @@ async fn timer_task( let mut events = events.lock().await; loop { - // Fuuture for adding new events + // Future for adding new events let new = new_event.recv_async(); match events.peek() { @@ -127,12 +130,17 @@ async fn timer_task( let next = next.clone(); let now = Instant::now(); if next.when > now { - task::sleep(next.when - now).await; + time::sleep(next.when - now).await; } Ok((false, next)) }; - match new.race(wait).await { + let result = select! { + result = wait => { result }, + result = new => { result }, + }; + + match result { Ok((is_new, mut ev)) => { if is_new { // A new event has just been added: push it onto the heap @@ -201,14 +209,14 @@ impl Timer { // Start the timer task let c_e = timer.events.clone(); let fut = async move { - let _ = sl_receiver - .recv_async() - .race(timer_task(c_e, ev_receiver)) - .await; + select! { + _ = sl_receiver.recv_async() => {}, + _ = timer_task(c_e, ev_receiver) => {}, + }; tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { - task::spawn_blocking(|| task::block_on(fut)); + task::spawn_blocking(|| Handle::current().block_on(fut)); } else { task::spawn(fut); } @@ -231,14 +239,14 @@ impl Timer { // Start the timer task let c_e = self.events.clone(); let fut = async move { - let _ = sl_receiver - .recv_async() - .race(timer_task(c_e, ev_receiver)) - .await; + select! 
{ + _ = sl_receiver.recv_async() => {}, + _ = timer_task(c_e, ev_receiver) => {}, + }; tracing::trace!("A - Timer task no longer running..."); }; if spawn_blocking { - task::spawn_blocking(|| task::block_on(fut)); + task::spawn_blocking(|| Handle::current().block_on(fut)); } else { task::spawn(fut); } @@ -296,12 +304,18 @@ impl Default for Timer { mod tests { #[test] fn timer() { - use super::{Timed, TimedEvent, Timer}; - use async_std::task; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, + }; + use async_trait::async_trait; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::{Duration, Instant}; + use tokio::{runtime::Runtime, time}; + + use super::{Timed, TimedEvent, Timer}; #[derive(Clone)] struct MyEvent { @@ -340,7 +354,7 @@ mod tests { timer.add_async(event).await; // Wait for the event to occur - task::sleep(3 * interval).await; + time::sleep(3 * interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -359,7 +373,7 @@ mod tests { handle.defuse(); // Wait for the event to occur - task::sleep(3 * interval).await; + time::sleep(3 * interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -381,7 +395,7 @@ mod tests { timer.add_async(event).await; // Wait for the events to occur - task::sleep(to_elapse + interval).await; + time::sleep(to_elapse + interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -392,7 +406,7 @@ mod tests { handle.defuse(); // Wait a bit more to verify that not more events have been fired - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -407,7 +421,7 @@ mod tests { timer.add_async(event).await; // Wait for the events to occur - task::sleep(to_elapse + interval).await; + time::sleep(to_elapse + interval).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -417,7 +431,7 @@ mod tests { timer.stop_async().await; // Wait some time - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); @@ -427,13 +441,14 @@ mod tests { timer.start_async(false).await; // Wait for the events to occur - task::sleep(to_elapse).await; + time::sleep(to_elapse).await; // Load and reset the counter value let value = counter.swap(0, Ordering::SeqCst); assert_eq!(value, amount); } - task::block_on(run()); + let rt = Runtime::new().unwrap(); + rt.block_on(run()); } } diff --git a/deny.toml b/deny.toml index 1a4a14f763..02c6caeb32 100644 --- a/deny.toml +++ b/deny.toml @@ -7,6 +7,7 @@ allow = [ "EPL-2.0", "ISC", "Unicode-DFS-2016", + "Unicode-3.0", "Zlib", "BSD-2-Clause", "BSD-3-Clause", diff --git a/examples/Cargo.toml b/examples/Cargo.toml index b08ace3cbc..c2fb23aca3 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -31,26 +31,22 @@ shared-memory = ["zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] -# Unfortunately, the feature "transport_unixpipe" is always -# enabled for the lines below. 
It looks like a Cargo bug :( -# -# [target.'cfg(unix)'.dependencies] -# zenoh = { workspace = true, features = ["transport_unixpipe"] } -# -# [target.'cfg(not(unix))'.dependencies] -# zenoh = { workspace = true } - [dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "time", "io-std"] } clap = { workspace = true, features = ["derive"] } -zenoh-util = {workspace = true } +zenoh-util = { workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } -tracing = {workspace = true} +zenoh-collections = { workspace = true } +tracing = { workspace = true } zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +prost = "0.13.1" +prost-types = "0.13.1" [dev-dependencies] rand = { workspace = true, features = ["default"] } @@ -89,12 +85,17 @@ path = "examples/z_pub.rs" [[example]] name = "z_pub_shm" path = "examples/z_pub_shm.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_sub" path = "examples/z_sub.rs" +[[example]] +name = "z_sub_shm" +path = "examples/z_sub_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_pull" path = "examples/z_pull.rs" @@ -103,6 +104,11 @@ path = "examples/z_pull.rs" name = "z_queryable" path = "examples/z_queryable.rs" +[[example]] +name = "z_queryable_shm" +path = "examples/z_queryable_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_storage" path = "examples/z_storage.rs" @@ -111,6 +117,11 @@ path = "examples/z_storage.rs" name = "z_get" path = "examples/z_get.rs" +[[example]] +name = "z_get_shm" +path = "examples/z_get_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_forward" path = "examples/z_forward.rs" @@ -141,12 +152,32 @@ path = "examples/z_sub_thr.rs" [[example]] name = "z_pub_shm_thr" path = "examples/z_pub_shm_thr.rs" -required-features = ["shared-memory"] +required-features = ["unstable", "shared-memory"] [[example]] name = "z_ping" path = "examples/z_ping.rs" +[[example]] +name = "z_ping_shm" +path = "examples/z_ping_shm.rs" +required-features = ["unstable", "shared-memory"] + [[example]] name = "z_pong" path = "examples/z_pong.rs" + +[[example]] +name = "z_alloc_shm" +path = "examples/z_alloc_shm.rs" +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_bytes_shm" +path = "examples/z_bytes_shm.rs" +required-features = ["unstable", "shared-memory"] + +[[example]] +name = "z_posix_shm_provider" +path = "examples/z_posix_shm_provider.rs" +required-features = ["unstable", "shared-memory"] diff --git a/examples/README.md b/examples/README.md index bd846a14f9..d187244c51 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,6 +3,7 @@ ## Start instructions When Zenoh is built in release mode: + ```bash ./target/release/example/ ``` @@ -20,6 +21,7 @@ Scouts for Zenoh peers and routers available on the network. Typical usage: + ```bash z_scout ``` @@ -29,11 +31,11 @@ Gets information about the Zenoh session. Typical usage: + ```bash z_info ``` - ### z_put Puts a path/value into Zenoh. @@ -41,10 +43,13 @@ and [z_storage](#z_storage) examples. 
Typical usage: + ```bash z_put ``` + or + ```bash z_put -k demo/example/test -v 'Hello World' ``` @@ -55,10 +60,13 @@ The published value will be received by all matching subscribers, for instance the [z_sub](#z_sub) and [z_storage](#z_storage) examples. Typical usage: + ```bash z_pub ``` + or + ```bash z_pub -k demo/example/test -v 'Hello World' ``` @@ -69,27 +77,32 @@ The subscriber will be notified of each `put` or `delete` made on any key expression matching the subscriber key expression, and will print this notification. Typical usage: + ```bash z_sub ``` + or + ```bash z_sub -k 'demo/**' ``` ### z_pull - Declares a key expression and a pull subscriber. - On each pull, the pull subscriber will be notified of the last `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. - + Declares a key expression and a pull subscriber. + On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. Typical usage: + ```bash z_pull ``` + or + ```bash - z_pull -k 'demo/**' + z_pull -k demo/** --size 3 ``` ### z_get @@ -99,10 +112,13 @@ will receive this query and reply with paths/values that will be received by the receiver stream. Typical usage: + ```bash z_get ``` + or + ```bash z_get -s 'demo/**' ``` @@ -114,10 +130,13 @@ with a selector that matches the path, and will return a value to the querier. Typical usage: + ```bash z_queryable ``` + or + ```bash z_queryable -k demo/example/queryable -v 'This is the result' ``` @@ -131,10 +150,13 @@ and that match the queried selector. Typical usage: + ```bash z_storage ``` + or + ```bash z_storage -k 'demo/**' ``` @@ -145,11 +167,13 @@ Note that on subscriber side, the same `z_sub` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub ``` Typical Publisher usage: + ```bash z_pub_shm ``` @@ -161,11 +185,13 @@ put operations and a subscriber receiving notifications of those puts. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_thr 1024 ``` @@ -182,11 +208,13 @@ :warning: z_pong needs to start first to avoid missing the kickoff from z_ping. Typical Pong usage: + ```bash z_pong ``` Typical Ping usage: + ```bash z_ping 1024 ``` @@ -200,11 +228,13 @@ Note that on subscriber side, the same `z_sub_thr` example than for non-shared-memory example is used. Typical Subscriber usage: + ```bash z_sub_thr ``` Typical Publisher usage: + ```bash z_pub_shm_thr ``` @@ -217,10 +247,13 @@ or killing the `z_liveliness` example. Typical usage: + ```bash z_liveliness ``` + or + ```bash z_liveliness -k 'group1/member1' ``` @@ -231,10 +264,13 @@ (`group1/**` by default). Those tokens could be declared by the `z_liveliness` example. Typical usage: + ```bash z_get_liveliness ``` + or + ```bash z_get_liveliness -k 'group1/**' ``` @@ -249,10 +285,17 @@ matching liveliness tokens that were alive before it's start. Typical usage: + ```bash z_sub_liveliness ``` + or + ```bash z_sub_liveliness -k 'group1/**' ``` + +### z_bytes + + Show how to serialize different message types into ZBytes, and then deserialize from ZBytes to the original message types. 
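
To give a concrete feel for the round-trip that `z_bytes` demonstrates, here is a minimal sketch condensed from the new `examples/examples/z_bytes.rs` added below (all types and calls are taken from that file):

```rust
use zenoh::bytes::ZBytes;

fn main() {
    // Round-trip a primitive through a ZBytes payload
    let input = 1234_u32;
    let payload = ZBytes::from(input);
    let output: u32 = payload.deserialize().unwrap();
    assert_eq!(input, output);

    // Serialize several values back-to-back with the writer,
    // then read them out in the same order with the reader
    let mut bytes = ZBytes::empty();
    let mut writer = bytes.writer();
    writer.serialize(1234_u32);
    writer.serialize(&String::from("test"));
    let mut reader = bytes.reader();
    let n: u32 = reader.deserialize().unwrap();
    let s: String = reader.deserialize().unwrap();
    assert_eq!((n, s.as_str()), (1234, "test"));
}
```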
diff --git a/examples/examples/z_alloc_shm.rs b/examples/examples/z_alloc_shm.rs
new file mode 100644
index 0000000000..e96ca7dab1
--- /dev/null
+++ b/examples/examples/z_alloc_shm.rs
@@ -0,0 +1,146 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use zenoh::{
+    prelude::*,
+    shm::{
+        AllocAlignment, BlockOn, Deallocate, Defragment, GarbageCollect, PosixShmProviderBackend,
+        ShmProviderBuilder, POSIX_PROTOCOL_ID,
+    },
+    Config,
+};
+
+#[tokio::main]
+async fn main() {
+    // Initiate logging
+    zenoh::try_init_log_from_env();
+    run().await.unwrap()
+}
+
+async fn run() -> ZResult<()> {
+    // create an SHM backend...
+    // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs
+    let backend = PosixShmProviderBackend::builder()
+        .with_size(65536)
+        .unwrap()
+        .res()
+        .unwrap();
+    // ...and an SHM provider
+    let provider = ShmProviderBuilder::builder()
+        .protocol_id::<POSIX_PROTOCOL_ID>()
+        .backend(backend)
+        .res();
+
+    // There are two API-defined ways of making shm buffer allocations: direct and through the layout...
+
+    // Direct allocation
+    // The direct allocation calculates all layouting checks on each allocation. It is good for making
+    // uniquely-layouted allocations. For making series of similar allocations, please refer to layout
+    // allocation API which is shown later in this example...
+    let _direct_allocation = {
+        // OPTION: Simple allocation
+        let simple = provider.alloc(512).wait().unwrap();
+
+        // OPTION: Allocation with custom alignment and alloc policy customization
+        let _comprehensive = provider
+            .alloc(512)
+            .with_alignment(AllocAlignment::new(2).unwrap())
+            // for more examples on policies, please see allocation policy usage below (for layout allocation API)
+            .with_policy::<GarbageCollect>()
+            .wait()
+            .unwrap();
+
+        // OPTION: Allocation with custom alignment and async alloc policy
+        let _async = provider
+            .alloc(512)
+            .with_alignment(AllocAlignment::new(2).unwrap())
+            // for more examples on policies, please see allocation policy usage below (for layout allocation API)
+            .with_policy::<BlockOn<Defragment<GarbageCollect>>>()
+            .await
+            .unwrap();
+
+        simple
+    };
+
+    // Create a layout for particular allocation arguments and particular SHM provider
+    // The layout is validated for argument correctness and also is checked
+    // against particular SHM provider's layouting capabilities.
+    // This layout is reusable and can handle series of similar allocations
+    let buffer_layout = {
+        // OPTION: Simple configuration:
+        let simple_layout = provider.alloc(512).into_layout().unwrap();
+
+        // OPTION: Comprehensive configuration:
+        let _comprehensive_layout = provider
+            .alloc(512)
+            .with_alignment(AllocAlignment::new(2).unwrap())
+            .into_layout()
+            .unwrap();
+
+        simple_layout
+    };
+
+    // Allocate ShmBufInner
+    // Policy is a generics-based API to describe necessary allocation behaviour
+    // that will be highly optimized at compile-time.
+    // Policy resolvable can be sync and async.
+    // The basic policies are:
+    // -JustAlloc (sync)
+    // -GarbageCollect (sync)
+    // -Deallocate (sync)
+    // --contains own set of dealloc policy generics:
+    // ---DeallocateYoungest
+    // ---DeallocateEldest
+    // ---DeallocateOptimal
+    // -BlockOn (sync and async)
+    let mut sbuf = async {
+        // Some examples on how to use layout's interface:
+
+        // OPTION: The default allocation with default JustAlloc policy
+        let default_alloc = buffer_layout.alloc().wait().unwrap();
+
+        // OPTION: The async allocation
+        let _async_alloc = buffer_layout
+            .alloc()
+            .with_policy::<BlockOn>()
+            .await
+            .unwrap();
+
+        // OPTION: The comprehensive allocation policy that blocks if provider is not able to allocate
+        let _comprehensive_alloc = buffer_layout
+            .alloc()
+            .with_policy::<BlockOn<Defragment<GarbageCollect>>>()
+            .wait()
+            .unwrap();
+
+        // OPTION: The comprehensive allocation policy that deallocates up to 1000 buffers if provider is not able to allocate
+        let _comprehensive_alloc = buffer_layout
+            .alloc()
+            .with_policy::<Deallocate<1000, Defragment<GarbageCollect>>>()
+            .wait()
+            .unwrap();
+
+        default_alloc
+    }
+    .await;
+
+    // Fill recently-allocated buffer with data
+    sbuf[0..8].fill(0);
+
+    // Declare Session and Publisher (common code)
+    let session = zenoh::open(Config::default()).await?;
+    let publisher = session.declare_publisher("my/key/expr").await?;
+
+    // Publish SHM buffer
+    publisher.put(sbuf).await
+}
diff --git a/examples/examples/z_bytes.rs b/examples/examples/z_bytes.rs
new file mode 100644
index 0000000000..ac4a2cc94a
--- /dev/null
+++ b/examples/examples/z_bytes.rs
@@ -0,0 +1,143 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{borrow::Cow, collections::HashMap, io::Cursor};
+
+use zenoh::bytes::ZBytes;
+
+fn main() {
+    // Numeric: u8, u16, u32, u128, usize, i8, i16, i32, i128, isize, f32, f64
+    let input = 1234_u32;
+    let payload = ZBytes::from(input);
+    let output: u32 = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::ZENOH_UINT32;
+
+    // String
+    let input = String::from("test");
+    let payload = ZBytes::from(&input);
+    let output: String = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::ZENOH_STRING;
+
+    // Cow<str>
+    let input = Cow::from("test");
+    let payload = ZBytes::from(&input);
+    let output: Cow<str> = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::ZENOH_STRING;
+
+    // Vec<u8>: The deserialization should be infallible
+    let input: Vec<u8> = vec![1, 2, 3, 4];
+    let payload = ZBytes::from(&input);
+    let output: Vec<u8> = payload.into();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::ZENOH_BYTES;
+
+    // Writer & Reader
+    // serialization
+    let mut bytes = ZBytes::empty();
+    let mut writer = bytes.writer();
+    let i1 = 1234_u32;
+    let i2 = String::from("test");
+    let i3 = vec![1, 2, 3, 4];
+    writer.serialize(i1);
+    writer.serialize(&i2);
+    writer.serialize(&i3);
+    // deserialization
+    let mut reader = bytes.reader();
+    let o1: u32 = reader.deserialize().unwrap();
+    let o2: String = reader.deserialize().unwrap();
+    let o3: Vec<u8> = reader.deserialize().unwrap();
+    assert_eq!(i1, o1);
+    assert_eq!(i2, o2);
+    assert_eq!(i3, o3);
+
+    // Tuple
+    let input = (1234_u32, String::from("test"));
+    let payload = ZBytes::serialize(input.clone());
+    let output: (u32, String) = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+
+    // Iterator
+    let input: [i32; 4] = [1, 2, 3, 4];
+    let payload = ZBytes::from_iter(input.iter());
+    for (idx, value) in payload.iter::<i32>().enumerate() {
+        assert_eq!(input[idx], value.unwrap());
+    }
+
+    // HashMap
+    let mut input: HashMap<usize, String> = HashMap::new();
+    input.insert(0, String::from("abc"));
+    input.insert(1, String::from("def"));
+    let payload = ZBytes::from(input.clone());
+    let output = payload.deserialize::<HashMap<usize, String>>().unwrap();
+    assert_eq!(input, output);
+
+    // JSON
+    let data = r#"
+    {
+        "name": "John Doe",
+        "age": 43,
+        "phones": [
+            "+44 1234567",
+            "+44 2345678"
+        ]
+    }"#;
+    let input: serde_json::Value = serde_json::from_str(data).unwrap();
+    let payload = ZBytes::try_serialize(input.clone()).unwrap();
+    let output: serde_json::Value = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::APPLICATION_JSON;
+
+    // YAML
+    let data = r#"
+    name: "John Doe"
+    age: 43
+    phones:
+      - "+44 1234567"
+      - "+44 2345678"
+    "#;
+    let input: serde_yaml::Value = serde_yaml::from_str(data).unwrap();
+    let payload = ZBytes::try_serialize(input.clone()).unwrap();
+    let output: serde_yaml::Value = payload.deserialize().unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::APPLICATION_YAML;
+
+    // Protobuf
+    use prost::Message;
+    #[derive(Message, Eq, PartialEq)]
+    struct EntityInfo {
+        #[prost(uint32)]
+        id: u32,
+        #[prost(string)]
+        name: String,
+    }
+    let input = EntityInfo {
+        id: 1234,
+        name: String::from("John Doe"),
+    };
+    let payload = ZBytes::from(input.encode_to_vec());
+    let output =
+        EntityInfo::decode(Cursor::new(payload.deserialize::<Cow<[u8]>>().unwrap())).unwrap();
+    assert_eq!(input, output);
+    // Corresponding encoding to be used in operations like `.put()`, `.reply()`, etc.
+    // let encoding = Encoding::APPLICATION_PROTOBUF;
+}
diff --git a/examples/examples/z_bytes_shm.rs b/examples/examples/z_bytes_shm.rs
new file mode 100644
index 0000000000..c30710214a
--- /dev/null
+++ b/examples/examples/z_bytes_shm.rs
@@ -0,0 +1,97 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use zenoh::{
+    bytes::ZBytes,
+    prelude::*,
+    shm::{
+        zshm, zshmmut, PosixShmProviderBackend, ShmProviderBuilder, ZShm, ZShmMut,
+        POSIX_PROTOCOL_ID,
+    },
+};
+
+fn main() {
+    // create an SHM backend...
+    // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs
+    let backend = PosixShmProviderBackend::builder()
+        .with_size(4096)
+        .unwrap()
+        .res()
+        .unwrap();
+    // ...and an SHM provider
+    let provider = ShmProviderBuilder::builder()
+        .protocol_id::<POSIX_PROTOCOL_ID>()
+        .backend(backend)
+        .res();
+
+    // Allocate an SHM buffer
+    // NOTE: For allocation API please check z_alloc_shm.rs example
+    // NOTE: For buf's API please check z_bytes_shm.rs example
+    let mut owned_shm_buf_mut = provider.alloc(1024).wait().unwrap();
+
+    // mutable and immutable API
+    let _data: &[u8] = &owned_shm_buf_mut;
+    let _data_mut: &mut [u8] = &mut owned_shm_buf_mut;
+
+    // convert into immutable owned buffer (ZShmMut -> ZShm)
+    let owned_shm_buf: ZShm = owned_shm_buf_mut.into();
+
+    // immutable API
+    let _data: &[u8] = &owned_shm_buf;
+
+    // convert again into mutable owned buffer (ZShm -> ZShmMut)
+    let mut owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap();
+
+    // mutable and immutable API
+    let _data: &[u8] = &owned_shm_buf_mut;
+    let _data_mut: &mut [u8] = &mut owned_shm_buf_mut;
+
+    // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes)
+    let mut payload: ZBytes = owned_shm_buf_mut.into();
+
+    // branch to illustrate immutable access to SHM data
+    {
+        // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm)
+        let borrowed_shm_buf: &zshm = payload.deserialize().unwrap();
+
+        // immutable API
+        let _data: &[u8] = borrowed_shm_buf;
+
+        // construct owned buffer from borrowed type (&zshm -> ZShm)
+        let owned = borrowed_shm_buf.to_owned();
+
+        // immutable API
+        let _data: &[u8] = &owned;
+
+        // try to construct mutable ZShmMut (ZShm -> ZShmMut)
+        let owned_mut: Result<ZShmMut, ZShm> = owned.try_into();
+        // the attempt fails because ZShm has two existing references ('owned' and inside 'payload')
+        assert!(owned_mut.is_err())
+    }
+
+    // branch to illustrate mutable access to SHM data
+    {
+        // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm)
+        let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap();
+
+        // immutable API
+        let _data: &[u8] = borrowed_shm_buf;
+
+        // convert zshm to zshmmut (&mut zshm -> &mut zshmmut)
+        let borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap();
+
+        // mutable and immutable API
+        let _data: &[u8] = borrowed_shm_buf_mut;
+        let _data_mut: &mut [u8] = borrowed_shm_buf_mut;
+    }
+}
diff --git a/examples/examples/z_delete.rs b/examples/examples/z_delete.rs
index 046ac29ce9..090aadac48 100644
--- a/examples/examples/z_delete.rs
+++ b/examples/examples/z_delete.rs
@@ -12,24 +12,23 @@
 // ZettaScale Zenoh Team,
 //
 use clap::Parser;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+use zenoh::{key_expr::KeyExpr, Config};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     let (config, key_expr) = parse_args();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Deleting resources matching '{key_expr}'...");
-    session.delete(&key_expr).res().await.unwrap();
+
session.delete(&key_expr).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/examples/examples/z_formats.rs b/examples/examples/z_formats.rs index 357448143e..a423e9b756 100644 --- a/examples/examples/z_formats.rs +++ b/examples/examples/z_formats.rs @@ -12,9 +12,11 @@ // ZettaScale Zenoh Team, // -use zenoh::prelude::keyexpr; - -zenoh::kedefine!( +use zenoh::key_expr::{ + format::{kedefine, keformat}, + keyexpr, +}; +kedefine!( pub file_format: "user_id/${user_id:*}/file/${file:*/**}", pub(crate) settings_format: "user_id/${user_id:*}/settings/${setting:**}" ); @@ -23,7 +25,7 @@ fn main() { // Formatting let mut formatter = file_format::formatter(); let file = "hi/there"; - let ke = zenoh::keformat!(formatter, user_id = 42, file).unwrap(); + let ke = keformat!(formatter, user_id = 42, file).unwrap(); println!("{formatter:?} => {ke}"); // Parsing let settings_ke = keyexpr::new("user_id/30/settings/dark_mode").unwrap(); diff --git a/examples/examples/z_forward.rs b/examples/examples/z_forward.rs index 0c7d90dc7a..be9df7e2b0 100644 --- a/examples/examples/z_forward.rs +++ b/examples/examples/z_forward.rs @@ -12,25 +12,24 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; use zenoh_ext::SubscriberForward; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, forward) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); - let mut subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); + let mut subscriber = session.declare_subscriber(&key_expr).await.unwrap(); println!("Declaring Publisher on '{forward}'..."); - let publisher = session.declare_publisher(&forward).res().await.unwrap(); + let publisher = session.declare_publisher(&forward).await.unwrap(); println!("Forwarding data from '{key_expr}' to '{forward}'..."); subscriber.forward(publisher).await.unwrap(); } diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 234642503d..a83eeb5034 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -11,41 +11,59 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; + +use clap::Parser; +use zenoh::{ + query::{QueryTarget, Selector}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (config, selector, value, target, timeout) = parse_args(); + let (config, selector, payload, target, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Query '{selector}'..."); - let replies = match value { - Some(value) => session.get(&selector).with_value(value), - None => session.get(&selector), - } - .target(target) - .timeout(timeout) - .res() - .await - .unwrap(); + let replies = session + .get(&selector) + // // By default get receives replies from a FIFO. 
+ // // Uncomment this line to use a ring channel instead. + // // More information on the ring channel are available in the z_pull example. + // .with(zenoh::handlers::RingChannel::default()) + // Refer to z_bytes.rs to see how to serialize different types of message + .payload(payload.unwrap_or_default()) + .target(target) + .timeout(timeout) + .await + .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { - Ok(sample) => println!( - ">> Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value, - ), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + match reply.result() { + Ok(sample) => { + // Refer to z_bytes.rs to see how to deserialize different types of message + let payload = sample + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!( + ">> Received ('{}': '{}')", + sample.key_expr().as_str(), + payload, + ); + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } @@ -64,8 +82,8 @@ struct Args { /// The selection of resources to query selector: Selector<'static>, #[arg(short, long)] - /// An optional value to put in the query. - value: Option, + /// An optional payload to put in the query. + payload: Option, #[arg(short, long, default_value = "BEST_MATCHING")] /// The target queryables of the query. target: Qt, @@ -87,7 +105,7 @@ fn parse_args() -> ( ( args.common.into(), args.selector, - args.value, + args.payload, match args.target { Qt::BestMatching => QueryTarget::BestMatching, Qt::All => QueryTarget::All, diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 7e808f5675..53f7abc92a 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -11,35 +11,39 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; -use std::convert::TryFrom; use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; + +use clap::Parser; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, timeout) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Sending Liveliness Query '{key_expr}'..."); let replies = session .liveliness() .get(&key_expr) .timeout(timeout) - .res() .await .unwrap(); while let Ok(reply) = replies.recv_async().await { - match reply.sample { - Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), - Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + match reply.result() { + Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } } } } diff --git a/examples/examples/z_get_shm.rs b/examples/examples/z_get_shm.rs new file mode 100644 index 0000000000..b3c2dedd6e --- /dev/null +++ b/examples/examples/z_get_shm.rs @@ -0,0 +1,141 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is 
available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::time::Duration; + +use clap::Parser; +use zenoh::{ + query::{QueryTarget, Selector}, + shm::{ + zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, + POSIX_PROTOCOL_ID, + }, + Config, +}; +use zenoh_examples::CommonArgs; + +const N: usize = 10; + +#[tokio::main] +async fn main() { + // initiate logging + zenoh::try_init_log_from_env(); + + let (config, selector, mut payload, target, timeout) = parse_args(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + println!("Allocating Shared Memory Buffer..."); + let mut sbuf = provider + .alloc(1024) + .with_policy::>() + .await + .unwrap(); + + let content = payload + .take() + .unwrap_or_else(|| "Get from Rust SHM!".to_string()); + sbuf[0..content.len()].copy_from_slice(content.as_bytes()); + + println!("Sending Query '{selector}'..."); + let replies = session + .get(&selector) + .payload(sbuf) + .target(target) + .timeout(timeout) + .await + .unwrap(); + + while let Ok(reply) = replies.recv_async().await { + match reply.result() { + Ok(sample) => { + print!(">> Received ('{}': ", sample.key_expr().as_str()); + match sample.payload().deserialize::<&zshm>() { + Ok(payload) => println!("'{}')", String::from_utf8_lossy(payload),), + Err(e) => println!("'Not a ShmBufInner: {:?}')", e), + } + } + Err(err) => { + let payload = err + .payload() + .deserialize::() + .unwrap_or_else(|e| format!("{}", e)); + println!(">> Received (ERROR: '{}')", payload); + } + } + } +} + +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +#[value(rename_all = "SCREAMING_SNAKE_CASE")] +enum Qt { + BestMatching, + All, + AllComplete, +} + +#[derive(Parser, Clone, Debug)] +struct Args { + #[arg(short, long, default_value = "demo/example/**")] + /// The selection of resources to query + selector: Selector<'static>, + /// The payload to publish. + payload: Option, + #[arg(short, long, default_value = "BEST_MATCHING")] + /// The target queryables of the query. + target: Qt, + #[arg(short = 'o', long, default_value = "10000")] + /// The query timeout in milliseconds. 
+ timeout: u64, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> ( + Config, + Selector<'static>, + Option, + QueryTarget, + Duration, +) { + let args = Args::parse(); + ( + args.common.into(), + args.selector, + args.payload, + match args.target { + Qt::BestMatching => QueryTarget::BestMatching, + Qt::All => QueryTarget::All, + Qt::AllComplete => QueryTarget::AllComplete, + }, + Duration::from_millis(args.timeout), + ) +} diff --git a/examples/examples/z_info.rs b/examples/examples/z_info.rs index f28b9ad72c..aa40ef62d4 100644 --- a/examples/examples/z_info.rs +++ b/examples/examples/z_info.rs @@ -12,29 +12,28 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::{prelude::*, session::ZenohId}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let config = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); let info = session.info(); - println!("zid: {}", info.zid().res().await); + println!("zid: {}", info.zid().await); println!( "routers zid: {:?}", - info.routers_zid().res().await.collect::>() + info.routers_zid().await.collect::>() ); println!( "peers zid: {:?}", - info.peers_zid().res().await.collect::>() + info.peers_zid().await.collect::>() ); } @@ -44,7 +43,7 @@ struct Args { common: CommonArgs, } -fn parse_args() -> Config { +fn parse_args() -> zenoh::Config { let args = Args::parse(); args.common.into() } diff --git a/examples/examples/z_liveliness.rs b/examples/examples/z_liveliness.rs index 9e17ba3965..bf8890a267 100644 --- a/examples/examples/z_liveliness.rs +++ b/examples/examples/z_liveliness.rs @@ -12,38 +12,28 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::{key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring LivelinessToken on '{}'...", &key_expr); - let mut token = Some( - session - .liveliness() - .declare_token(&key_expr) - .res() - .await - .unwrap(), - ); + let token = session.liveliness().declare_token(&key_expr).await.unwrap(); println!("Press CTRL-C to undeclare LivelinessToken and quit..."); std::thread::park(); + // LivelinessTokens are automatically closed when dropped // Use the code below to manually undeclare it if needed - if let Some(token) = token.take() { - println!("Undeclaring LivelinessToken..."); - token.undeclare().res().await.unwrap(); - }; + token.undeclare().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index 8d0c13f0ef..eec9324173 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -11,19 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::{Duration, Instant}; -use zenoh::config::Config; -use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; + +use clap::Parser; +use zenoh::{bytes::ZBytes, key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; 
use zenoh_examples::CommonArgs; fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (config, warmup, size, n) = parse_args(); - let session = zenoh::open(config).res().unwrap(); + let (config, warmup, size, n, express) = parse_args(); + let session = zenoh::open(config).wait().unwrap(); // The key expression to publish data on let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -31,14 +30,15 @@ fn main() { // The key expression to wait the response back let key_expr_pong = keyexpr::new("test/pong").unwrap(); - let sub = session.declare_subscriber(key_expr_pong).res().unwrap(); + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .res() + .express(express) + .wait() .unwrap(); - let data: Value = (0usize..size) + let data: ZBytes = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); @@ -50,7 +50,7 @@ fn main() { let now = Instant::now(); while now.elapsed() < warmup { let data = data.clone(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); } @@ -58,7 +58,7 @@ fn main() { for _ in 0..n { let data = data.clone(); let write_time = Instant::now(); - publisher.put(data).res().unwrap(); + publisher.put(data).wait().unwrap(); let _ = sub.recv(); let ts = write_time.elapsed().as_micros(); @@ -78,6 +78,9 @@ fn main() { #[derive(Parser)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[arg(short, long, default_value = "1")] /// The number of seconds to warm up (float) warmup: f64, @@ -90,12 +93,13 @@ struct Args { common: CommonArgs, } -fn parse_args() -> (Config, Duration, usize, usize) { +fn parse_args() -> (Config, Duration, usize, usize, bool) { let args = Args::parse(); ( args.common.into(), Duration::from_secs_f64(args.warmup), args.payload_size, args.samples, + !args.no_express, ) } diff --git a/examples/examples/z_ping_shm.rs b/examples/examples/z_ping_shm.rs new file mode 100644 index 0000000000..de33819818 --- /dev/null +++ b/examples/examples/z_ping_shm.rs @@ -0,0 +1,122 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::time::{Duration, Instant}; + +use clap::Parser; +use zenoh::{ + bytes::ZBytes, + key_expr::keyexpr, + prelude::*, + qos::CongestionControl, + shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, + Config, +}; +use zenoh_examples::CommonArgs; + +fn main() { + // Initiate logging + zenoh::try_init_log_from_env(); + + let (config, warmup, size, n) = parse_args(); + + let session = zenoh::open(config).wait().unwrap(); + + // The key expression to publish data on + let key_expr_ping = keyexpr::new("test/ping").unwrap(); + + // The key expression to wait the response back + let key_expr_pong = keyexpr::new("test/pong").unwrap(); + + let sub = session.declare_subscriber(key_expr_pong).wait().unwrap(); + let publisher = session + .declare_publisher(key_expr_ping) + .congestion_control(CongestionControl::Block) + .wait() + .unwrap(); + + let mut samples = Vec::with_capacity(n); + + // create an SHM backend... + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() + .with_size(size) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Allocate an SHM buffer + // NOTE: For allocation API please check z_alloc_shm.rs example + // NOTE: For buf's API please check z_bytes_shm.rs example + let buf = provider.alloc(size).wait().unwrap(); + + // convert ZShmMut into ZBytes as ZShmMut does not support Clone + let buf: ZBytes = buf.into(); + + // -- warmup -- + println!("Warming up for {warmup:?}..."); + let now = Instant::now(); + while now.elapsed() < warmup { + publisher.put(buf.clone()).wait().unwrap(); + let _ = sub.recv().unwrap(); + } + + for _ in 0..n { + let buf = buf.clone(); + let write_time = Instant::now(); + publisher.put(buf).wait().unwrap(); + + let _ = sub.recv(); + let ts = write_time.elapsed().as_micros(); + samples.push(ts); + } + + for (i, rtt) in samples.iter().enumerate().take(n) { + println!( + "{} bytes: seq={} rtt={:?}µs lat={:?}µs", + size, + i, + rtt, + rtt / 2 + ); + } +} + +#[derive(Parser)] +struct Args { + #[arg(short, long, default_value = "1")] + /// The number of seconds to warm up (float) + warmup: f64, + #[arg(short = 'n', long, default_value = "100")] + /// The number of round-trips to measure + samples: usize, + /// Sets the size of the payload to publish + payload_size: usize, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, Duration, usize, usize) { + let args = Args::parse(); + ( + args.common.into(), + Duration::from_secs_f64(args.warmup), + args.payload_size, + args.samples, + ) +} diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index f57b5d9685..86b31d41f3 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -12,18 +12,16 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::sync::*; -use zenoh::publication::CongestionControl; +use zenoh::{key_expr::keyexpr, prelude::*, qos::CongestionControl, Config}; use zenoh_examples::CommonArgs; fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let config = parse_args(); + let (config, express) = parse_args(); - let session = zenoh::open(config).res().unwrap().into_arc(); + let session = 
zenoh::open(config).wait().unwrap().into_arc(); // The key expression to read the data from let key_expr_ping = keyexpr::new("test/ping").unwrap(); @@ -34,24 +32,28 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .res() + .express(express) + .wait() .unwrap(); let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.value).res().unwrap()) - .res() + .callback(move |sample| publisher.put(sample.payload().clone()).wait().unwrap()) + .wait() .unwrap(); std::thread::park(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { + /// express for sending data + #[arg(long, default_value = "false")] + no_express: bool, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> Config { +fn parse_args() -> (Config, bool) { let args = Args::parse(); - args.common.into() + (args.common.into(), !args.no_express) } diff --git a/examples/examples/z_posix_shm_provider.rs b/examples/examples/z_posix_shm_provider.rs new file mode 100644 index 0000000000..7c68d56bd3 --- /dev/null +++ b/examples/examples/z_posix_shm_provider.rs @@ -0,0 +1,46 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh::shm::{ + AllocAlignment, MemoryLayout, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, +}; + +fn main() { + // Construct an SHM backend + let backend = { + // NOTE: code in this block is a specific PosixShmProviderBackend API. 
+ + // Total amount of shared memory to allocate + let size = 4096; + + // An alignment for POSIX SHM provider + // Due to internal optimization, all allocations will be aligned corresponding to this alignment, + // so the provider will be able to satisfy allocation layouts with alignment <= provider_alignment + let provider_alignment = AllocAlignment::default(); + + // A layout for POSIX Provider's memory + let provider_layout = MemoryLayout::new(size, provider_alignment).unwrap(); + + // Build a provider backend + PosixShmProviderBackend::builder() + .with_layout(provider_layout) + .res() + .unwrap() + }; + + // Construct an SHM provider for particular backend and POSIX_PROTOCOL_ID + let _shm_provider = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); +} diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 5de92231a1..4ff177c32a 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -11,40 +11,37 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::Parser; use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; + +use clap::Parser; +use zenoh::{bytes::Encoding, key_expr::KeyExpr, prelude::*, Config}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (config, key_expr, value, attachment) = parse_args(); + let (config, key_expr, payload, attachment) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Publisher on '{key_expr}'..."); - let publisher = session.declare_publisher(&key_expr).res().await.unwrap(); + let publisher = session.declare_publisher(&key_expr).await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; - let buf = format!("[{idx:4}] {value}"); + let buf = format!("[{idx:4}] {payload}"); println!("Putting Data ('{}': '{}')...", &key_expr, buf); - let mut put = publisher.put(buf); - if let Some(attachment) = &attachment { - put = put.with_attachment( - attachment - .split('&') - .map(|pair| split_once(pair, '=')) - .collect(), - ) - } - put.res().await.unwrap(); + // Refer to z_bytes.rs to see how to serialize different types of message + publisher + .put(buf) + .encoding(Encoding::TEXT_PLAIN) // Optionally set the encoding metadata + .attachment(&attachment) // Optionally add an attachment + .await + .unwrap(); } } @@ -54,29 +51,16 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Pub from Rust!")] - /// The value to write. - value: String, + /// The payload to write. + payload: String, #[arg(short, long)] /// The attachments to add to each put. - /// - /// The key-value pairs are &-separated, and = serves as the separator between key and value. 
attach: Option, #[command(flatten)] common: CommonArgs, } -fn split_once(s: &str, c: char) -> (&[u8], &[u8]) { - let s_bytes = s.as_bytes(); - match s.find(c) { - Some(index) => { - let (l, r) = s_bytes.split_at(index); - (l, &r[1..]) - } - None => (s_bytes, &[]), - } -} - fn parse_args() -> (Config, KeyExpr<'static>, String, Option) { let args = Args::parse(); - (args.common.into(), args.key, args.value, args.attach) + (args.common.into(), args.key, args.payload, args.attach) } diff --git a/examples/examples/z_pub_shm.rs b/examples/examples/z_pub_shm.rs index 3601680bf8..6731ae8d0d 100644 --- a/examples/examples/z_pub_shm.rs +++ b/examples/examples/z_pub_shm.rs @@ -12,96 +12,77 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh::shm::SharedMemoryManager; +use zenoh::{ + key_expr::KeyExpr, + prelude::*, + shm::{ + BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID, + }, + Config, +}; use zenoh_examples::CommonArgs; const N: usize = 10; -const K: u32 = 3; #[tokio::main] -async fn main() -> Result<(), zenoh::Error> { +async fn main() -> Result<(), ZError> { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (mut config, path, value) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let (config, path, payload) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); + + println!("Creating POSIX SHM provider..."); + // create an SHM backend... + // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs + let backend = PosixShmProviderBackend::builder() + .with_size(N * 1024) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); - println!("Creating Shared Memory Manager..."); - let id = session.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), N * 1024).unwrap(); + let publisher = session.declare_publisher(&path).await.unwrap(); + // Create allocation layout for series of similar allocations println!("Allocating Shared Memory Buffer..."); - let publisher = session.declare_publisher(&path).res().await.unwrap(); + let layout = provider.alloc(1024).into_layout().unwrap(); println!("Press CTRL-C to quit..."); - for idx in 0..(K * N as u32) { - tokio::time::sleep(Duration::from_secs(1)).await; - let mut sbuf = match shm.alloc(1024) { - Ok(buf) => buf, - Err(_) => { - tokio::time::sleep(Duration::from_millis(100)).await; - println!( - "After failing allocation the GC collected: {} bytes -- retrying", - shm.garbage_collect() - ); - println!( - "Trying to de-fragment memory... 
De-fragmented {} bytes",
-                shm.defragment()
-            );
-            shm.alloc(1024).unwrap()
-        }
-    };
+    for idx in 0..u32::MAX {
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+
+        // Allocate particular SHM buffer using pre-created layout
+        let mut sbuf = layout
+            .alloc()
+            .with_policy::<BlockOn<GarbageCollect>>()
+            .await
+            .unwrap();
 
         // We reserve a small space at the beginning of the buffer to include the iteration index
         // of the write. This is simply to have the same format as zn_pub.
         let prefix = format!("[{idx:4}] ");
         let prefix_len = prefix.as_bytes().len();
+        let slice_len = prefix_len + payload.as_bytes().len();
 
-        // Retrieve a mutable slice from the SharedMemoryBuf.
-        //
-        // This operation is marked unsafe since we cannot guarantee a single mutable reference
-        // across multiple processes. Thus if you use it, and you'll inevitable have to use it,
-        // you have to keep in mind that if you have multiple process retrieving a mutable slice
-        // you may get into concurrent writes. That said, if you have a serial pipeline and
-        // the buffer is flowing through the pipeline this will not create any issues.
-        //
-        // In short, whilst this operation is marked as unsafe, you are safe if you can
-        // guarantee that in your application only one process at the time will actually write.
-        let slice = unsafe { sbuf.as_mut_slice() };
-        let slice_len = prefix_len + value.as_bytes().len();
-        slice[0..prefix_len].copy_from_slice(prefix.as_bytes());
-        slice[prefix_len..slice_len].copy_from_slice(value.as_bytes());
+        sbuf[0..prefix_len].copy_from_slice(prefix.as_bytes());
+        sbuf[prefix_len..slice_len].copy_from_slice(payload.as_bytes());
 
         // Write the data
         println!(
             "Put SHM Data ('{}': '{}')",
             path,
-            String::from_utf8_lossy(&slice[0..slice_len])
+            String::from_utf8_lossy(&sbuf[0..slice_len])
         );
-        publisher.put(sbuf.clone()).res().await?;
-        if idx % K == 0 {
-            let freed = shm.garbage_collect();
-            println!("The Gargabe collector freed {freed} bytes");
-            let defrag = shm.defragment();
-            println!("De-fragmented {defrag} bytes");
-        }
-        // Dropping the SharedMemoryBuf means to free it.
-        drop(sbuf);
+        publisher.put(sbuf).await?;
     }
 
-    // Signal the SharedMemoryManager to garbage collect all the freed SharedMemoryBuf.
-    let _freed = shm.garbage_collect();
-
     Ok(())
 }
 
@@ -109,15 +90,15 @@ struct Args {
     #[arg(short, long, default_value = "demo/example/zenoh-rs-pub")]
     /// The key expression to publish onto.
-    path: KeyExpr<'static>,
-    #[arg(short, long, default_value = "Pub from SharedMemory Rust!")]
-    /// The value of to publish.
-    value: String,
+    key: KeyExpr<'static>,
+    #[arg(short, long, default_value = "Pub from Rust SHM!")]
+    /// The payload to publish.
+ payload: String, #[command(flatten)] common: CommonArgs, } fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.path, args.value) + (args.common.into(), args.key, args.payload) } diff --git a/examples/examples/z_pub_shm_thr.rs b/examples/examples/z_pub_shm_thr.rs index 437f6b2d6d..17bd6de804 100644 --- a/examples/examples/z_pub_shm_thr.rs +++ b/examples/examples/z_pub_shm_thr.rs @@ -12,39 +12,58 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh::publication::CongestionControl; -use zenoh::shm::SharedMemoryManager; +use zenoh::{ + bytes::ZBytes, + prelude::*, + qos::CongestionControl, + shm::{PosixShmProviderBackend, ShmProviderBuilder, POSIX_PROTOCOL_ID}, + Config, +}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); - let (mut config, sm_size, size) = parse_args(); + zenoh::try_init_log_from_env(); + let (config, sm_size, size) = parse_args(); - // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); + let z = zenoh::open(config).await.unwrap(); - let z = zenoh::open(config).res().await.unwrap(); - let id = z.zid(); - let mut shm = SharedMemoryManager::make(id.to_string(), sm_size).unwrap(); - let mut buf = shm.alloc(size).unwrap(); - let bs = unsafe { buf.as_mut_slice() }; - for b in bs { + // create an SHM backend... 
+    // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs
+    let backend = PosixShmProviderBackend::builder()
+        .with_size(sm_size)
+        .unwrap()
+        .res()
+        .unwrap();
+    // ...and an SHM provider
+    let provider = ShmProviderBuilder::builder()
+        .protocol_id::<POSIX_PROTOCOL_ID>()
+        .backend(backend)
+        .res();
+
+    // Allocate an SHM buffer
+    // NOTE: For allocation API please check z_alloc_shm.rs example
+    // NOTE: For buf's API please check z_bytes_shm.rs example
+    let mut buf = provider.alloc(size).wait().unwrap();
+
+    for b in buf.as_mut() {
         *b = rand::random::<u8>();
     }
 
-    let publisher = z.declare_publisher("test/thr")
-        // Make sure to not drop messages because of congestion control
-        .congestion_control(CongestionControl::Block).res().await.unwrap();
+    let publisher = z
+        .declare_publisher("test/thr")
+        // Make sure to not drop messages because of congestion control
+        .congestion_control(CongestionControl::Block)
+        .await
+        .unwrap();
+
+    // convert ZShmMut into ZBytes as ZShmMut does not support Clone
+    let buf: ZBytes = buf.into();
 
     println!("Press CTRL-C to quit...");
     loop {
-        publisher.put(buf.clone()).res().await.unwrap();
+        publisher.put(buf.clone()).await.unwrap();
     }
 }
diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs
index 737e7f197f..e6c063318e 100644
--- a/examples/examples/z_pub_thr.rs
+++ b/examples/examples/z_pub_thr.rs
@@ -12,43 +12,48 @@
 // ZettaScale Zenoh Team,
 //
-use clap::Parser;
 use std::convert::TryInto;
-use zenoh::prelude::sync::*;
-use zenoh::publication::CongestionControl;
+
+use clap::Parser;
+use zenoh::{
+    bytes::ZBytes,
+    prelude::*,
+    qos::{CongestionControl, Priority},
+};
 use zenoh_examples::CommonArgs;
 
 fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
     let args = Args::parse();
 
-    let mut prio = Priority::default();
+    let mut prio = Priority::DEFAULT;
     if let Some(p) = args.priority {
         prio = p.try_into().unwrap();
     }
 
     let payload_size = args.payload_size;
 
-    let data: Value = (0..payload_size)
+    let data: ZBytes = (0..payload_size)
         .map(|i| (i % 10) as u8)
         .collect::<Vec<u8>>()
         .into();
 
-    let session = zenoh::open(args.common).res().unwrap();
+    let session = zenoh::open(args.common).wait().unwrap();
 
     let publisher = session
         .declare_publisher("test/thr")
         .congestion_control(CongestionControl::Block)
         .priority(prio)
-        .res()
+        .express(args.express)
+        .wait()
         .unwrap();
 
     println!("Press CTRL-C to quit...");
     let mut count: usize = 0;
     let mut start = std::time::Instant::now();
     loop {
-        publisher.put(data.clone()).res().unwrap();
+        publisher.put(data.clone()).wait().unwrap();
 
         if args.print {
             if count < args.number {
@@ -65,6 +70,9 @@ fn main() {
 
 #[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)]
 struct Args {
+    /// Express mode for sending data
+    #[arg(long, default_value = "false")]
+    express: bool,
     /// Priority for sending data
     #[arg(short, long)]
     priority: Option<u8>,
diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs
index f397c249ff..6716ef8cc5 100644
--- a/examples/examples/z_pull.rs
+++ b/examples/examples/z_pull.rs
@@ -11,57 +11,103 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use clap::Parser;
 use std::time::Duration;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+
+use clap::Parser;
+use zenoh::{handlers::RingChannel, key_expr::KeyExpr, prelude::*, Config};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
-    let (config, key_expr) = parse_args();
+    let (config, key_expr, size, interval) = parse_args();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Declaring Subscriber on '{key_expr}'...");
-
     let subscriber = session
         .declare_subscriber(&key_expr)
-        .pull_mode()
-        .callback(|sample| {
-            println!(
-                ">> [Subscriber] Received {} ('{}': '{}')",
-                sample.kind,
-                sample.key_expr.as_str(),
-                sample.value,
-            );
-        })
-        .res()
+        .with(RingChannel::new(size))
         .await
         .unwrap();
 
     println!("Press CTRL-C to quit...");
-    for idx in 0..u32::MAX {
-        tokio::time::sleep(Duration::from_secs(1)).await;
-        println!("[{idx:4}] Pulling...");
-        subscriber.pull().res().await.unwrap();
+
+    // Blocking recv. If the ring is empty, wait for the first sample to arrive.
+    loop {
+        // Use .recv() for the synchronous version.
+        match subscriber.recv_async().await {
+            Ok(sample) => {
+                let payload = sample
+                    .payload()
+                    .deserialize::<String>()
+                    .unwrap_or_else(|e| format!("{}", e));
+                println!(
+                    ">> [Subscriber] Pulled {} ('{}': '{}')... performing a computation of {:#?}",
+                    sample.kind(),
+                    sample.key_expr().as_str(),
+                    payload,
+                    interval
+                );
+                tokio::time::sleep(interval).await;
+            }
+            Err(e) => {
+                println!(">> [Subscriber] Pull error: {e}");
+                return;
+            }
+        }
     }
+
+    // Non-blocking recv. This can usually be used to implement a polling mechanism.
+    // loop {
+    //     match subscriber.try_recv() {
+    //         Ok(Some(sample)) => {
+    //             let payload = sample
+    //                 .payload()
+    //                 .deserialize::<String>()
+    //                 .unwrap_or_else(|e| format!("{}", e));
+    //             println!(
+    //                 ">> [Subscriber] Pulled {} ('{}': '{}')",
+    //                 sample.kind(),
+    //                 sample.key_expr().as_str(),
+    //                 payload,
+    //             );
+    //         }
+    //         Ok(None) => {
+    //             println!(
+    //                 ">> [Subscriber] Pulled nothing... sleep for {:#?}",
+    //                 interval
+    //             );
+    //             tokio::time::sleep(interval).await;
+    //         }
+    //         Err(e) => {
+    //             println!(">> [Subscriber] Pull error: {e}");
+    //             return;
+    //         }
+    //     }
+    // }
 }
 
-#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(clap::Parser, Clone, PartialEq, Debug)]
 struct SubArgs {
     #[arg(short, long, default_value = "demo/example/**")]
     /// The Key Expression to subscribe to.
     key: KeyExpr<'static>,
+    /// The size of the ringbuffer.
+    #[arg(short, long, default_value = "3")]
+    size: usize,
+    /// The interval for pulling the ringbuffer.
+ #[arg(short, long, default_value = "5.0")] + interval: f32, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, KeyExpr<'static>) { +fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { let args = SubArgs::parse(); - (args.common.into(), args.key) + let interval = Duration::from_secs_f32(args.interval); + (args.common.into(), args.key, args.size, interval) } diff --git a/examples/examples/z_put.rs b/examples/examples/z_put.rs index 141f4190c8..0097f99139 100644 --- a/examples/examples/z_put.rs +++ b/examples/examples/z_put.rs @@ -12,22 +12,22 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::{key_expr::KeyExpr, Config}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (config, key_expr, value) = parse_args(); + let (config, key_expr, payload) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); - println!("Putting Data ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + println!("Putting Data ('{key_expr}': '{payload}')..."); + // Refer to z_bytes.rs to see how to serialize different types of message + session.put(&key_expr, payload).await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] @@ -36,13 +36,13 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value = "Put from Rust!")] - /// The value to write. - value: String, + /// The payload to write. + payload: String, #[command(flatten)] common: CommonArgs, } fn parse_args() -> (Config, KeyExpr<'static>, String) { let args = Args::parse(); - (args.common.into(), args.key, args.value) + (args.common.into(), args.key, args.payload) } diff --git a/examples/examples/z_put_float.rs b/examples/examples/z_put_float.rs index 83a362bac0..35ece437f3 100644 --- a/examples/examples/z_put_float.rs +++ b/examples/examples/z_put_float.rs @@ -12,24 +12,23 @@ // ZettaScale Zenoh Team, // use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::{key_expr::KeyExpr, Config}; use zenoh_examples::CommonArgs; #[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); - let (config, key_expr, value) = parse_args(); + let (config, key_expr, payload) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); - println!("Putting Float ('{key_expr}': '{value}')..."); - session.put(&key_expr, value).res().await.unwrap(); + println!("Putting Float ('{key_expr}': '{payload}')..."); + session.put(&key_expr, payload).await.unwrap(); - session.close().res().await.unwrap(); + session.close().await.unwrap(); } #[derive(clap::Parser, Clone, PartialEq, Debug)] @@ -38,13 +37,13 @@ struct Args { /// The key expression to write to. key: KeyExpr<'static>, #[arg(short, long, default_value_t = std::f64::consts::PI)] - /// The value to write. - value: f64, + /// The payload to write. 
+    payload: f64,
     #[command(flatten)]
     common: CommonArgs,
 }
 
 fn parse_args() -> (Config, KeyExpr<'static>, f64) {
     let args = Args::parse();
-    (args.common.into(), args.key, args.value)
+    (args.common.into(), args.key, args.payload)
 }
diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs
index 397c76297e..4b950a0a33 100644
--- a/examples/examples/z_queryable.rs
+++ b/examples/examples/z_queryable.rs
@@ -12,47 +12,54 @@
 // ZettaScale Zenoh Team,
 //
 use clap::Parser;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+use zenoh::{key_expr::KeyExpr, prelude::*, Config};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
-    let (config, key_expr, value, complete) = parse_args();
+    let (config, key_expr, payload, complete) = parse_args();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Declaring Queryable on '{key_expr}'...");
     let queryable = session
         .declare_queryable(&key_expr)
+        // // By default queryable receives queries from a FIFO.
+        // // Uncomment this line to use a ring channel instead.
+        // // More information on the ring channel is available in the z_pull example.
+        // .with(zenoh::handlers::RingChannel::default())
         .complete(complete)
-        .res()
         .await
         .unwrap();
 
     println!("Press CTRL-C to quit...");
     while let Ok(query) = queryable.recv_async().await {
-        match query.value() {
+        match query.payload() {
             None => println!(">> [Queryable ] Received Query '{}'", query.selector()),
-            Some(value) => println!(
-                ">> [Queryable ] Received Query '{}' with value '{}'",
-                query.selector(),
-                value
-            ),
+            Some(query_payload) => {
+                // Refer to z_bytes.rs to see how to deserialize different types of message
+                let deserialized_payload = query_payload
+                    .deserialize::<String>()
+                    .unwrap_or_else(|e| format!("{}", e));
+                println!(
+                    ">> [Queryable ] Received Query '{}' with payload '{}'",
+                    query.selector(),
+                    deserialized_payload
+                )
+            }
         }
         println!(
             ">> [Queryable ] Responding ('{}': '{}')",
             key_expr.as_str(),
-            value,
+            payload,
         );
-        let reply = Ok(Sample::new(key_expr.clone(), value.clone()));
+        // Refer to z_bytes.rs to see how to serialize different types of message
         query
-            .reply(reply)
-            .res()
+            .reply(key_expr.clone(), payload.clone())
             .await
             .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}"));
     }
@@ -64,8 +71,8 @@ struct Args {
     /// The key expression matching queries to reply to.
     key: KeyExpr<'static>,
     #[arg(short, long, default_value = "Queryable from Rust!")]
-    /// The value to reply to queries.
-    value: String,
+    /// The payload to reply to queries.
+    payload: String,
     #[arg(long)]
     /// Declare the queryable as complete w.r.t. the key expression.
 complete: bool,
@@ -75,5 +82,5 @@
 
 fn parse_args() -> (Config, KeyExpr<'static>, String, bool) {
     let args = Args::parse();
-    (args.common.into(), args.key, args.value, args.complete)
+    (args.common.into(), args.key, args.payload, args.complete)
 }
diff --git a/examples/examples/z_queryable_shm.rs b/examples/examples/z_queryable_shm.rs
new file mode 100644
index 0000000000..e92efbdc38
--- /dev/null
+++ b/examples/examples/z_queryable_shm.rs
@@ -0,0 +1,165 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use clap::Parser;
+use zenoh::{
+    bytes::ZBytes,
+    key_expr::KeyExpr,
+    prelude::*,
+    shm::{
+        zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder,
+        POSIX_PROTOCOL_ID,
+    },
+    Config,
+};
+use zenoh_examples::CommonArgs;
+
+const N: usize = 10;
+
+#[tokio::main]
+async fn main() {
+    // initiate logging
+    zenoh::try_init_log_from_env();
+
+    let (config, key_expr, payload, complete) = parse_args();
+
+    println!("Opening session...");
+    let session = zenoh::open(config).await.unwrap();
+
+    println!("Creating POSIX SHM provider...");
+    // create an SHM backend...
+    // NOTE: For extended PosixShmProviderBackend API please check z_posix_shm_provider.rs
+    let backend = PosixShmProviderBackend::builder()
+        .with_size(N * 1024)
+        .unwrap()
+        .res()
+        .unwrap();
+    // ...and an SHM provider
+    let provider = ShmProviderBuilder::builder()
+        .protocol_id::<POSIX_PROTOCOL_ID>()
+        .backend(backend)
+        .res();
+
+    println!("Declaring Queryable on '{key_expr}'...");
+    let queryable = session
+        .declare_queryable(&key_expr)
+        .complete(complete)
+        .await
+        .unwrap();
+
+    println!("Press CTRL-C to quit...");
+    while let Ok(query) = queryable.recv_async().await {
+        // Print overall query payload information
+        match query.payload() {
+            Some(payload) => {
+                let (payload_type, payload) = handle_bytes(payload);
+                print!(
+                    ">> [Queryable] Received Query ('{}': '{}') [{}]",
+                    query.selector(),
+                    payload,
+                    payload_type,
+                );
+            }
+            None => {
+                print!(">> Received Query '{}'", query.selector());
+            }
+        };
+
+        // Print attachment information
+        if let Some(att) = query.attachment() {
+            let (attachment_type, attachment) = handle_bytes(att);
+            print!(" ({}: {})", attachment_type, attachment);
+        }
+
+        println!();
+
+        // Allocate an SHM buffer
+        // NOTE: For allocation API please check z_alloc_shm.rs example
+        // NOTE: For buf's API please check z_bytes_shm.rs example
+        println!("Allocating Shared Memory Buffer...");
+        let mut sbuf = provider
+            .alloc(1024)
+            .with_policy::<BlockOn<GarbageCollect>>()
+            .await
+            .unwrap();
+
+        sbuf[0..payload.len()].copy_from_slice(payload.as_bytes());
+
+        println!(
+            ">> [Queryable] Responding ('{}': '{}')",
+            key_expr.as_str(),
+            payload,
+        );
+        query
+            .reply(key_expr.clone(), sbuf)
+            .await
+            .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}"));
+    }
+}
+
+#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)]
+struct Args {
+    #[arg(short, long, default_value = "demo/example/zenoh-rs-queryable")]
+    /// The key expression matching queries to reply to.
+    key: KeyExpr<'static>,
+    #[arg(short, long, default_value = "Queryable from Rust SHM!")]
+    /// The payload to reply to queries.
+    payload: String,
+    #[arg(long)]
+    /// Declare the queryable as complete w.r.t. the key expression.
+    complete: bool,
+    #[command(flatten)]
+    common: CommonArgs,
+}
+
+fn parse_args() -> (Config, KeyExpr<'static>, String, bool) {
+    let args = Args::parse();
+    (args.common.into(), args.key, args.payload, args.complete)
+}
+
+fn handle_bytes(bytes: &ZBytes) -> (&str, String) {
+    // Determine buffer type for indication purpose
+    let bytes_type = {
+        // if Zenoh is built without SHM support, the only buffer type it can receive is RAW
+        #[cfg(not(feature = "shared-memory"))]
+        {
+            "RAW"
+        }
+
+        // if Zenoh is built with SHM support but without SHM API (that is unstable), it can
+        // receive buffers of any type, but there is no way to detect the buffer type
+        #[cfg(all(feature = "shared-memory", not(feature = "unstable")))]
+        {
+            "UNKNOWN"
+        }
+
+        // if Zenoh is built with SHM support and with SHM API we can detect the exact buffer type
+        #[cfg(all(feature = "shared-memory", feature = "unstable"))]
+        match bytes.deserialize::<&zshm>() {
+            Ok(_) => "SHM",
+            Err(_) => "RAW",
+        }
+    };
+
+    // In order to indicate the real underlying buffer type the code above is written ^^^
+    // Sample is SHM-agnostic: Sample handling code works both with SHM and RAW data transparently.
+    // In other words, the common application compiled with "shared-memory" feature will be able to
+    // handle incoming SHM data without any changes in the application code.
+    //
+    // Refer to z_bytes.rs to see how to deserialize different types of message
+    let bytes_string = bytes
+        .deserialize::<String>()
+        .unwrap_or_else(|e| format!("{}", e));
+
+    (bytes_type, bytes_string)
+}
diff --git a/examples/examples/z_scout.rs b/examples/examples/z_scout.rs
index b960b4698a..1d485991fd 100644
--- a/examples/examples/z_scout.rs
+++ b/examples/examples/z_scout.rs
@@ -11,18 +11,15 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
-use zenoh::scouting::WhatAmI;
+use zenoh::{config::WhatAmI, scout, Config};
 
 #[tokio::main]
 async fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     println!("Scouting...");
-    let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, Config::default())
-        .res()
+    let receiver = scout(WhatAmI::Peer | WhatAmI::Router, Config::default())
         .await
         .unwrap();
 
diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs
index c5b19c2ab1..f812c78094 100644
--- a/examples/examples/z_storage.rs
+++ b/examples/examples/z_storage.rs
@@ -13,33 +13,37 @@
 //
 #![recursion_limit = "256"]
 
+use std::collections::HashMap;
+
 use clap::Parser;
 use futures::select;
-use std::collections::HashMap;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+use zenoh::{
+    key_expr::{keyexpr, KeyExpr},
+    prelude::*,
+    sample::{Sample, SampleKind},
+    Config,
+};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     let (config, key_expr, complete) = parse_args();
 
     let mut stored: HashMap<String, Sample> = HashMap::new();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Declaring Subscriber on '{key_expr}'...");
-    let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap();
+    let subscriber = session.declare_subscriber(&key_expr).await.unwrap();
 
     println!("Declaring Queryable on '{key_expr}'...");
     let queryable = session
         .declare_queryable(&key_expr)
         .complete(complete)
-        .res()
         .await
         .unwrap();
 
@@ -48,21 +52,20 @@ async fn main() {
         select!(
             sample = subscriber.recv_async() => {
                 let sample = sample.unwrap();
-                println!(">> [Subscriber] Received {} ('{}': '{}')",
-                    sample.kind, sample.key_expr.as_str(), sample.value);
-                if sample.kind == SampleKind::Delete {
-                    stored.remove(&sample.key_expr.to_string());
-                } else {
-                    stored.insert(sample.key_expr.to_string(), sample);
-                }
+                let payload = sample.payload().deserialize::<String>().unwrap_or_else(|e| format!("{}", e));
+                println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(), payload);
+                match sample.kind() {
+                    SampleKind::Delete => stored.remove(&sample.key_expr().to_string()),
+                    SampleKind::Put => stored.insert(sample.key_expr().to_string(), sample),
+                };
             },
             query = queryable.recv_async() => {
                 let query = query.unwrap();
                 println!(">> [Queryable ] Received Query '{}'", query.selector());
                 for (stored_name, sample) in stored.iter() {
-                    if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) {
-                        query.reply(Ok(sample.clone())).res().await.unwrap();
+                    if query.key_expr().intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) {
+                        query.reply(sample.key_expr().clone(), sample.payload().clone()).await.unwrap();
                     }
                 }
             }
diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs
index d043fb892e..7f3a93c5fb 100644
--- a/examples/examples/z_sub.rs
+++ b/examples/examples/z_sub.rs
@@ -12,37 +12,43 @@
 // ZettaScale Zenoh Team,
 //
 use clap::Parser;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+use zenoh::{key_expr::KeyExpr, prelude::*, Config};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // Initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
-    let (mut config, key_expr) = parse_args();
-
-    // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm` to operate
-    // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the
-    // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected.
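The probing note being removed here is superseded by the `--enable-shm` flag this patch adds to CommonArgs (see examples/src/lib.rs further down). How the flag is wired is not shown in this diff; a plausible sketch, reusing the exact call deleted below:

    // Assumed CommonArgs handling of --enable-shm (the wiring is not part of this diff).
    if value.enable_shm {
        config.transport.shared_memory.set_enabled(true).unwrap();
    }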
-    config.transport.shared_memory.set_enabled(true).unwrap();
+    let (config, key_expr) = parse_args();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Declaring Subscriber on '{}'...", &key_expr);
-
-    let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap();
+    let subscriber = session.declare_subscriber(&key_expr).await.unwrap();
 
     println!("Press CTRL-C to quit...");
     while let Ok(sample) = subscriber.recv_async().await {
-        println!(
+        // Refer to z_bytes.rs to see how to deserialize different types of message
+        let payload = sample
+            .payload()
+            .deserialize::<String>()
+            .unwrap_or_else(|e| format!("{}", e));
+
+        print!(
             ">> [Subscriber] Received {} ('{}': '{}')",
-            sample.kind,
-            sample.key_expr.as_str(),
-            sample.value
+            sample.kind(),
+            sample.key_expr().as_str(),
+            payload
         );
+        if let Some(att) = sample.attachment() {
+            let att = att
+                .deserialize::<String>()
+                .unwrap_or_else(|e| format!("{}", e));
+            print!(" ({})", att);
+        }
+        println!();
     }
 }
diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs
index c05311a09c..bb91c9f491 100644
--- a/examples/examples/z_sub_liveliness.rs
+++ b/examples/examples/z_sub_liveliness.rs
@@ -12,39 +12,37 @@
 // ZettaScale Zenoh Team,
 //
 use clap::Parser;
-use zenoh::config::Config;
-use zenoh::prelude::r#async::*;
+use zenoh::{key_expr::KeyExpr, prelude::*, sample::SampleKind, Config};
 use zenoh_examples::CommonArgs;
 
 #[tokio::main]
 async fn main() {
     // Initiate logging
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     let (config, key_expr) = parse_args();
 
     println!("Opening session...");
-    let session = zenoh::open(config).res().await.unwrap();
+    let session = zenoh::open(config).await.unwrap();
 
     println!("Declaring Liveliness Subscriber on '{}'...", &key_expr);
     let subscriber = session
         .liveliness()
         .declare_subscriber(&key_expr)
-        .res()
         .await
         .unwrap();
 
     println!("Press CTRL-C to quit...");
     while let Ok(sample) = subscriber.recv_async().await {
-        match sample.kind {
+        match sample.kind() {
             SampleKind::Put => println!(
                 ">> [LivelinessSubscriber] New alive token ('{}')",
-                sample.key_expr.as_str()
+                sample.key_expr().as_str()
             ),
             SampleKind::Delete => println!(
                 ">> [LivelinessSubscriber] Dropped token ('{}')",
-                sample.key_expr.as_str()
+                sample.key_expr().as_str()
             ),
         }
     }
diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs
new file mode 100644
index 0000000000..f45dab099d
--- /dev/null
+++ b/examples/examples/z_sub_shm.rs
@@ -0,0 +1,124 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use clap::Parser; +#[cfg(all(feature = "shared-memory", feature = "unstable"))] +use zenoh::shm::zshm; +use zenoh::{bytes::ZBytes, config::Config, key_expr::KeyExpr, prelude::*}; +use zenoh_examples::CommonArgs; + +#[tokio::main] +async fn main() { + // Initiate logging + zenoh::try_init_log_from_env(); + + let (config, key_expr) = parse_args(); + + println!("Opening session..."); + let session = zenoh::open(config).await.unwrap(); + + println!("Declaring Subscriber on '{}'...", &key_expr); + let subscriber = session.declare_subscriber(&key_expr).await.unwrap(); + + println!("Press CTRL-C to quit..."); + while let Ok(sample) = subscriber.recv_async().await { + // Print overall payload information + let (payload_type, payload) = handle_bytes(sample.payload()); + print!( + ">> [Subscriber] Received {} ('{}': '{}') [{}] ", + sample.kind(), + sample.key_expr().as_str(), + payload, + payload_type, + ); + + // Print attachment information + if let Some(att) = sample.attachment() { + let (attachment_type, attachment) = handle_bytes(att); + print!(" ({}: {})", attachment_type, attachment); + } + + println!(); + } + + // // Try to get a mutable reference to the SHM buffer. If this subscriber is the only subscriber + // // holding a reference to the SHM buffer, then it will be able to get a mutable reference to it. + // // With the mutable reference at hand, it's possible to mutate in place the SHM buffer content. + // + // use zenoh::shm::zshmmut; + + // while let Ok(mut sample) = subscriber.recv_async().await { + // let kind = sample.kind(); + // let key_expr = sample.key_expr().to_string(); + // match sample.payload_mut().deserialize_mut::<&mut zshmmut>() { + // Ok(payload) => println!( + // ">> [Subscriber] Received {} ('{}': '{:02x?}')", + // kind, key_expr, payload + // ), + // Err(e) => { + // println!(">> [Subscriber] Not a ShmBufInner: {:?}", e); + // } + // } + // } +} + +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] +struct SubArgs { + #[arg(short, long, default_value = "demo/example/**")] + /// The Key Expression to subscribe to. + key: KeyExpr<'static>, + #[command(flatten)] + common: CommonArgs, +} + +fn parse_args() -> (Config, KeyExpr<'static>) { + let args = SubArgs::parse(); + (args.common.into(), args.key) +} + +fn handle_bytes(bytes: &ZBytes) -> (&str, String) { + // Determine buffer type for indication purpose + let bytes_type = { + // if Zenoh is built without SHM support, the only buffer type it can receive is RAW + #[cfg(not(feature = "shared-memory"))] + { + "RAW" + } + + // if Zenoh is built with SHM support but without SHM API (that is unstable), it can + // receive buffers of any type, but there is no way to detect the buffer type + #[cfg(all(feature = "shared-memory", not(feature = "unstable")))] + { + "UNKNOWN" + } + + // if Zenoh is built with SHM support and with SHM API we can detect the exact buffer type + #[cfg(all(feature = "shared-memory", feature = "unstable"))] + match bytes.deserialize::<&zshm>() { + Ok(_) => "SHM", + Err(_) => "RAW", + } + }; + + // In order to indicate the real underlying buffer type the code above is written ^^^ + // Sample is SHM-agnostic: Sample handling code works both with SHM and RAW data transparently. + // In other words, the common application compiled with "shared-memory" feature will be able to + // handle incoming SHM data without any changes in the application code. 
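When both `shared-memory` and `unstable` are enabled, the same `deserialize::<&zshm>()` probe can also hand back a zero-copy borrow of the payload rather than serving only as a type check. A minimal sketch built from the calls shown above (the hex Debug formatting mirrors the commented `zshmmut` loop):

    // Borrow the payload as an SHM buffer when possible; no copy is made.
    #[cfg(all(feature = "shared-memory", feature = "unstable"))]
    if let Ok(shm) = sample.payload().deserialize::<&zshm>() {
        println!(">> [Subscriber] zero-copy SHM payload: {:02x?}", shm);
    }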
+    //
+    // Refer to z_bytes.rs to see how to deserialize different types of message
+    let bytes_string = bytes
+        .deserialize::<String>()
+        .unwrap_or_else(|e| format!("{}", e));
+
+    (bytes_type, bytes_string)
+}
diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs
index 7e2018b846..78626d1d1d 100644
--- a/examples/examples/z_sub_thr.rs
+++ b/examples/examples/z_sub_thr.rs
@@ -11,10 +11,10 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use clap::Parser;
 use std::time::Instant;
-use zenoh::config::Config;
-use zenoh::prelude::sync::*;
+
+use clap::Parser;
+use zenoh::{prelude::*, Config};
 use zenoh_examples::CommonArgs;
 
 struct Stats {
@@ -69,21 +69,16 @@ impl Drop for Stats {
 
 fn main() {
     // initiate logging
-    zenoh_util::try_init_log_from_env();
-
-    let (mut config, m, n) = parse_args();
+    zenoh::try_init_log_from_env();
 
-    // A probing procedure for shared memory is performed upon session opening. To enable `z_pub_shm_thr` to operate
-    // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the
-    // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected.
-    config.transport.shared_memory.set_enabled(true).unwrap();
+    let (config, m, n) = parse_args();
 
-    let session = zenoh::open(config).res().unwrap();
+    let session = zenoh::open(config).wait().unwrap();
 
     let key_expr = "test/thr";
 
     let mut stats = Stats::new(n);
-    let _sub = session
+    session
         .declare_subscriber(key_expr)
         .callback_mut(move |_sample| {
             stats.increment();
@@ -91,8 +86,10 @@ fn main() {
                 std::process::exit(0)
             }
         })
-        .res()
-        .unwrap();
+        .wait()
+        .unwrap()
+        // Make the subscriber run in the background until the session is closed.
+        .background();
 
     println!("Press CTRL-C to quit...");
     std::thread::park();
diff --git a/examples/src/lib.rs b/examples/src/lib.rs
index 0be3809cf2..6562942361 100644
--- a/examples/src/lib.rs
+++ b/examples/src/lib.rs
@@ -33,7 +33,7 @@ pub struct CommonArgs {
     /// Disable the multicast-based scouting mechanism.
     no_multicast_scouting: bool,
     #[arg(long)]
-    /// Disable the multicast-based scouting mechanism.
+    /// Enable the shared-memory feature.
 enable_shm: bool,
 }
 
@@ -49,17 +49,25 @@ impl From<&CommonArgs> for Config {
             None => Config::default(),
         };
         match value.mode {
-            Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)),
-            Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)),
-            Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)),
+            Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)),
+            Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)),
+            Some(Wai::Router) => config.set_mode(Some(zenoh::config::WhatAmI::Router)),
             None => Ok(None),
         }
         .unwrap();
         if !value.connect.is_empty() {
-            config.connect.endpoints = value.connect.iter().map(|v| v.parse().unwrap()).collect();
+            config
+                .connect
+                .endpoints
+                .set(value.connect.iter().map(|v| v.parse().unwrap()).collect())
+                .unwrap();
         }
         if !value.listen.is_empty() {
-            config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect();
+            config
+                .listen
+                .endpoints
+                .set(value.listen.iter().map(|v| v.parse().unwrap()).collect())
+                .unwrap();
         }
         if value.no_multicast_scouting {
             config.scouting.multicast.set_enabled(Some(false)).unwrap();
diff --git a/io/zenoh-link-commons/Cargo.toml b/io/zenoh-link-commons/Cargo.toml
index 12b70cad6d..7ec7c533d7 100644
--- a/io/zenoh-link-commons/Cargo.toml
+++ b/io/zenoh-link-commons/Cargo.toml
@@ -26,14 +26,15 @@ version = { workspace = true }
 
 [features]
 compression = []
+tls = ["dep:rustls", "dep:rustls-webpki", "dep:webpki-roots"]
 
 [dependencies]
 async-trait = { workspace = true }
 base64 = { workspace = true, optional = true }
 flume = { workspace = true }
 futures = { workspace = true }
-rustls = { workspace = true }
-rustls-webpki = { workspace = true }
+rustls = { workspace = true, optional = true }
+rustls-webpki = { workspace = true, optional = true }
 serde = { workspace = true, features = ["default"] }
 tokio = { workspace = true, features = [
     "fs",
diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs
index f9ad7166ee..46c0968f3f 100644
--- a/io/zenoh-link-commons/src/lib.rs
+++ b/io/zenoh-link-commons/src/lib.rs
@@ -21,17 +21,19 @@ extern crate alloc;
 
 mod listener;
 mod multicast;
+#[cfg(feature = "tls")]
 pub mod tls;
 mod unicast;
 
 use alloc::{borrow::ToOwned, boxed::Box, string::String, vec, vec::Vec};
-use async_trait::async_trait;
 use core::{cmp::PartialEq, fmt, hash::Hash};
+
+use async_trait::async_trait;
 pub use listener::*;
 pub use multicast::*;
 use serde::Serialize;
 pub use unicast::*;
-use zenoh_protocol::core::Locator;
+use zenoh_protocol::{core::Locator, transport::BatchSize};
 use zenoh_result::ZResult;
 
 /*************************************/
@@ -45,10 +47,11 @@ pub struct Link {
     pub src: Locator,
     pub dst: Locator,
     pub group: Option<Locator>,
-    pub mtu: u16,
+    pub mtu: BatchSize,
     pub is_reliable: bool,
     pub is_streamed: bool,
     pub interfaces: Vec<String>,
+    pub auth_identifier: LinkAuthId,
 }
 
 #[async_trait]
@@ -77,6 +80,7 @@ impl From<&LinkUnicast> for Link {
             is_reliable: link.is_reliable(),
             is_streamed: link.is_streamed(),
             interfaces: link.get_interface_names(),
+            auth_identifier: link.get_auth_id().clone(),
         }
     }
 }
@@ -97,6 +101,7 @@ impl From<&LinkMulticast> for Link {
             is_reliable: link.is_reliable(),
             is_streamed: false,
             interfaces: vec![],
+            auth_identifier: LinkAuthId::default(),
         }
     }
 }
diff --git a/io/zenoh-link-commons/src/listener.rs b/io/zenoh-link-commons/src/listener.rs
index be61e9cf89..48930a7a65 100644
--- a/io/zenoh-link-commons/src/listener.rs
+++ b/io/zenoh-link-commons/src/listener.rs
@@ -11,11 +11,13 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+use std::{
+    collections::HashMap,
+    net::{IpAddr, SocketAddr},
+    sync::{Arc, RwLock},
+};
+
 use futures::Future;
-use std::collections::HashMap;
-use std::net::IpAddr;
-use std::net::SocketAddr;
-use std::sync::{Arc, RwLock};
 use tokio::task::JoinHandle;
 use tokio_util::sync::CancellationToken;
 use zenoh_core::{zread, zwrite};
diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs
index 65bc7195b6..ee07c4eb58 100644
--- a/io/zenoh-link-commons/src/multicast.rs
+++ b/io/zenoh-link-commons/src/multicast.rs
@@ -12,17 +12,18 @@
 // ZettaScale Zenoh Team,
 //
 use alloc::{borrow::Cow, boxed::Box, sync::Arc, vec::Vec};
-use async_trait::async_trait;
 use core::{
     fmt,
     hash::{Hash, Hasher},
     ops::Deref,
 };
+
+use async_trait::async_trait;
 use zenoh_buffers::{reader::HasReader, writer::HasWriter};
 use zenoh_codec::{RCodec, WCodec, Zenoh080};
 use zenoh_protocol::{
     core::{EndPoint, Locator},
-    transport::TransportMessage,
+    transport::{BatchSize, TransportMessage},
 };
 use zenoh_result::{zerror, ZResult};
 
@@ -44,7 +45,7 @@ pub struct LinkMulticast(pub Arc<dyn LinkMulticastTrait>);
 
 #[async_trait]
 pub trait LinkMulticastTrait: Send + Sync {
-    fn get_mtu(&self) -> u16;
+    fn get_mtu(&self) -> BatchSize;
     fn get_src(&self) -> &Locator;
     fn get_dst(&self) -> &Locator;
     fn is_reliable(&self) -> bool;
diff --git a/io/zenoh-link-commons/src/tls.rs b/io/zenoh-link-commons/src/tls.rs
index 562b02c81e..427880b812 100644
--- a/io/zenoh-link-commons/src/tls.rs
+++ b/io/zenoh-link-commons/src/tls.rs
@@ -1,4 +1,5 @@
 use alloc::vec::Vec;
+
 use rustls::{
     client::{
         danger::{ServerCertVerified, ServerCertVerifier},
diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs
index cce12858e8..62f39bf86c 100644
--- a/io/zenoh-link-commons/src/unicast.rs
+++ b/io/zenoh-link-commons/src/unicast.rs
@@ -12,14 +12,19 @@
 // ZettaScale Zenoh Team,
 //
 use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
-use async_trait::async_trait;
 use core::{
     fmt,
     hash::{Hash, Hasher},
     ops::Deref,
 };
 use std::net::SocketAddr;
-use zenoh_protocol::core::{EndPoint, Locator};
+
+use async_trait::async_trait;
+use serde::Serialize;
+use zenoh_protocol::{
+    core::{EndPoint, Locator},
+    transport::BatchSize,
+};
 use zenoh_result::ZResult;
 
 pub type LinkManagerUnicast = Arc<dyn LinkManagerUnicastTrait>;
@@ -41,12 +46,13 @@ pub struct LinkUnicast(pub Arc<dyn LinkUnicastTrait>);
 
 #[async_trait]
 pub trait LinkUnicastTrait: Send + Sync {
-    fn get_mtu(&self) -> u16;
+    fn get_mtu(&self) -> BatchSize;
     fn get_src(&self) -> &Locator;
     fn get_dst(&self) -> &Locator;
     fn is_reliable(&self) -> bool;
     fn is_streamed(&self) -> bool;
     fn get_interface_names(&self) -> Vec<String>;
+    fn get_auth_id(&self) -> &LinkAuthId;
     async fn write(&self, buffer: &[u8]) -> ZResult<usize>;
     async fn write_all(&self, buffer: &[u8]) -> ZResult<()>;
     async fn read(&self, buffer: &mut [u8]) -> ZResult<usize>;
@@ -114,3 +120,76 @@ pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec<String> {
         }
     }
 }
+
+#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)]
+pub enum LinkAuthType {
+    Tls,
+    Quic,
+    None,
+}
+
+#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)]
+pub struct LinkAuthId {
+    auth_type: LinkAuthType,
+    auth_value: Option<String>,
+}
+
+impl LinkAuthId {
+    pub const NONE: Self = Self {
+        auth_type: LinkAuthType::None,
+        auth_value: None,
+    };
+
+    pub fn get_type(&self) -> &LinkAuthType {
+        &self.auth_type
+    }
+
+    pub fn get_value(&self) -> &Option<String> {
+        &self.auth_value
+    }
+
+    pub fn builder() -> LinkAuthIdBuilder {
+        LinkAuthIdBuilder::new()
+    }
+}
+
+impl Default for LinkAuthId {
+    fn default() -> Self {
+        LinkAuthId::NONE.clone()
+    }
+}
+
+#[derive(Debug)]
+pub struct LinkAuthIdBuilder {
+    pub auth_type: LinkAuthType,    // HAS to be provided when building
+    pub auth_value: Option<String>, // actual value added to the above type; is None for None type
+}
+
+impl Default for LinkAuthIdBuilder {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl LinkAuthIdBuilder {
+    pub fn new() -> LinkAuthIdBuilder {
+        LinkAuthIdBuilder {
+            auth_type: LinkAuthType::None,
+            auth_value: None,
+        }
+    }
+
+    pub fn auth_type(mut self, auth_type: LinkAuthType) -> Self {
+        self.auth_type = auth_type;
+        self
+    }
+
+    pub fn auth_value(mut self, auth_value: Option<String>) -> Self {
+        self.auth_value = auth_value;
+        self
+    }
+
+    pub fn build(self) -> LinkAuthId {
+        LinkAuthId {
+            auth_type: self.auth_type.clone(),
+            auth_value: self.auth_value.clone(),
+        }
+    }
+}
diff --git a/io/zenoh-link/src/lib.rs b/io/zenoh-link/src/lib.rs
index 21f26ecf1b..7898cf087d 100644
--- a/io/zenoh-link/src/lib.rs
+++ b/io/zenoh-link/src/lib.rs
@@ -18,40 +18,41 @@
 //!
 //! [Click here for Zenoh's documentation](../zenoh/index.html)
 use std::collections::HashMap;
-use zenoh_config::Config;
-use zenoh_result::{bail, ZResult};
 
+use zenoh_config::Config;
+pub use zenoh_link_commons::*;
+#[cfg(feature = "transport_quic")]
+pub use zenoh_link_quic as quic;
+#[cfg(feature = "transport_quic")]
+use zenoh_link_quic::{
+    LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX,
+};
+#[cfg(feature = "transport_serial")]
+pub use zenoh_link_serial as serial;
+#[cfg(feature = "transport_serial")]
+use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX};
 #[cfg(feature = "transport_tcp")]
 pub use zenoh_link_tcp as tcp;
 #[cfg(feature = "transport_tcp")]
 use zenoh_link_tcp::{LinkManagerUnicastTcp, TcpLocatorInspector, TCP_LOCATOR_PREFIX};
-
-#[cfg(feature = "transport_udp")]
-pub use zenoh_link_udp as udp;
-#[cfg(feature = "transport_udp")]
-use zenoh_link_udp::{
-    LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX,
-};
-
 #[cfg(feature = "transport_tls")]
 pub use zenoh_link_tls as tls;
 #[cfg(feature = "transport_tls")]
 use zenoh_link_tls::{
     LinkManagerUnicastTls, TlsConfigurator, TlsLocatorInspector, TLS_LOCATOR_PREFIX,
 };
-
-#[cfg(feature = "transport_quic")]
-pub use zenoh_link_quic as quic;
-#[cfg(feature = "transport_quic")]
-use zenoh_link_quic::{
-    LinkManagerUnicastQuic, QuicConfigurator, QuicLocatorInspector, QUIC_LOCATOR_PREFIX,
+#[cfg(feature = "transport_udp")]
+pub use zenoh_link_udp as udp;
+#[cfg(feature = "transport_udp")]
+use zenoh_link_udp::{
+    LinkManagerMulticastUdp, LinkManagerUnicastUdp, UdpLocatorInspector, UDP_LOCATOR_PREFIX,
+};
+#[cfg(feature = "transport_unixpipe")]
+pub use zenoh_link_unixpipe as unixpipe;
+#[cfg(feature = "transport_unixpipe")]
+use zenoh_link_unixpipe::{
+    LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX,
 };
-
-#[cfg(feature = "transport_ws")]
-pub use zenoh_link_ws as ws;
-#[cfg(feature = "transport_ws")]
-use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX};
-
 #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
 pub use zenoh_link_unixsock_stream as unixsock_stream;
 #[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))]
 use zenoh_link_unixsock_stream::{
     LinkManagerUnicastUnixSocketStream, UnixSockStreamLocatorInspector,
UNIXSOCKSTREAM_LOCATOR_PREFIX, }; - -#[cfg(feature = "transport_serial")] -pub use zenoh_link_serial as serial; -#[cfg(feature = "transport_serial")] -use zenoh_link_serial::{LinkManagerUnicastSerial, SerialLocatorInspector, SERIAL_LOCATOR_PREFIX}; - -#[cfg(feature = "transport_unixpipe")] -pub use zenoh_link_unixpipe as unixpipe; -#[cfg(feature = "transport_unixpipe")] -use zenoh_link_unixpipe::{ - LinkManagerUnicastPipe, UnixPipeConfigurator, UnixPipeLocatorInspector, UNIXPIPE_LOCATOR_PREFIX, -}; - #[cfg(all(feature = "transport_vsock", target_os = "linux"))] pub use zenoh_link_vsock as vsock; #[cfg(all(feature = "transport_vsock", target_os = "linux"))] use zenoh_link_vsock::{LinkManagerUnicastVsock, VsockLocatorInspector, VSOCK_LOCATOR_PREFIX}; - -pub use zenoh_link_commons::*; +#[cfg(feature = "transport_ws")] +pub use zenoh_link_ws as ws; +#[cfg(feature = "transport_ws")] +use zenoh_link_ws::{LinkManagerUnicastWs, WsLocatorInspector, WS_LOCATOR_PREFIX}; pub use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_result::{bail, ZResult}; pub const PROTOCOLS: &[&str] = &[ #[cfg(feature = "transport_quic")] diff --git a/io/zenoh-links/zenoh-link-quic/Cargo.toml b/io/zenoh-links/zenoh-link-quic/Cargo.toml index d86e75847b..ff634d9d15 100644 --- a/io/zenoh-links/zenoh-link-quic/Cargo.toml +++ b/io/zenoh-links/zenoh-link-quic/Cargo.toml @@ -45,9 +45,11 @@ tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } tracing = { workspace = true } webpki-roots = { workspace = true } +x509-parser = { workspace = true } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } -zenoh-link-commons = { workspace = true } +zenoh-link-commons = { workspace = true, features = ["tls"] } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index deed695ace..cde9c589a3 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -18,10 +18,9 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_trait::async_trait; - use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::Locator; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; mod unicast; @@ -38,7 +37,7 @@ pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the QUIC MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const QUIC_MAX_MTU: u16 = u16::MAX; +const QUIC_MAX_MTU: BatchSize = BatchSize::MAX; pub const QUIC_LOCATOR_PREFIX: &str = "quic"; #[derive(Default, Clone, Copy, Debug)] @@ -57,7 +56,7 @@ impl LocatorInspector for QuicLocatorInspector { zconfigurable! { // Default MTU (QUIC PDU) in bytes. - static ref QUIC_DEFAULT_MTU: u16 = QUIC_MAX_MTU; + static ref QUIC_DEFAULT_MTU: BatchSize = QUIC_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. 
 // More info on the LINGER option and its dynamics can be found at:
diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs
index 8dde380577..2e0d9e0a19 100644
--- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs
+++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs
@@ -12,27 +12,34 @@
 // ZettaScale Zenoh Team,
 //
-use crate::{
-    utils::{get_quic_addr, TlsClientConfig, TlsServerConfig},
-    ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX,
+use std::{
+    fmt,
+    net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
+    sync::Arc,
+    time::Duration,
 };
+
 use async_trait::async_trait;
 use quinn::crypto::rustls::{QuicClientConfig, QuicServerConfig};
-use std::fmt;
-use std::net::IpAddr;
-use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
-use std::sync::Arc;
-use std::time::Duration;
 use tokio::sync::Mutex as AsyncMutex;
 use tokio_util::sync::CancellationToken;
+use x509_parser::prelude::*;
 use zenoh_core::zasynclock;
 use zenoh_link_commons::{
-    get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait,
-    ListenersUnicastIP, NewLinkChannelSender,
+    get_ip_interface_names, LinkAuthId, LinkAuthType, LinkManagerUnicastTrait, LinkUnicast,
+    LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender,
+};
+use zenoh_protocol::{
+    core::{EndPoint, Locator},
+    transport::BatchSize,
 };
-use zenoh_protocol::core::{EndPoint, Locator};
 use zenoh_result::{bail, zerror, ZResult};
 
+use crate::{
+    utils::{get_quic_addr, TlsClientConfig, TlsServerConfig},
+    ALPN_QUIC_HTTP, QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX,
+};
+
 pub struct LinkUnicastQuic {
     connection: quinn::Connection,
     src_addr: SocketAddr,
@@ -40,6 +47,7 @@ pub struct LinkUnicastQuic {
     dst_locator: Locator,
     send: AsyncMutex<quinn::SendStream>,
     recv: AsyncMutex<quinn::RecvStream>,
+    auth_identifier: LinkAuthId,
 }
 
 impl LinkUnicastQuic {
@@ -49,6 +57,7 @@ impl LinkUnicastQuic {
         dst_locator: Locator,
         send: quinn::SendStream,
         recv: quinn::RecvStream,
+        auth_identifier: LinkAuthId,
     ) -> LinkUnicastQuic {
         // Build the Quic object
         LinkUnicastQuic {
@@ -58,6 +67,7 @@ impl LinkUnicastQuic {
             dst_locator,
             send: AsyncMutex::new(send),
             recv: AsyncMutex::new(recv),
+            auth_identifier,
         }
     }
 }
@@ -132,7 +142,7 @@ impl LinkUnicastTrait for LinkUnicastQuic {
     }
 
     #[inline(always)]
-    fn get_mtu(&self) -> u16 {
+    fn get_mtu(&self) -> BatchSize {
         *QUIC_DEFAULT_MTU
     }
 
@@ -150,6 +160,11 @@ impl LinkUnicastTrait for LinkUnicastQuic {
     fn is_streamed(&self) -> bool {
         true
     }
+
+    #[inline(always)]
+    fn get_auth_id(&self) -> &LinkAuthId {
+        &self.auth_identifier
+    }
 }
 
 impl Drop for LinkUnicastQuic {
@@ -243,12 +258,15 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic {
             .await
             .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?;
 
+        let auth_id = get_cert_common_name(&quic_conn)?;
+
         let link = Arc::new(LinkUnicastQuic::new(
             quic_conn,
             src_addr,
             endpoint.into(),
             send,
             recv,
+            auth_id.into(),
         ));
 
         Ok(LinkUnicast(link))
@@ -396,6 +414,8 @@ async fn accept_task(
             }
         };
         let dst_addr = quic_conn.remote_address();
+        // Get Quic auth identifier
+        let auth_id = get_cert_common_name(&quic_conn)?;
         tracing::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr);
         // Create the new link object
@@ -405,6 +425,7 @@ async fn accept_task(
             Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?,
             send,
             recv,
+            auth_id.into()
         ));
 
         // Communicate the new link to the initial transport manager
@@ -429,3 +450,39 @@ async fn accept_task(
     }
     Ok(())
 }
+
+fn get_cert_common_name(conn: &quinn::Connection) -> ZResult<QuicAuthId> {
+    let mut auth_id = QuicAuthId { auth_value: None };
+    if let Some(pi) = conn.peer_identity() {
+        let serv_certs = pi
+            .downcast::<Vec<rustls_pki_types::CertificateDer>>()
+            .unwrap();
+        if let Some(item) = serv_certs.iter().next() {
+            let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap();
+            let subject_name = cert
+                .subject
+                .iter_common_name()
+                .next()
+                .and_then(|cn| cn.as_str().ok())
+                .unwrap();
+            auth_id = QuicAuthId {
+                auth_value: Some(subject_name.to_string()),
+            };
+        }
+    }
+    Ok(auth_id)
+}
+
+#[derive(Debug, Clone)]
+struct QuicAuthId {
+    auth_value: Option<String>,
+}
+
+impl From<QuicAuthId> for LinkAuthId {
+    fn from(value: QuicAuthId) -> Self {
+        LinkAuthId::builder()
+            .auth_type(LinkAuthType::Quic)
+            .auth_value(value.auth_value.clone())
+            .build()
+    }
+}
diff --git a/io/zenoh-links/zenoh-link-quic/src/utils.rs b/io/zenoh-links/zenoh-link-quic/src/utils.rs
index bba5b41787..b5cc7c49f8 100644
--- a/io/zenoh-links/zenoh-link-quic/src/utils.rs
+++ b/io/zenoh-links/zenoh-link-quic/src/utils.rs
@@ -11,7 +11,14 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::config::*;
+use std::{
+    fs::File,
+    io,
+    io::{BufReader, Cursor},
+    net::SocketAddr,
+    sync::Arc,
+};
+
 use rustls::{
     pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor},
     server::WebPkiClientVerifier,
@@ -19,20 +26,17 @@ use rustls::{
     ClientConfig, RootCertStore, ServerConfig,
 };
 use secrecy::ExposeSecret;
-use std::fs::File;
-use std::io;
-use std::net::SocketAddr;
-use std::{
-    io::{BufReader, Cursor},
-    sync::Arc,
-};
 use webpki::anchor_from_trusted_cert;
 use zenoh_config::Config as ZenohConfig;
 use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector};
-use zenoh_protocol::core::endpoint::Config;
-use zenoh_protocol::core::endpoint::{self, Address};
+use zenoh_protocol::core::{
+    endpoint::{Address, Config},
+    parameters,
+};
 use zenoh_result::{bail, zerror, ZError, ZResult};
 
+use crate::config::*;
+
 #[derive(Default, Clone, Copy, Debug)]
 pub struct TlsConfigurator;
 
@@ -136,10 +140,7 @@ impl ConfigurationInspector<ZenohConfig> for TlsConfigurator {
             };
         }
 
-        let mut s = String::new();
-        endpoint::Parameters::extend(ps.drain(..), &mut s);
-
-        Ok(s)
+        Ok(parameters::from_iter(ps.drain(..)))
     }
 }
 
@@ -479,8 +480,7 @@ pub async fn get_quic_addr(address: &Address<'_>) -> ZResult<SocketAddr> {
 }
 
 pub fn base64_decode(data: &str) -> ZResult<Vec<u8>> {
-    use base64::engine::general_purpose;
-    use base64::Engine;
+    use base64::{engine::general_purpose, Engine};
     Ok(general_purpose::STANDARD
         .decode(data)
         .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?)
diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs
index fb4d7fcc12..3d2ddcd0e6 100644
--- a/io/zenoh-links/zenoh-link-serial/src/lib.rs
+++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs
@@ -19,16 +19,20 @@
 //! [Click here for Zenoh's documentation](../zenoh/index.html)
 mod unicast;
 
-use async_trait::async_trait;
 use std::str::FromStr;
+
+use async_trait::async_trait;
 pub use unicast::*;
 use zenoh_core::zconfigurable;
 use zenoh_link_commons::LocatorInspector;
-use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator};
+use zenoh_protocol::{
+    core::{endpoint::Address, EndPoint, Locator},
+    transport::BatchSize,
+};
 use zenoh_result::ZResult;
 
 // Maximum MTU (Serial PDU) in bytes.
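The retyping that follows, like its QUIC, TCP, and TLS counterparts elsewhere in this patch, swaps a bare `u16` for zenoh's batch-length type. Since zenoh length-prefixes batches with 16 bits, every link MTU is capped at 65535; the alias implied by `BatchSize::MAX` replacing `u16::MAX` is simply:

    // In zenoh_protocol::transport, as implied by this diff:
    pub type BatchSize = u16;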
-const SERIAL_MAX_MTU: u16 = z_serial::MAX_MTU as u16; +const SERIAL_MAX_MTU: BatchSize = z_serial::MAX_MTU as BatchSize; const DEFAULT_BAUDRATE: u32 = 9_600; @@ -36,11 +40,11 @@ const DEFAULT_EXCLUSIVE: bool = true; pub const SERIAL_LOCATOR_PREFIX: &str = "serial"; -const SERIAL_MTU_LIMIT: u16 = SERIAL_MAX_MTU; +const SERIAL_MTU_LIMIT: BatchSize = SERIAL_MAX_MTU; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref SERIAL_DEFAULT_MTU: u16 = SERIAL_MTU_LIMIT; + static ref SERIAL_DEFAULT_MTU: BatchSize = SERIAL_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref SERIAL_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 3e79ac1eac..5711e5fe5c 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -12,34 +12,40 @@ // ZettaScale Zenoh Team, // +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, +use tokio::{ + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, }; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; +use z_serial::ZSerial; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; -use z_serial::ZSerial; - -use crate::get_exclusive; - use super::{ get_baud_rate, get_unix_path_as_string, SERIAL_ACCEPT_THROTTLE_TIME, SERIAL_DEFAULT_MTU, SERIAL_LOCATOR_PREFIX, }; +use crate::get_exclusive; struct LinkUnicastSerial { // The underlying serial port as returned by ZSerial (tokio-serial) @@ -177,7 +183,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *SERIAL_DEFAULT_MTU } @@ -206,6 +212,11 @@ impl LinkUnicastTrait for LinkUnicastSerial { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastSerial { diff --git a/io/zenoh-links/zenoh-link-tcp/Cargo.toml b/io/zenoh-links/zenoh-link-tcp/Cargo.toml index ca94412382..4a501f61ed 100644 --- a/io/zenoh-links/zenoh-link-tcp/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tcp/Cargo.toml @@ -26,6 +26,7 @@ description = "Internal crate for zenoh." 
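The `socket2` dependency added below lets the TCP link query the kernel's MSS for the new MTU computation in unicast.rs (shown later in this patch). A standalone restatement of that arithmetic; `computed_tcp_mtu` is an illustrative name, not the actual function:

    // Pick the largest multiple of half the MSS that stays below the
    // header-adjusted default MTU, so batches align with TCP segments.
    fn computed_tcp_mtu(default_mtu: u16, ip_header: u16, mss: u32) -> u16 {
        let mtu = (default_mtu - ip_header) as u32;
        let step = mss / 2; // half the MSS: at least half a segment can always be filled
        let mut tgt = step;
        while (tgt + step) < mtu {
            tgt += step;
        }
        mtu.min(tgt) as u16
    }

    // For IPv4 with the defaults above: (65535 - 40) = 65495, and an MSS of
    // 1460 gives step 730 and a final MTU of 89 * 730 = 64970.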
[dependencies] async-trait = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["net", "io-util", "rt", "time"] } tokio-util = { workspace = true, features = ["rt"] } tracing = {workspace = true} diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 1a7d6ae705..ebc2bba70b 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -17,11 +17,15 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; mod unicast; @@ -33,7 +37,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const TCP_MAX_MTU: u16 = u16::MAX; +const TCP_MAX_MTU: BatchSize = BatchSize::MAX; pub const TCP_LOCATOR_PREFIX: &str = "tcp"; @@ -52,7 +56,7 @@ impl LocatorInspector for TcpLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref TCP_DEFAULT_MTU: u16 = TCP_MAX_MTU; + static ref TCP_DEFAULT_MTU: BatchSize = TCP_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index c07d6f15b9..7532055f8e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -11,27 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpSocket, TcpStream}, +}; use tokio_util::sync::CancellationToken; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; -use zenoh_protocol::core::{EndPoint, Locator}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ get_tcp_addrs, TCP_ACCEPT_THROTTLE_TIME, TCP_DEFAULT_MTU, TCP_LINGER_TIMEOUT, TCP_LOCATOR_PREFIX, }; -use tokio::net::{TcpListener, TcpSocket, TcpStream}; pub struct LinkUnicastTcp { // The underlying socket as returned from the tokio library @@ -42,6 +43,8 @@ pub struct LinkUnicastTcp { // The destination socket address of this link (address used on the remote host) dst_addr: SocketAddr, dst_locator: Locator, + // The computed mtu + mtu: BatchSize, } unsafe impl Sync for LinkUnicastTcp {} @@ -70,6 +73,29 @@ impl LinkUnicastTcp { ); } + // 
Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TCP_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&socket); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute the largest multiple of the TCP MSS that is smaller than the default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tcp object LinkUnicastTcp { socket: UnsafeCell::new(socket), @@ -77,8 +103,10 @@ impl LinkUnicastTcp { src_locator: Locator::new(TCP_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), dst_addr, dst_locator: Locator::new(TCP_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), + mtu, } } + #[allow(clippy::mut_from_ref)] fn get_mut_socket(&self) -> &mut TcpStream { unsafe { &mut *self.socket.get() } @@ -145,8 +173,8 @@ impl LinkUnicastTrait for LinkUnicastTcp { } #[inline(always)] - fn get_mtu(&self) -> u16 { - *TCP_DEFAULT_MTU + fn get_mtu(&self) -> BatchSize { + self.mtu } #[inline(always)] @@ -163,6 +191,11 @@ impl LinkUnicastTrait for LinkUnicastTcp { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } // // WARN: This sometimes causes timeout in routing test @@ -189,6 +222,7 @@ impl fmt::Debug for LinkUnicastTcp { f.debug_struct("Tcp") .field("src", &self.src_addr) .field("dst", &self.dst_addr) + .field("mtu", &self.get_mtu()) .finish() } } diff --git a/io/zenoh-links/zenoh-link-tls/Cargo.toml b/io/zenoh-links/zenoh-link-tls/Cargo.toml index 91fb72787e..3bd357d1e4 100644 --- a/io/zenoh-links/zenoh-link-tls/Cargo.toml +++ b/io/zenoh-links/zenoh-link-tls/Cargo.toml @@ -33,14 +33,17 @@ rustls-pemfile = { workspace = true } rustls-pki-types = { workspace = true } rustls-webpki = { workspace = true } secrecy = { workspace = true } +socket2 = { workspace = true } tokio = { workspace = true, features = ["fs", "io-util", "net", "sync"] } tokio-rustls = { workspace = true } tokio-util = { workspace = true, features = ["rt"] } tracing = { workspace = true } +x509-parser = { workspace = true } webpki-roots = { workspace = true } +zenoh-collections = { workspace = true } zenoh-config = { workspace = true } zenoh-core = { workspace = true } -zenoh-link-commons = { workspace = true } +zenoh-link-commons = { workspace = true, features = ["tls"] } zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-runtime = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index b9002cc397..9fe6a3ea14 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -20,7 +20,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::Locator; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; mod unicast; @@ -34,7 +34,7 @@ pub use utils::TlsConfigurator; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TLS MTU is constrained to
// 2^16 - 1 bytes (i.e., 65535). -const TLS_MAX_MTU: u16 = u16::MAX; +const TLS_MAX_MTU: BatchSize = BatchSize::MAX; pub const TLS_LOCATOR_PREFIX: &str = "tls"; #[derive(Default, Clone, Copy)] @@ -52,7 +52,7 @@ impl LocatorInspector for TlsLocatorInspector { zconfigurable! { // Default MTU (TLS PDU) in bytes. - static ref TLS_DEFAULT_MTU: u16 = TLS_MAX_MTU; + static ref TLS_DEFAULT_MTU: BatchSize = TLS_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 8776e0ae40..716eac2121 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -11,31 +11,36 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, - TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, -}; +use std::{cell::UnsafeCell, convert::TryInto, fmt, net::SocketAddr, sync::Arc, time::Duration}; use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::convert::TryInto; -use std::fmt; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpStream}, + sync::Mutex as AsyncMutex, +}; use tokio_rustls::{TlsAcceptor, TlsConnector, TlsStream}; use tokio_util::sync::CancellationToken; +use x509_parser::prelude::*; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - ListenersUnicastIP, NewLinkChannelSender, + get_ip_interface_names, LinkAuthId, LinkAuthType, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; +use crate::{ + utils::{get_tls_addr, get_tls_host, get_tls_server_name, TlsClientConfig, TlsServerConfig}, + TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, +}; + +#[derive(Default, Debug, PartialEq, Eq, Hash)] +pub struct TlsCommonName(String); + pub struct LinkUnicastTls { // The underlying socket as returned from the async-rustls library // NOTE: TlsStream requires &mut for read and write operations. 
This means @@ -55,6 +60,8 @@ pub struct LinkUnicastTls { // Make sure there are no concurrent read or writes write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, + auth_identifier: LinkAuthId, + mtu: BatchSize, } unsafe impl Send for LinkUnicastTls {} @@ -65,6 +72,7 @@ impl LinkUnicastTls { socket: TlsStream<TcpStream>, src_addr: SocketAddr, dst_addr: SocketAddr, + auth_identifier: LinkAuthId, ) -> LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option @@ -89,6 +97,29 @@ impl LinkUnicastTls { ); } + // Compute the MTU + // See IETF RFC6691: https://datatracker.ietf.org/doc/rfc6691/ + let header = match src_addr.ip() { + std::net::IpAddr::V4(_) => 40, + std::net::IpAddr::V6(_) => 60, + }; + #[allow(unused_mut)] // mut is not needed when target_family != unix + let mut mtu = *TLS_DEFAULT_MTU - header; + + // target limitation of socket2: https://docs.rs/socket2/latest/src/socket2/sys/unix.rs.html#1544 + #[cfg(target_family = "unix")] + { + let socket = socket2::SockRef::from(&tcp_stream); + // Get the MSS and divide it by 2 to ensure we can at least fill half the MSS + let mss = socket.mss().unwrap_or(mtu as u32) / 2; + // Compute the largest multiple of the TCP MSS that is smaller than the default MTU + let mut tgt = mss; + while (tgt + mss) < mtu as u32 { + tgt += mss; + } + mtu = (mtu as u32).min(tgt) as BatchSize; + } + // Build the Tls object LinkUnicastTls { inner: UnsafeCell::new(socket), @@ -98,6 +129,8 @@ impl LinkUnicastTls { dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), + auth_identifier, + mtu, } } @@ -105,7 +138,7 @@ impl LinkUnicastTls { // or concurrent writes will ever happen. The read_mtx and write_mtx // are respectively acquired in any read and write operation.
#[allow(clippy::mut_from_ref)] - fn get_sock_mut(&self) -> &mut TlsStream { + fn get_mut_socket(&self) -> &mut TlsStream { unsafe { &mut *self.inner.get() } } } @@ -116,7 +149,7 @@ impl LinkUnicastTrait for LinkUnicastTls { tracing::trace!("Closing TLS link: {}", self); // Flush the TLS stream let _guard = zasynclock!(self.write_mtx); - let tls_stream = self.get_sock_mut(); + let tls_stream = self.get_mut_socket(); let res = tls_stream.flush().await; tracing::trace!("TLS link flush {}: {:?}", self, res); // Close the underlying TCP stream @@ -128,7 +161,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write(&self, buffer: &[u8]) -> ZResult { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write(buffer).await.map_err(|e| { + self.get_mut_socket().write(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -136,7 +169,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write_all(buffer).await.map_err(|e| { + self.get_mut_socket().write_all(buffer).await.map_err(|e| { tracing::trace!("Write error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -144,7 +177,7 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read(&self, buffer: &mut [u8]) -> ZResult { let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read(buffer).await.map_err(|e| { + self.get_mut_socket().read(buffer).await.map_err(|e| { tracing::trace!("Read error on TLS link {}: {}", self, e); zerror!(e).into() }) @@ -152,10 +185,14 @@ impl LinkUnicastTrait for LinkUnicastTls { async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { let _guard = zasynclock!(self.read_mtx); - let _ = self.get_sock_mut().read_exact(buffer).await.map_err(|e| { - tracing::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e) - })?; + let _ = self + .get_mut_socket() + .read_exact(buffer) + .await + .map_err(|e| { + tracing::trace!("Read error on TLS link {}: {}", self, e); + zerror!(e) + })?; Ok(()) } @@ -170,8 +207,8 @@ impl LinkUnicastTrait for LinkUnicastTls { } #[inline(always)] - fn get_mtu(&self) -> u16 { - *TLS_DEFAULT_MTU + fn get_mtu(&self) -> BatchSize { + self.mtu } #[inline(always)] @@ -188,12 +225,17 @@ impl LinkUnicastTrait for LinkUnicastTls { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &self.auth_identifier + } } impl Drop for LinkUnicastTls { fn drop(&mut self) { // Close the underlying TCP stream - let (tcp_stream, _) = self.get_sock_mut().get_mut(); + let (tcp_stream, _) = self.get_mut_socket().get_mut(); let _ = zenoh_runtime::ZRuntime::Acceptor .block_in_place(async move { tcp_stream.shutdown().await }); } @@ -281,9 +323,18 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { e ) })?; + + let (_, tls_conn) = tls_stream.get_ref(); + let auth_identifier = get_server_cert_common_name(tls_conn)?; + let tls_stream = TlsStream::Client(tls_stream); - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier.into(), + )); Ok(LinkUnicast(link)) } @@ -391,11 +442,18 @@ async fn accept_task( } }; - + // Get TLS auth identifier + let (_, tls_conn) = tls_stream.get_ref(); + let auth_identifier = get_client_cert_common_name(tls_conn)?; tracing::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); // Create the new link 
object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier.into(), + )); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { @@ -419,3 +477,56 @@ async fn accept_task( } Ok(()) } + +fn get_client_cert_common_name(tls_conn: &rustls::CommonState) -> ZResult<TlsAuthId> { + if let Some(serv_certs) = tls_conn.peer_certificates() { + let (_, cert) = X509Certificate::from_der(serv_certs[0].as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + Ok(TlsAuthId { + auth_value: Some(subject_name.to_string()), + }) + } else { + Ok(TlsAuthId { auth_value: None }) + } +} + +fn get_server_cert_common_name(tls_conn: &rustls::ClientConnection) -> ZResult<TlsAuthId> { + let serv_certs = tls_conn.peer_certificates().unwrap(); + let mut auth_id = TlsAuthId { auth_value: None }; + + // Need the first certificate in the chain so no need for looping + if let Some(item) = serv_certs.iter().next() { + let (_, cert) = X509Certificate::from_der(item.as_ref())?; + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + + auth_id = TlsAuthId { + auth_value: Some(subject_name.to_string()), + }; + return Ok(auth_id); + } + Ok(auth_id) +} + +struct TlsAuthId { + auth_value: Option<String>, +} + +impl From<TlsAuthId> for LinkAuthId { + fn from(value: TlsAuthId) -> Self { + LinkAuthId::builder() + .auth_type(LinkAuthType::Tls) + .auth_value(value.auth_value.clone()) + .build() + } +} diff --git a/io/zenoh-links/zenoh-link-tls/src/utils.rs b/io/zenoh-links/zenoh-link-tls/src/utils.rs index 1c78cd93b3..d6fde3d243 100644 --- a/io/zenoh-links/zenoh-link-tls/src/utils.rs +++ b/io/zenoh-links/zenoh-link-tls/src/utils.rs @@ -11,7 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config::*; +use std::{ + convert::TryFrom, + fs::File, + io, + io::{BufReader, Cursor}, + net::SocketAddr, + sync::Arc, +}; + use rustls::{ pki_types::{CertificateDer, PrivateKeyDer, TrustAnchor}, server::WebPkiClientVerifier, @@ -20,20 +28,17 @@ use rustls::{ }; use rustls_pki_types::ServerName; use secrecy::ExposeSecret; -use std::fs::File; -use std::io; -use std::{convert::TryFrom, net::SocketAddr}; -use std::{ - io::{BufReader, Cursor}, - sync::Arc, -}; use webpki::anchor_from_trusted_cert; use zenoh_config::Config as ZenohConfig; use zenoh_link_commons::{tls::WebPkiVerifierAnyServerName, ConfigurationInspector}; -use zenoh_protocol::core::endpoint::Config; -use zenoh_protocol::core::endpoint::{self, Address}; +use zenoh_protocol::core::{ endpoint::{Address, Config}, parameters, }; use zenoh_result::{bail, zerror, ZError, ZResult}; +use crate::config::*; + #[derive(Default, Clone, Copy, Debug)] pub struct TlsConfigurator; @@ -137,10 +142,7 @@ impl ConfigurationInspector<ZenohConfig> for TlsConfigurator { }; } - let mut s = String::new(); - endpoint::Parameters::extend(ps.drain(..), &mut s); - - Ok(s) + Ok(parameters::from_iter(ps.drain(..))) } } @@ -473,8 +475,7 @@ fn load_trust_anchors(config: &Config<'_>) -> ZResult<Option<RootCertStore>> { } pub fn base64_decode(data: &str) -> ZResult<Vec<u8>> { - use base64::engine::general_purpose; - use base64::Engine; + use base64::{engine::general_purpose, Engine}; Ok(general_purpose::STANDARD .decode(data) .map_err(|e| zerror!("Unable to perform base64 decoding: {e:?}"))?)
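The MTU logic added to both the TCP and TLS links above follows RFC 6691: start from the default MTU minus an estimated IP+TCP header (40 bytes for IPv4, 60 for IPv6), halve the socket's MSS so a batch can always fill at least half a segment, then round down to the largest multiple of that value. A minimal sketch of the same arithmetic, assuming a hypothetical clamp_mtu helper that is not part of this patch (default_mtu stands in for *TCP_DEFAULT_MTU minus the header estimate, mss for the value socket2's SockRef::mss() reports):

// Sketch only: mirrors the MSS-based clamping above, outside the link types.
fn clamp_mtu(default_mtu: u16, mss: u32) -> u16 {
    // Halve the MSS so a pulled batch can always fill at least half a segment.
    let mss = mss / 2;
    // Largest multiple of `mss` strictly below `default_mtu`.
    let mut tgt = mss;
    while (tgt + mss) < default_mtu as u32 {
        tgt += mss;
    }
    // Never advertise more than the default MTU.
    (default_mtu as u32).min(tgt) as u16
}

fn main() {
    // IPv4: 65535 - 40 = 65495; with a typical MSS of 1460, 89 * 730 = 64970
    // is the largest multiple of the halved MSS below the default.
    assert_eq!(clamp_mtu(u16::MAX - 40, 1460), 64970);
}

Keeping the advertised MTU a multiple of the halved MSS presumably lets a full batch map onto whole TCP segments rather than ending in a small trailing one.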
diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 31ca32e71b..3386ca387c 100644 --- a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -20,13 +20,17 @@ mod multicast; mod unicast; +use std::net::SocketAddr; + use async_trait::async_trait; pub use multicast::*; -use std::net::SocketAddr; pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is recommended to set the @@ -46,24 +50,24 @@ use zenoh_result::{zerror, ZResult}; /// /// Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via IPv6 /// Jumbograms, its usage in Zenoh is discouraged unless the consequences are very well understood. -const UDP_MAX_MTU: u16 = u16::MAX - 8 - 40; +const UDP_MAX_MTU: BatchSize = u16::MAX - 8 - 40; pub const UDP_LOCATOR_PREFIX: &str = "udp"; #[cfg(any(target_os = "linux", target_os = "windows"))] // Linux default value of a maximum datagram size is set to UDP MAX MTU. -const UDP_MTU_LIMIT: u16 = UDP_MAX_MTU; +const UDP_MTU_LIMIT: BatchSize = UDP_MAX_MTU; #[cfg(target_os = "macos")] // Mac OS X default value of a maximum datagram size is set to 9216 bytes. -const UDP_MTU_LIMIT: u16 = 9_216; +const UDP_MTU_LIMIT: BatchSize = 9_216; #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] -const UDP_MTU_LIMIT: u16 = 8_192; +const UDP_MTU_LIMIT: BatchSize = 8_192; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref UDP_DEFAULT_MTU: u16 = UDP_MTU_LIMIT; + static ref UDP_DEFAULT_MTU: BatchSize = UDP_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UDP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index 94d79739bf..1ab1fbb398 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -11,18 +11,26 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{config::*, UDP_DEFAULT_MTU}; -use crate::{get_udp_addrs, socket_addr_to_udp_locator}; +use std::{ + borrow::Cow, + fmt, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, +}; + use async_trait::async_trait; use socket2::{Domain, Protocol, Socket, Type}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; -use zenoh_protocol::core::{Config, EndPoint, Locator}; +use zenoh_protocol::{ + core::{Config, EndPoint, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; +use super::{config::*, UDP_DEFAULT_MTU}; +use crate::{get_udp_addrs, socket_addr_to_udp_locator}; + pub struct LinkMulticastUdp { // The unicast socket address of this link unicast_addr: SocketAddr, @@ -119,7 +127,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index fba3e23b69..e67e821363 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -11,28 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{ - get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, - UDP_MAX_MTU, +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::{Arc, Mutex, Weak}, + time::Duration, }; + use async_trait::async_trait; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::{Arc, Mutex, Weak}; -use std::time::Duration; -use tokio::net::UdpSocket; -use tokio::sync::Mutex as AsyncMutex; +use tokio::{net::UdpSocket, sync::Mutex as AsyncMutex}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ - get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, + get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, + LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; +use super::{ + get_udp_addrs, socket_addr_to_udp_locator, UDP_ACCEPT_THROTTLE_TIME, UDP_DEFAULT_MTU, + UDP_MAX_MTU, +}; + type LinkHashMap = Arc>>>; type LinkInput = (Vec, usize); type LinkLeftOver = (Vec, usize, usize); @@ -200,7 +206,7 @@ impl LinkUnicastTrait for LinkUnicastUdp { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UDP_DEFAULT_MTU } @@ -218,6 +224,11 @@ impl LinkUnicastTrait for LinkUnicastUdp { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + 
&LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastUdp { diff --git a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml index 22ff335d25..b52bdc2802 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml +++ b/io/zenoh-links/zenoh-link-unixpipe/Cargo.toml @@ -32,6 +32,7 @@ async-trait = { workspace = true } tracing = {workspace = true} rand = { workspace = true, features = ["default"] } zenoh-buffers = { workspace = true } +zenoh-collections = { workspace = true } zenoh-core = { workspace = true } zenoh-config = { workspace = true } zenoh-link-commons = { workspace = true } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs index bcafaaba3c..ff1c2f983b 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/mod.rs @@ -24,7 +24,7 @@ pub use unicast::*; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::core::{Locator, Parameters}; +use zenoh_protocol::core::{parameters, Locator}; use zenoh_result::ZResult; pub const UNIXPIPE_LOCATOR_PREFIX: &str = "unixpipe"; @@ -56,8 +56,7 @@ impl ConfigurationInspector for UnixPipeConfigurator { properties.push((config::FILE_ACCESS_MASK, &file_access_mask_)); } - let mut s = String::new(); - Parameters::extend(properties.drain(..), &mut s); + let s = parameters::from_iter(properties.drain(..)); Ok(s) } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 8964955140..df93b9cc61 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -11,42 +11,45 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::config; +use std::{ + cell::UnsafeCell, + collections::HashMap, + fmt, + fs::{File, OpenOptions}, + io::{ErrorKind, Read, Write}, + os::unix::fs::OpenOptionsExt, + sync::Arc, +}; + #[cfg(not(target_os = "macos"))] use advisory_lock::{AdvisoryFileLock, FileLockMode}; use async_trait::async_trait; use filepath::FilePath; -use nix::libc; -use nix::unistd::unlink; +use nix::{libc, unistd::unlink}; use rand::Rng; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::{File, OpenOptions}; -use std::io::ErrorKind; -use std::io::{Read, Write}; -use std::os::unix::fs::OpenOptionsExt; -use std::sync::Arc; -use tokio::fs::remove_file; -use tokio::io::unix::AsyncFd; -use tokio::io::Interest; -use tokio::task::JoinHandle; +use tokio::{ + fs::remove_file, + io::{unix::AsyncFd, Interest}, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, SyncResolve}; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_runtime::ZRuntime; - use unix_named_pipe::{create, open_write}; - +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, Wait}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + ConstructibleLinkManagerUnicast, LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; use zenoh_result::{bail, ZResult}; +use zenoh_runtime::ZRuntime; use super::FILE_ACCESS_MASK; +use crate::config; -const LINUX_PIPE_MAX_MTU: u16 = 65_535; +const 
LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; static PIPE_INVITATION: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF]; @@ -330,7 +333,7 @@ impl UnicastPipeListener { fn stop_listening(self) { self.token.cancel(); - let _ = ResolveFuture::new(self.handle).res_sync(); + let _ = ResolveFuture::new(self.handle).wait(); } } @@ -502,7 +505,7 @@ impl LinkUnicastTrait for UnicastPipe { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { LINUX_PIPE_MAX_MTU } @@ -522,6 +525,11 @@ impl LinkUnicastTrait for UnicastPipe { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for UnicastPipe { diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index b6c180cd8d..771782e62a 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -20,7 +20,10 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; @@ -33,13 +36,13 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the UNIXSOCKSTREAM MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const UNIXSOCKSTREAM_MAX_MTU: u16 = u16::MAX; +const UNIXSOCKSTREAM_MAX_MTU: BatchSize = BatchSize::MAX; pub const UNIXSOCKSTREAM_LOCATOR_PREFIX: &str = "unixsock-stream"; zconfigurable! { // Default MTU (UNIXSOCKSTREAM PDU) in bytes. - static ref UNIXSOCKSTREAM_DEFAULT_MTU: u16 = UNIXSOCKSTREAM_MAX_MTU; + static ref UNIXSOCKSTREAM_DEFAULT_MTU: BatchSize = UNIXSOCKSTREAM_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 1d95af7eab..a07267416d 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -11,30 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME; +use std::{ + cell::UnsafeCell, collections::HashMap, fmt, fs::remove_file, os::unix::io::RawFd, + path::PathBuf, sync::Arc, time::Duration, +}; + use async_trait::async_trait; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::fs::remove_file; -use std::os::unix::io::RawFd; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::net::{UnixListener, UnixStream}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{UnixListener, UnixStream}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; use uuid::Uuid; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; -use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; +use super::{ + get_unix_path_as_string, UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME, UNIXSOCKSTREAM_DEFAULT_MTU, + UNIXSOCKSTREAM_LOCATOR_PREFIX, +}; pub struct LinkUnicastUnixSocketStream { // The underlying socket as returned from the tokio library @@ -119,7 +123,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *UNIXSOCKSTREAM_DEFAULT_MTU } @@ -139,6 +143,11 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl Drop for LinkUnicastUnixSocketStream { @@ -319,6 +328,8 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { })?; // We try to acquire the lock + // @TODO: flock is deprecated and upgrading to new Flock will require some refactoring of this module + #[allow(deprecated)] nix::fcntl::flock(lock_fd, nix::fcntl::FlockArg::LockExclusiveNonblock).map_err(|e| { let _ = nix::unistd::close(lock_fd); let e = zerror!( @@ -418,6 +429,8 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastUnixSocketStream { listener.handle.await??; //Release the lock + // @TODO: flock is deprecated and upgrading to new Flock will require some refactoring of this module + #[allow(deprecated)] let _ = nix::fcntl::flock(listener.lock_fd, nix::fcntl::FlockArg::UnlockNonblock); let _ = nix::unistd::close(listener.lock_fd); let _ = remove_file(path.clone()); diff --git a/io/zenoh-links/zenoh-link-vsock/src/lib.rs b/io/zenoh-links/zenoh-link-vsock/src/lib.rs index 7834050796..d58250fed3 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/lib.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/lib.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use 
zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::Locator; +use zenoh_protocol::{core::Locator, transport::BatchSize}; use zenoh_result::ZResult; #[cfg(target_os = "linux")] @@ -47,7 +47,7 @@ impl LocatorInspector for VsockLocatorInspector { zconfigurable! { // Default MTU in bytes. - static ref VSOCK_DEFAULT_MTU: u16 = u16::MAX; + static ref VSOCK_DEFAULT_MTU: BatchSize = BatchSize::MAX; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref VSOCK_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 2700fcf04d..e7b261f292 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -12,30 +12,31 @@ // ZettaScale Zenoh Team, // +use std::{cell::UnsafeCell, collections::HashMap, fmt, sync::Arc, time::Duration}; + use async_trait::async_trait; use libc::VMADDR_PORT_ANY; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::sync::Arc; -use std::time::Duration; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::sync::RwLock as AsyncRwLock; -use tokio::task::JoinHandle; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + sync::RwLock as AsyncRwLock, + task::JoinHandle, +}; use tokio_util::sync::CancellationToken; +use tokio_vsock::{ + VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, + VMADDR_CID_LOCAL, +}; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{endpoint::Address, EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::endpoint::Address; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; -use tokio_vsock::{ - VsockAddr, VsockListener, VsockStream, VMADDR_CID_ANY, VMADDR_CID_HOST, VMADDR_CID_HYPERVISOR, - VMADDR_CID_LOCAL, -}; pub const VSOCK_VMADDR_CID_ANY: &str = "VMADDR_CID_ANY"; pub const VSOCK_VMADDR_CID_HYPERVISOR: &str = "VMADDR_CID_HYPERVISOR"; @@ -82,7 +83,7 @@ pub fn get_vsock_addr(address: Address<'_>) -> ZResult { } pub struct LinkUnicastVsock { - // The underlying socket as returned from the async-std library + // The underlying socket as returned from the tokio library socket: UnsafeCell, // The source socket address of this link (address used on the local host) src_addr: VsockAddr, @@ -170,7 +171,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *VSOCK_DEFAULT_MTU } @@ -188,6 +189,11 @@ impl LinkUnicastTrait for LinkUnicastVsock { fn is_streamed(&self) -> bool { true } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl fmt::Display for LinkUnicastVsock { diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index f68a20d15d..6a97ed99b6 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -17,12 +17,16 @@ //! This crate is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use async_trait::async_trait; use std::net::SocketAddr; + +use async_trait::async_trait; use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::core::{endpoint::Address, Locator}; +use zenoh_protocol::{ + core::{endpoint::Address, Locator}, + transport::BatchSize, +}; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; @@ -33,7 +37,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const WS_MAX_MTU: u16 = u16::MAX; +const WS_MAX_MTU: BatchSize = BatchSize::MAX; pub const WS_LOCATOR_PREFIX: &str = "ws"; @@ -51,7 +55,7 @@ impl LocatorInspector for WsLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref WS_DEFAULT_MTU: u16 = WS_MAX_MTU; + static ref WS_DEFAULT_MTU: BatchSize = WS_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref TCP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index f1aa0088f0..193c9a1724 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -12,28 +12,34 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::HashMap, + fmt, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + use async_trait::async_trait; -use futures_util::stream::SplitSink; -use futures_util::stream::SplitStream; -use futures_util::SinkExt; -use futures_util::StreamExt; -use std::collections::HashMap; -use std::fmt; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; -use tokio::net::{TcpListener, TcpStream}; -use tokio::sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}; -use tokio::task::JoinHandle; -use tokio_tungstenite::accept_async; -use tokio_tungstenite::tungstenite::Message; -use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; +use futures_util::{ + stream::{SplitSink, SplitStream}, + SinkExt, StreamExt, +}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::{Mutex as AsyncMutex, RwLock as AsyncRwLock}, + task::JoinHandle, +}; +use tokio_tungstenite::{accept_async, tungstenite::Message, MaybeTlsStream, WebSocketStream}; use tokio_util::sync::CancellationToken; use zenoh_core::{zasynclock, zasyncread, zasyncwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + LinkAuthId, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, +}; +use zenoh_protocol::{ + core::{EndPoint, Locator}, + transport::BatchSize, }; -use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; @@ -200,7 +206,7 @@ impl LinkUnicastTrait for LinkUnicastWs { } #[inline(always)] - fn get_mtu(&self) -> u16 { + fn get_mtu(&self) -> BatchSize { *WS_DEFAULT_MTU } @@ -220,6 +226,11 @@ impl LinkUnicastTrait for LinkUnicastWs { fn is_streamed(&self) -> bool { false } + + #[inline(always)] + fn get_auth_id(&self) -> &LinkAuthId { + &LinkAuthId::NONE + } } impl Drop for LinkUnicastWs { @@ -296,7 +307,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastWs { async fn new_link(&self, endpoint: EndPoint) -> ZResult { let dst_url = 
get_ws_url(endpoint.address()).await?; - let (stream, _) = tokio_tungstenite::connect_async(&dst_url) + let (stream, _) = tokio_tungstenite::connect_async(dst_url.as_str()) .await .map_err(|e| { zerror!( diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index 9f6594761e..a3dabbae0e 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -29,6 +29,7 @@ shared-memory = [ "zenoh-protocol/shared-memory", "zenoh-shm", "zenoh-codec/shared-memory", + "zenoh-buffers/shared-memory", ] auth_pubkey = ["transport_auth", "rsa"] auth_usrpwd = ["transport_auth"] @@ -51,6 +52,7 @@ default = ["test", "transport_multilink"] [dependencies] async-trait = { workspace = true } +crossbeam-utils = { workspace = true } tokio = { workspace = true, features = [ "sync", "fs", @@ -60,6 +62,7 @@ tokio = { workspace = true, features = [ "io-util", "net", ] } +lazy_static = { workspace = true } tokio-util = { workspace = true, features = ["rt"]} flume = { workspace = true } tracing = {workspace = true} diff --git a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index efae776980..5537ec46fb 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use std::num::NonZeroUsize; + use zenoh_buffers::{ buffer::Buffer, reader::{DidntRead, HasReader}, @@ -119,14 +120,6 @@ impl BatchConfig { .then_some(BatchHeader::new(BatchHeader::COMPRESSION)) } } - - pub fn max_buffer_size(&self) -> usize { - let mut len = self.mtu as usize; - if self.is_streamed { - len += BatchSize::BITS as usize / 8; - } - len - } } // Batch header @@ -148,7 +141,7 @@ impl BatchHeader { self.0 } - /// Verify that the [`WBatch`][WBatch] is for a stream-based protocol, i.e., the first + /// Verify that the [`WBatch`] is for a stream-based protocol, i.e., the first /// 2 bytes are reserved to encode the total amount of serialized bytes as 16-bits little endian. #[cfg(feature = "transport_compression")] #[inline(always)] @@ -180,22 +173,22 @@ pub enum Finalize { /// Write Batch /// -/// A [`WBatch`][WBatch] is a non-expandable and contiguous region of memory -/// that is used to serialize [`TransportMessage`][TransportMessage] and [`ZenohMessage`][ZenohMessage]. +/// A [`WBatch`] is a non-expandable and contiguous region of memory +/// that is used to serialize [`TransportMessage`] and [`NetworkMessage`]. /// -/// [`TransportMessage`][TransportMessage] are always serialized on the batch as they are, while -/// [`ZenohMessage`][ZenohMessage] are always serializaed on the batch as part of a [`TransportMessage`] +/// [`TransportMessage`] are always serialized on the batch as they are, while +/// [`NetworkMessage`] are always serialized on the batch as part of a [`TransportMessage`] /// [TransportMessage] Frame. Reliable and Best Effort Frames can be interleaved on the same -/// [`WBatch`][WBatch] as long as they fit in the remaining buffer capacity. +/// [`WBatch`] as long as they fit in the remaining buffer capacity. /// -/// In the serialized form, the [`WBatch`][WBatch] always contains one or more -/// [`TransportMessage`][TransportMessage]. In the particular case of [`TransportMessage`][TransportMessage] Frame, -/// its payload is either (i) one or more complete [`ZenohMessage`][ZenohMessage] or (ii) a fragment of a -/// a [`ZenohMessage`][ZenohMessage]. +/// In the serialized form, the [`WBatch`] always contains one or more +/// [`TransportMessage`]. 
In the particular case of [`TransportMessage`] Frame, +/// its payload is either (i) one or more complete [`NetworkMessage`] or (ii) a fragment of +/// a [`NetworkMessage`]. /// -/// As an example, the content of the [`WBatch`][WBatch] in memory could be: +/// As an example, the content of the [`WBatch`] in memory could be: /// -/// | Keep Alive | Frame Reliable | Frame Best Effort | +/// | Keep Alive | Frame Reliable | Frame Best Effort | /// #[derive(Clone, Debug)] pub struct WBatch { @@ -213,7 +206,7 @@ impl WBatch { pub fn new(config: BatchConfig) -> Self { let mut batch = Self { - buffer: BBuf::with_capacity(config.max_buffer_size()), + buffer: BBuf::with_capacity(config.mtu as usize), codec: Zenoh080Batch::new(), config, #[cfg(feature = "stats")] @@ -226,20 +219,20 @@ impl WBatch { batch } - /// Verify that the [`WBatch`][WBatch] has no serialized bytes. + /// Verify that the [`WBatch`] has no serialized bytes. #[inline(always)] pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get the total number of bytes that have been serialized on the [`WBatch`][WBatch]. + /// Get the total number of bytes that have been serialized on the [`WBatch`]. #[inline(always)] pub fn len(&self) -> BatchSize { let (_l, _h, p) = Self::split(self.buffer.as_slice(), &self.config); p.len() as BatchSize } - /// Clear the [`WBatch`][WBatch] memory buffer and related internal state. + /// Clear the [`WBatch`] memory buffer and related internal state. #[inline(always)] pub fn clear(&mut self) { self.buffer.clear(); @@ -321,11 +314,15 @@ impl WBatch { // Compress the actual content let (_length, _header, payload) = Self::split(self.buffer.as_slice(), &self.config); let mut writer = support.writer(); - writer - .with_slot(writer.remaining(), |b| { - lz4_flex::block::compress_into(payload, b).unwrap_or(0) + // SAFETY: assertion ensures `with_slot` precondition + unsafe { + writer.with_slot(writer.remaining(), |b| { + let len = lz4_flex::block::compress_into(payload, b).unwrap_or(0); + assert!(len <= b.len()); + len }) - .map_err(|_| zerror!("Compression error"))?; + } + .map_err(|_| zerror!("Compression error"))?; // Verify whether the resulting compressed data is smaller than the initial input if support.len() < self.buffer.len() { @@ -423,7 +420,7 @@ impl RBatch { pub fn initialize<C, T>(&mut self, #[allow(unused_variables)] buff: C) -> ZResult<()> where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { #[allow(unused_variables)] let (l, h, p) = Self::split(self.buffer.as_slice(), &self.config); @@ -446,7 +443,7 @@ impl RBatch { self.buffer = self .buffer - .subslice(l.len() + h.len(), self.buffer.len()) + .subslice(l.len() + h.len()..self.buffer.len()) .ok_or_else(|| zerror!("Invalid batch length"))?; Ok(()) @@ -455,12 +452,12 @@ impl RBatch { #[cfg(feature = "transport_compression")] fn decompress<T>(&self, payload: &[u8], mut buff: impl FnMut() -> T) -> ZResult<ZSlice> where - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { let mut into = (buff)(); - let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) + let n = lz4_flex::block::decompress_into(payload, into.as_mut()) .map_err(|_| zerror!("Decompression error"))?; - let zslice = ZSlice::make(Arc::new(into), 0, n) + let zslice = ZSlice::new(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; Ok(zslice) } @@ -497,7 +494,6 @@ impl Decode<(TransportMessage, BatchSize)> for &mut RBatch { mod tests { use std::vec; - use super::*; use
rand::Rng; use zenoh_buffers::ZBuf; use zenoh_core::zcondfeat; @@ -511,6 +507,8 @@ mod tests { zenoh::{PushBody, Put}, }; + use super::*; + #[test] fn rw_batch() { let mut rng = rand::thread_rng(); @@ -574,12 +572,12 @@ mod tests { let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: ext::QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -601,7 +599,7 @@ mod tests { let mut frame = FrameHeader { reliability: Reliability::Reliable, sn: 0, - ext_qos: frame::ext::QoSType::default(), + ext_qos: frame::ext::QoSType::DEFAULT, }; // Serialize with a frame diff --git a/io/zenoh-transport/src/common/defragmentation.rs b/io/zenoh-transport/src/common/defragmentation.rs index 8fab075fe4..476fad632c 100644 --- a/io/zenoh-transport/src/common/defragmentation.rs +++ b/io/zenoh-transport/src/common/defragmentation.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::seq_num::SeqNum; use zenoh_buffers::{buffer::Buffer, reader::HasReader, ZBuf, ZSlice}; use zenoh_codec::{RCodec, Zenoh080Reliability}; use zenoh_protocol::{ @@ -21,6 +20,8 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, ZResult}; +use super::seq_num::SeqNum; + #[derive(Debug)] pub(crate) struct DefragBuffer { reliability: Reliability, diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index 37351596c3..60ea3b215d 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -1,5 +1,3 @@ -use crate::common::batch::BatchConfig; - // // Copyright (c) 2023 ZettaScale Technology // @@ -13,18 +11,16 @@ use crate::common::batch::BatchConfig; // Contributors: // ZettaScale Zenoh Team, // -use super::{ - batch::{Encode, WBatch}, - priority::{TransportChannelTx, TransportPriorityTx}, -}; -use flume::{bounded, Receiver, Sender}; -use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; -use std::sync::{Arc, Mutex, MutexGuard}; -use std::time::Duration; use std::{ - sync::atomic::{AtomicBool, AtomicU16, Ordering}, - time::Instant, + sync::{ + atomic::{AtomicBool, AtomicU32, Ordering}, + Arc, Mutex, MutexGuard, + }, + time::{Duration, Instant}, }; + +use crossbeam_utils::CachePadded; +use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use zenoh_buffers::{ reader::{HasReader, Reader}, writer::HasWriter, @@ -33,26 +29,28 @@ use zenoh_buffers::{ use zenoh_codec::{transport::batch::BatchError, WCodec, Zenoh080}; use zenoh_config::QueueSizeConf; use zenoh_core::zlock; -use zenoh_protocol::core::Reliability; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ - core::Priority, + core::{Priority, Reliability}, + network::NetworkMessage, transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, - BatchSize, TransportMessage, + AtomicBatchSize, BatchSize, TransportMessage, }, }; +use zenoh_sync::{event, Notifier, Waiter}; -// It's faster to work directly with nanoseconds. -// Backoff will never last more the u32::MAX nanoseconds. 
-type NanoSeconds = u32; +use super::{ + batch::{Encode, WBatch}, + priority::{TransportChannelTx, TransportPriorityTx}, +}; +use crate::common::batch::BatchConfig; const RBLEN: usize = QueueSizeConf::MAX; // Inner structure to reuse serialization batches struct StageInRefill { - n_ref_r: Receiver<()>, + n_ref_r: Waiter, s_ref_r: RingBufferReader, } @@ -62,36 +60,48 @@ impl StageInRefill { } fn wait(&self) -> bool { - self.n_ref_r.recv().is_ok() + self.n_ref_r.wait().is_ok() } fn wait_deadline(&self, instant: Instant) -> bool { - self.n_ref_r.recv_deadline(instant).is_ok() + self.n_ref_r.wait_deadline(instant).is_ok() } } +lazy_static::lazy_static! { + static ref LOCAL_EPOCH: Instant = Instant::now(); +} + +type AtomicMicroSeconds = AtomicU32; +type MicroSeconds = u32; + +struct AtomicBackoff { + active: CachePadded, + bytes: CachePadded, + first_write: CachePadded, +} + // Inner structure to link the initial stage with the final stage of the pipeline struct StageInOut { - n_out_w: Sender<()>, + n_out_w: Notifier, s_out_w: RingBufferWriter, - bytes: Arc, - backoff: Arc, + atomic_backoff: Arc, } impl StageInOut { #[inline] fn notify(&self, bytes: BatchSize) { - self.bytes.store(bytes, Ordering::Relaxed); - if !self.backoff.load(Ordering::Relaxed) { - let _ = self.n_out_w.try_send(()); + self.atomic_backoff.bytes.store(bytes, Ordering::Relaxed); + if !self.atomic_backoff.active.load(Ordering::Relaxed) { + let _ = self.n_out_w.notify(); } } #[inline] fn move_batch(&mut self, batch: WBatch) { let _ = self.s_out_w.push(batch); - self.bytes.store(0, Ordering::Relaxed); - let _ = self.n_out_w.try_send(()); + self.atomic_backoff.bytes.store(0, Ordering::Relaxed); + let _ = self.n_out_w.notify(); } } @@ -123,6 +133,7 @@ struct StageIn { s_out: StageInOut, mutex: StageInMutex, fragbuf: ZBuf, + batching: bool, } impl StageIn { @@ -143,6 +154,7 @@ impl StageIn { None => match self.s_ref.pull() { Some(mut batch) => { batch.clear(); + self.s_out.atomic_backoff.first_write.store(LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, Ordering::Relaxed); break batch; } None => { @@ -177,12 +189,18 @@ impl StageIn { } macro_rules! zretok { - ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + ($batch:expr, $msg:expr) => {{ + if !self.batching || $msg.is_express() { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } @@ -190,7 +208,7 @@ impl StageIn { let mut batch = zgetbatch_rets!(false, {}); // Attempt the serialization on the current batch let e = match batch.encode(&*msg) { - Ok(_) => zretok!(batch), + Ok(_) => zretok!(batch, msg), Err(e) => e, }; @@ -210,7 +228,7 @@ impl StageIn { if let BatchError::NewFrame = e { // Attempt a serialization with a new frame if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } } @@ -222,7 +240,7 @@ impl StageIn { // Attempt a second serialization on fully empty batch if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch); + zretok!(batch, msg); } // The second serialization attempt has failed. 
This means that the message is @@ -291,6 +309,10 @@ impl StageIn { None => match self.s_ref.pull() { Some(mut batch) => { batch.clear(); + self.s_out.atomic_backoff.first_write.store( + LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, + Ordering::Relaxed, + ); break batch; } None => { @@ -308,18 +330,23 @@ impl StageIn { macro_rules! zretok { ($batch:expr) => {{ - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; + if !self.batching { + // Move out existing batch + self.s_out.move_batch($batch); + return true; + } else { + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; + } }}; } // Get the current serialization batch. let mut batch = zgetbatch_rets!(); // Attempt the serialization on the current batch - // Attempt the serialization on the current batch match batch.encode(&msg) { Ok(_) => zretok!(batch), Err(_) => { @@ -340,54 +367,27 @@ impl StageIn { enum Pull { Some(WBatch), None, - Backoff(NanoSeconds), + Backoff(MicroSeconds), } // Inner structure to keep track and signal backoff operations #[derive(Clone)] struct Backoff { - tslot: NanoSeconds, - retry_time: NanoSeconds, + threshold: Duration, last_bytes: BatchSize, - bytes: Arc, - backoff: Arc, + atomic: Arc, + // active: bool, } impl Backoff { - fn new(tslot: NanoSeconds, bytes: Arc, backoff: Arc) -> Self { + fn new(threshold: Duration, atomic: Arc) -> Self { Self { - tslot, - retry_time: 0, + threshold, last_bytes: 0, - bytes, - backoff, + atomic, + // active: false, } } - - fn next(&mut self) { - if self.retry_time == 0 { - self.retry_time = self.tslot; - self.backoff.store(true, Ordering::Relaxed); - } else { - match self.retry_time.checked_mul(2) { - Some(rt) => { - self.retry_time = rt; - } - None => { - self.retry_time = NanoSeconds::MAX; - tracing::warn!( - "Pipeline pull backoff overflow detected! 
Retrying in {}ns.", - self.retry_time - ); - } - } - } - } - - fn reset(&mut self) { - self.retry_time = 0; - self.backoff.store(false, Ordering::Relaxed); - } } // Inner structure to link the final stage with the initial stage of the pipeline @@ -408,13 +408,38 @@ impl StageOutIn { } fn try_pull_deep(&mut self) -> Pull { - let new_bytes = self.backoff.bytes.load(Ordering::Relaxed); - let old_bytes = self.backoff.last_bytes; - self.backoff.last_bytes = new_bytes; + // Verify first backoff is not active + let mut pull = !self.backoff.atomic.active.load(Ordering::Relaxed); + + // If backoff is active, verify the current number of bytes is equal to the old number + // of bytes seen in the previous backoff iteration + if !pull { + let new_bytes = self.backoff.atomic.bytes.load(Ordering::Relaxed); + let old_bytes = self.backoff.last_bytes; + self.backoff.last_bytes = new_bytes; + + pull = new_bytes == old_bytes; + } - if new_bytes == old_bytes { + // Verify that we have not been doing backoff for too long + let mut backoff = 0; + if !pull { + let diff = LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds + - self.backoff.atomic.first_write.load(Ordering::Relaxed); + let threshold = self.backoff.threshold.as_micros() as MicroSeconds; + + if diff >= threshold { + pull = true; + } else { + backoff = threshold - diff; + } + } + + if pull { // It seems no new bytes have been written on the batch, try to pull if let Ok(mut g) = self.current.try_lock() { + self.backoff.atomic.active.store(false, Ordering::Relaxed); + // First try to pull from stage OUT to make sure we are not in the case // where new_bytes == old_bytes are because of two identical serializations if let Some(batch) = self.s_out_r.pull() { @@ -431,24 +456,25 @@ impl StageOutIn { } } } - // Go to backoff } + // Activate backoff + self.backoff.atomic.active.store(true, Ordering::Relaxed); + // Do backoff - self.backoff.next(); - Pull::Backoff(self.backoff.retry_time) + Pull::Backoff(backoff) } } struct StageOutRefill { - n_ref_w: Sender<()>, + n_ref_w: Notifier, s_ref_w: RingBufferWriter, } impl StageOutRefill { fn refill(&mut self, batch: WBatch) { assert!(self.s_ref_w.push(batch).is_none()); - let _ = self.n_ref_w.try_send(()); + let _ = self.n_ref_w.notify(); } } @@ -487,7 +513,8 @@ pub(crate) struct TransmissionPipelineConf { pub(crate) batch: BatchConfig, pub(crate) queue_size: [usize; Priority::NUM], pub(crate) wait_before_drop: Duration, - pub(crate) backoff: Duration, + pub(crate) batching_enabled: bool, + pub(crate) batching_time_limit: Duration, } // A 2-stage transmission pipeline @@ -501,7 +528,7 @@ impl TransmissionPipeline { let mut stage_in = vec![]; let mut stage_out = vec![]; - let default_queue_size = [config.queue_size[Priority::default() as usize]]; + let default_queue_size = [config.queue_size[Priority::DEFAULT as usize]]; let size_iter = if priority.len() == 1 { default_queue_size.iter() } else { @@ -510,7 +537,7 @@ impl TransmissionPipeline { // Create the channel for notifying that new batches are in the out ring buffer // This is a MPSC channel - let (n_out_w, n_out_r) = bounded(1); + let (n_out_w, n_out_r) = event::new(); for (prio, num) in size_iter.enumerate() { assert!(*num != 0 && *num <= RBLEN); @@ -525,28 +552,33 @@ impl TransmissionPipeline { } // Create the channel for notifying that new batches are in the refill ring buffer // This is a SPSC channel - let (n_ref_w, n_ref_r) = bounded(1); + let (n_ref_w, n_ref_r) = event::new(); // Create the refill ring buffer // This is a SPSC ring buffer let (s_out_w, 
s_out_r) = RingBuffer::::init(); let current = Arc::new(Mutex::new(None)); - let bytes = Arc::new(AtomicU16::new(0)); - let backoff = Arc::new(AtomicBool::new(false)); + let bytes = Arc::new(AtomicBackoff { + active: CachePadded::new(AtomicBool::new(false)), + bytes: CachePadded::new(AtomicBatchSize::new(0)), + first_write: CachePadded::new(AtomicMicroSeconds::new( + LOCAL_EPOCH.elapsed().as_micros() as MicroSeconds, + )), + }); stage_in.push(Mutex::new(StageIn { s_ref: StageInRefill { n_ref_r, s_ref_r }, s_out: StageInOut { n_out_w: n_out_w.clone(), s_out_w, - bytes: bytes.clone(), - backoff: backoff.clone(), + atomic_backoff: bytes.clone(), }, mutex: StageInMutex { current: current.clone(), priority: priority[prio].clone(), }, fragbuf: ZBuf::empty(), + batching: config.batching_enabled, })); // The stage out for this priority @@ -554,7 +586,7 @@ impl TransmissionPipeline { s_in: StageOutIn { s_out_r, current, - backoff: Backoff::new(config.backoff.as_nanos() as NanoSeconds, bytes, backoff), + backoff: Backoff::new(config.batching_time_limit, bytes), }, s_ref: StageOutRefill { n_ref_w, s_ref_w }, }); @@ -592,7 +624,7 @@ impl TransmissionPipelineProducer { let priority = msg.priority(); (priority as usize, priority) } else { - (0, Priority::default()) + (0, Priority::DEFAULT) }; // If message is droppable, compute a deadline after which the sample could be dropped let deadline_before_drop = if msg.is_droppable() { @@ -636,28 +668,23 @@ impl TransmissionPipelineProducer { pub(crate) struct TransmissionPipelineConsumer { // A single Mutex for all the priority queues stage_out: Box<[StageOut]>, - n_out_r: Receiver<()>, + n_out_r: Waiter, active: Arc, } impl TransmissionPipelineConsumer { pub(crate) async fn pull(&mut self) -> Option<(WBatch, usize)> { - // Reset backoff before pulling - for queue in self.stage_out.iter_mut() { - queue.s_in.backoff.reset(); - } - while self.active.load(Ordering::Relaxed) { + let mut backoff = MicroSeconds::MAX; // Calculate the backoff maximum - let mut bo = NanoSeconds::MAX; for (prio, queue) in self.stage_out.iter_mut().enumerate() { match queue.try_pull() { Pull::Some(batch) => { return Some((batch, prio)); } - Pull::Backoff(b) => { - if b < bo { - bo = b; + Pull::Backoff(deadline) => { + if deadline < backoff { + backoff = deadline; } } Pull::None => {} @@ -671,9 +698,11 @@ impl TransmissionPipelineConsumer { tokio::task::yield_now().await; // Wait for the backoff to expire or for a new message - let res = - tokio::time::timeout(Duration::from_nanos(bo as u64), self.n_out_r.recv_async()) - .await; + let res = tokio::time::timeout( + Duration::from_micros(backoff as u64), + self.n_out_r.wait_async(), + ) + .await; match res { Ok(Ok(())) => { // We have received a notification from the channel that some bytes are available, retry to pull. 
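To summarize the pull-side change in the hunks above: the exponential nanosecond backoff is gone. The consumer now defers pulling only while new bytes keep arriving, and never longer than the configured batching_time_limit measured from the first write into the current batch, with timestamps kept as microseconds relative to the lazily initialized LOCAL_EPOCH. A minimal sketch of the remaining-budget arithmetic, using illustrative names rather than the crate's actual internals:

use std::time::{Duration, Instant};

type MicroSeconds = u32;

// Remaining backoff budget; zero means the deadline passed: pull immediately.
fn remaining_backoff(epoch: Instant, first_write: MicroSeconds, threshold: Duration) -> MicroSeconds {
    let now = epoch.elapsed().as_micros() as MicroSeconds;
    let threshold = threshold.as_micros() as MicroSeconds;
    threshold.saturating_sub(now.saturating_sub(first_write))
}

fn main() {
    let epoch = Instant::now(); // stand-in for LOCAL_EPOCH
    let first_write = epoch.elapsed().as_micros() as MicroSeconds;
    // With a 1 ms batching time limit, the budget starts at (almost) 1000 us.
    assert!(remaining_backoff(epoch, first_write, Duration::from_millis(1)) <= 1_000);
}

This budget is what flows back through Pull::Backoff: the consumer takes the minimum across priority queues and sleeps at most that long on the event, waking earlier if the producer notifies it.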
@@ -722,7 +751,6 @@ impl TransmissionPipelineConsumer { #[cfg(test)] mod tests { - use super::*; use std::{ convert::TryFrom, sync::{ @@ -731,8 +759,8 @@ mod tests { }, time::{Duration, Instant}, }; - use tokio::task; - use tokio::time::timeout; + + use tokio::{task, time::timeout}; use zenoh_buffers::{ reader::{DidntRead, HasReader}, ZBuf, @@ -746,6 +774,8 @@ mod tests { }; use zenoh_result::ZResult; + use super::*; + const SLEEP: Duration = Duration::from_millis(100); const TIMEOUT: Duration = Duration::from_secs(60); @@ -757,8 +787,9 @@ mod tests { is_compression: true, }, queue_size: [1; Priority::NUM], + batching_enabled: true, wait_before_drop: Duration::from_millis(1), - backoff: Duration::from_micros(1), + batching_time_limit: Duration::from_micros(1), }; const CONFIG_NOT_STREAMED: TransmissionPipelineConf = TransmissionPipelineConf { @@ -769,8 +800,9 @@ mod tests { is_compression: false, }, queue_size: [1; Priority::NUM], + batching_enabled: true, wait_before_drop: Duration::from_millis(1), - backoff: Duration::from_micros(1), + batching_time_limit: Duration::from_micros(1), }; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] @@ -784,10 +816,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -912,10 +944,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -1029,10 +1061,10 @@ mod tests { false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/common/priority.rs b/io/zenoh-transport/src/common/priority.rs index 8644cdacb7..fb5c520e3d 100644 --- a/io/zenoh-transport/src/common/priority.rs +++ b/io/zenoh-transport/src/common/priority.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::defragmentation::DefragBuffer; -use super::seq_num::{SeqNum, SeqNumGenerator}; use std::sync::{Arc, Mutex}; + use zenoh_core::zlock; use zenoh_protocol::{ core::{Bits, Reliability}, @@ -21,6 +20,11 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +use super::{ + defragmentation::DefragBuffer, + seq_num::{SeqNum, SeqNumGenerator}, +}; + #[derive(Debug)] pub(crate) struct TransportChannelTx { pub(crate) sn: SeqNumGenerator, diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index f095a58273..da6e57d518 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -167,8 +167,9 @@ macro_rules! stats_struct { } } -use serde::{Deserialize, Serialize}; use std::sync::atomic::{AtomicUsize, Ordering}; + +use serde::{Deserialize, Serialize}; stats_struct! 
{ #[derive(Clone, Debug, Deserialize, Serialize)] pub struct DiscriminatedStats { @@ -208,6 +209,10 @@ stats_struct! { # TYPE "counter" pub tx_z_del_msgs DiscriminatedStats, + # HELP "Counter of sent bytes in zenoh del message attachments." + # TYPE "counter" + pub tx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of sent zenoh query messages." # TYPE "counter" pub tx_z_query_msgs DiscriminatedStats, @@ -252,6 +257,10 @@ stats_struct! { # TYPE "counter" pub rx_z_del_msgs DiscriminatedStats, + # HELP "Counter of received bytes in zenoh del message attachments." + # TYPE "counter" + pub rx_z_del_pl_bytes DiscriminatedStats, + # HELP "Counter of received zenoh query messages." # TYPE "counter" pub rx_z_query_msgs DiscriminatedStats, diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 5e00bed2e7..bfdd79685d 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -28,16 +28,19 @@ pub use common::stats; #[cfg(feature = "shared-memory")] mod shm; -use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; +use std::{any::Any, sync::Arc}; + pub use manager::*; use serde::Serialize; -use std::any::Any; -use std::sync::Arc; use zenoh_link::Link; -use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{WhatAmI, ZenohIdProto}, + network::NetworkMessage, +}; use zenoh_result::ZResult; +use crate::{multicast::TransportMulticast, unicast::TransportUnicast}; + /*************************************/ /* TRANSPORT */ /*************************************/ @@ -105,7 +108,7 @@ impl TransportMulticastEventHandler for DummyTransportMulticastEventHandler { #[derive(Clone, Debug, Serialize, PartialEq, Eq)] #[serde(rename = "Transport")] pub struct TransportPeer { - pub zid: ZenohId, + pub zid: ZenohIdProto, pub whatami: WhatAmI, pub is_qos: bool, #[serde(skip)] diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 2657f5cbd4..305ccab574 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -11,35 +11,41 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::unicast::manager::{ - TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, -}; -use super::TransportEventHandler; -use crate::multicast::manager::{ - TransportManagerBuilderMulticast, TransportManagerConfigMulticast, - TransportManagerStateMulticast, -}; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use rand::{RngCore, SeedableRng}; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::NewLinkChannelSender; use zenoh_protocol::{ - core::{EndPoint, Field, Locator, Priority, Resolution, WhatAmI, ZenohId}, + core::{EndPoint, Field, Locator, Priority, Resolution, WhatAmI, ZenohIdProto}, transport::BatchSize, VERSION, }; use zenoh_result::{bail, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::api::client_storage::GLOBAL_CLIENT_STORAGE; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::ShmReader; use zenoh_task::TaskController; +use super::{ + unicast::manager::{ + TransportManagerBuilderUnicast, TransportManagerConfigUnicast, TransportManagerStateUnicast, + }, + TransportEventHandler, +}; +use crate::multicast::manager::{ + TransportManagerBuilderMulticast,
TransportManagerConfigMulticast, + TransportManagerStateMulticast, +}; + /// # Examples /// ``` /// use std::sync::Arc; /// use std::time::Duration; -/// use zenoh_protocol::core::{ZenohId, Resolution, Field, Bits, WhatAmI, whatami}; +/// use zenoh_protocol::core::{ZenohIdProto, Resolution, Field, Bits, WhatAmI, whatami}; /// use zenoh_transport::*; /// use zenoh_result::ZResult; /// @@ -79,7 +85,7 @@ use zenoh_task::TaskController; /// let mut resolution = Resolution::default(); /// resolution.set(Field::FrameSN, Bits::U8); /// let manager = TransportManager::builder() -/// .zid(ZenohId::rand()) +/// .zid(ZenohIdProto::rand().into()) /// .whatami(WhatAmI::Peer) /// .batch_size(1_024) // Use a batch size of 1024 bytes /// .resolution(resolution) // Use a sequence number resolution of 128 @@ -90,10 +96,11 @@ use zenoh_task::TaskController; pub struct TransportManagerConfig { pub version: u8, - pub zid: ZenohId, + pub zid: ZenohIdProto, pub whatami: WhatAmI, pub resolution: Resolution, - pub batch_size: u16, + pub batch_size: BatchSize, + pub batching: bool, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -119,13 +126,14 @@ pub struct TransportManagerParams { pub struct TransportManagerBuilder { version: u8, - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, resolution: Resolution, - batch_size: u16, + batch_size: BatchSize, + batching_enabled: bool, + batching_time_limit: Duration, wait_before_drop: Duration, queue_size: QueueSizeConf, - queue_backoff: Duration, defrag_buff_size: usize, link_rx_buffer_size: usize, unicast: TransportManagerBuilderUnicast, @@ -133,10 +141,18 @@ pub struct TransportManagerBuilder { endpoints: HashMap<String, String>, // (protocol, config) tx_threads: usize, protocols: Option<Vec<String>>, + #[cfg(feature = "shared-memory")] + shm_reader: Option<ShmReader>, } impl TransportManagerBuilder { - pub fn zid(mut self, zid: ZenohId) -> Self { + #[cfg(feature = "shared-memory")] + pub fn shm_reader(mut self, shm_reader: Option<ShmReader>) -> Self { + self.shm_reader = shm_reader; + self + } + + pub fn zid(mut self, zid: ZenohIdProto) -> Self { self.zid = zid; self } @@ -151,11 +167,21 @@ impl TransportManagerBuilder { self } - pub fn batch_size(mut self, batch_size: u16) -> Self { + pub fn batch_size(mut self, batch_size: BatchSize) -> Self { self.batch_size = batch_size; self } + pub fn batching_enabled(mut self, batching_enabled: bool) -> Self { + self.batching_enabled = batching_enabled; + self + } + + pub fn batching_time_limit(mut self, batching_time_limit: Duration) -> Self { + self.batching_time_limit = batching_time_limit; + self + } + pub fn wait_before_drop(mut self, wait_before_drop: Duration) -> Self { self.wait_before_drop = wait_before_drop; self @@ -166,11 +192,6 @@ impl TransportManagerBuilder { self } - pub fn queue_backoff(mut self, queue_backoff: Duration) -> Self { - self.queue_backoff = queue_backoff; - self - } - pub fn defrag_buff_size(mut self, defrag_buff_size: usize) -> Self { self.defrag_buff_size = defrag_buff_size; self @@ -207,7 +228,7 @@ impl TransportManagerBuilder { } pub async fn from_config(mut self, config: &Config) -> ZResult<TransportManagerBuilder> { - self = self.zid(*config.id()); + self = self.zid((*config.id()).into()); if let Some(v) = config.mode() { self = self.whatami(*v); } @@ -217,13 +238,16 @@ impl TransportManagerBuilder { resolution.set(Field::FrameSN, *link.tx().sequence_number_resolution()); self = self.resolution(resolution); self = self.batch_size(*link.tx().batch_size()); + self =
self.batching_enabled(*link.tx().queue().batching().enabled()); + self = self.batching_time_limit(Duration::from_millis( + *link.tx().queue().batching().time_limit(), + )); self = self.defrag_buff_size(*link.rx().max_message_size()); self = self.link_rx_buffer_size(*link.rx().buffer_size()); self = self.wait_before_drop(Duration::from_micros( *link.tx().queue().congestion_control().wait_before_drop(), )); self = self.queue_size(link.tx().queue().size().clone()); - self = self.queue_backoff(Duration::from_nanos(*link.tx().queue().backoff())); self = self.tx_threads(*link.tx().threads()); self = self.protocols(link.protocols().clone()); @@ -251,7 +275,16 @@ impl TransportManagerBuilder { // Initialize the PRNG and the Cipher let mut prng = PseudoRng::from_entropy(); - let unicast = self.unicast.build(&mut prng)?; + #[cfg(feature = "shared-memory")] + let shm_reader = self + .shm_reader + .unwrap_or_else(|| ShmReader::new((*GLOBAL_CLIENT_STORAGE.read()).clone())); + + let unicast = self.unicast.build( + &mut prng, + #[cfg(feature = "shared-memory")] + &shm_reader, + )?; let multicast = self.multicast.build()?; let mut queue_size = [0; Priority::NUM]; @@ -270,9 +303,10 @@ impl TransportManagerBuilder { whatami: self.whatami, resolution: self.resolution, batch_size: self.batch_size, + batching: self.batching_enabled, wait_before_drop: self.wait_before_drop, queue_size, - queue_backoff: self.queue_backoff, + queue_backoff: self.batching_time_limit, defrag_buff_size: self.defrag_buff_size, link_rx_buffer_size: self.link_rx_buffer_size, unicast: unicast.config, @@ -295,7 +329,12 @@ impl TransportManagerBuilder { let params = TransportManagerParams { config, state }; - Ok(TransportManager::new(params, prng)) + Ok(TransportManager::new( + params, + prng, + #[cfg(feature = "shared-memory")] + shm_reader, + )) } } @@ -303,17 +342,18 @@ impl Default for TransportManagerBuilder { fn default() -> Self { let link_rx = LinkRxConf::default(); let queue = QueueConf::default(); - let backoff = *queue.backoff(); + let backoff = *queue.batching().time_limit(); let wait_before_drop = *queue.congestion_control().wait_before_drop(); Self { version: VERSION, - zid: ZenohId::rand(), + zid: ZenohIdProto::rand(), whatami: zenoh_config::defaults::mode, resolution: Resolution::default(), batch_size: BatchSize::MAX, + batching_enabled: true, wait_before_drop: Duration::from_micros(wait_before_drop), queue_size: queue.size, - queue_backoff: Duration::from_nanos(backoff), + batching_time_limit: Duration::from_millis(backoff), defrag_buff_size: *link_rx.max_message_size(), link_rx_buffer_size: *link_rx.buffer_size(), endpoints: HashMap::new(), @@ -321,6 +361,8 @@ impl Default for TransportManagerBuilder { multicast: TransportManagerBuilderMulticast::default(), tx_threads: 1, protocols: None, + #[cfg(feature = "shared-memory")] + shm_reader: None, } } } @@ -333,13 +375,19 @@ pub struct TransportManager { pub(crate) cipher: Arc<BlockCipher>, pub(crate) locator_inspector: zenoh_link::LocatorInspector, pub(crate) new_unicast_link_sender: NewLinkChannelSender, + #[cfg(feature = "shared-memory")] + pub(crate) shmr: ShmReader, #[cfg(feature = "stats")] pub(crate) stats: Arc<crate::stats::TransportStats>, pub(crate) task_controller: TaskController, } impl TransportManager { - pub fn new(params: TransportManagerParams, mut prng: PseudoRng) -> TransportManager { + pub fn new( + params: TransportManagerParams, + mut prng: PseudoRng, + #[cfg(feature = "shared-memory")] shmr: ShmReader, + ) -> TransportManager { // Initialize the Cipher let mut key = [0_u8;
BlockCipher::BLOCK_SIZE]; prng.fill_bytes(&mut key); @@ -357,6 +405,8 @@ impl TransportManager { new_unicast_link_sender, #[cfg(feature = "stats")] stats: std::sync::Arc::new(crate::stats::TransportStats::default()), + #[cfg(feature = "shared-memory")] + shmr, task_controller: TaskController::default(), }; @@ -386,7 +436,7 @@ impl TransportManager { TransportManagerBuilder::default() } - pub fn zid(&self) -> ZenohId { + pub fn zid(&self) -> ZenohIdProto { self.config.zid } diff --git a/io/zenoh-transport/src/multicast/establishment.rs b/io/zenoh-transport/src/multicast/establishment.rs index a0b7576f03..0c24626697 100644 --- a/io/zenoh-transport/src/multicast/establishment.rs +++ b/io/zenoh-transport/src/multicast/establishment.rs @@ -11,6 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::sync::Arc; + +use rand::Rng; +use zenoh_core::zasynclock; +use zenoh_link::LinkMulticast; +use zenoh_protocol::{ + core::{Field, Priority}, + transport::PrioritySn, +}; +use zenoh_result::{bail, ZResult}; + use crate::{ common::{batch::BatchConfig, seq_num}, multicast::{ @@ -20,15 +31,6 @@ use crate::{ }, TransportManager, }; -use rand::Rng; -use std::sync::Arc; -use zenoh_core::zasynclock; -use zenoh_link::LinkMulticast; -use zenoh_protocol::{ - core::{Field, Priority}, - transport::PrioritySn, -}; -use zenoh_result::{bail, ZResult}; pub(crate) async fn open_link( manager: &TransportManager, diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 193df5ca67..d0d5ef4fb0 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -11,36 +11,38 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - common::{ - batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, - pipeline::{ - TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, - TransmissionPipelineProducer, - }, - priority::TransportPriorityTx, - }, - multicast::transport::TransportMulticastInner, -}; use std::{ convert::TryInto, fmt, sync::Arc, time::{Duration, Instant}, }; + use tokio::task::JoinHandle; use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::{zcondfeat, zlock}; use zenoh_link::{Link, LinkMulticast, Locator}; use zenoh_protocol::{ - core::{Bits, Priority, Resolution, WhatAmI, ZenohId}, + core::{Bits, Priority, Resolution, WhatAmI, ZenohIdProto}, transport::{BatchSize, Close, Join, PrioritySn, TransportMessage, TransportSn}, }; use zenoh_result::{zerror, ZResult}; use zenoh_sync::{RecyclingObject, RecyclingObjectPool, Signal}; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + common::{ + batch::{BatchConfig, Encode, Finalize, RBatch, WBatch}, + pipeline::{ + TransmissionPipeline, TransmissionPipelineConf, TransmissionPipelineConsumer, + TransmissionPipelineProducer, + }, + priority::TransportPriorityTx, + }, + multicast::transport::TransportMulticastInner, +}; + /****************************/ /* TRANSPORT MULTICAST LINK */ /****************************/ @@ -71,9 +73,7 @@ impl TransportLinkMulticast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), @@ -205,13 +205,13 @@ impl TransportLinkMulticastRx { pub async fn recv_batch<C, T>(&self, buff: C) -> ZResult<(RBatch, Locator)> where C: Fn() -> T + Copy, - T: ZSliceBuffer +
'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; let mut into = (buff)(); - let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; - let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let (n, locator) = self.inner.link.read(into.as_mut()).await?; + let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; Ok((batch, locator.into_owned())) @@ -249,7 +249,7 @@ impl fmt::Debug for TransportLinkMulticastRx { /**************************************/ pub(super) struct TransportLinkMulticastConfigUniversal { pub(super) version: u8, - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: WhatAmI, pub(super) lease: Duration, pub(super) join_interval: Duration, @@ -321,7 +321,8 @@ impl TransportLinkMulticastUniversal { batch: self.link.config.batch, queue_size: self.transport.manager.config.queue_size, wait_before_drop: self.transport.manager.config.wait_before_drop, - backoff: self.transport.manager.config.queue_backoff, + batching_enabled: self.transport.manager.config.batching, + batching_time_limit: self.transport.manager.config.queue_backoff, }; // The pipeline let (producer, consumer) = TransmissionPipeline::make(tpc, &priority_tx); @@ -491,7 +492,7 @@ async fn tx_task( .collect::<Vec<PrioritySn>>(); let (next_sn, ext_qos) = if next_sns.len() == Priority::NUM { let tmp: [PrioritySn; Priority::NUM] = next_sns.try_into().unwrap(); - (PrioritySn::default(), Some(Box::new(tmp))) + (PrioritySn::DEFAULT, Some(Box::new(tmp))) } else { (next_sns[0], None) }; @@ -539,7 +540,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject<T>: ZSliceBuffer, + RecyclingObject<T>: AsMut<[u8]> + ZSliceBuffer, { let (rbatch, locator) = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) .await?; } // The pool of buffers - let mtu = link.inner.config.batch.max_buffer_size(); + let mtu = link.inner.config.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/multicast/manager.rs b/io/zenoh-transport/src/multicast/manager.rs index 033daa1791..e2899b1d1a 100644 --- a/io/zenoh-transport/src/multicast/manager.rs +++ b/io/zenoh-transport/src/multicast/manager.rs @@ -11,25 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::multicast::shm::SharedMemoryMulticast; -use crate::multicast::{transport::TransportMulticastInner, TransportMulticast}; -use crate::TransportManager; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; +use std::{collections::HashMap, sync::Arc, time::Duration}; + use tokio::sync::Mutex; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionMulticastConf; #[cfg(feature = "shared-memory")] -use zenoh_config::SharedMemoryConf; +use zenoh_config::ShmConf; use zenoh_config::{Config, LinkTxConf}; use zenoh_core::zasynclock; use zenoh_link::*; -use zenoh_protocol::core::ZenohId; -use zenoh_protocol::{core::endpoint, transport::close}; +use zenoh_protocol::{ + core::{parameters, ZenohIdProto}, + transport::close, +}; use zenoh_result::{bail, zerror, ZResult}; +use crate::{ + multicast::{transport::TransportMulticastInner, TransportMulticast}, + TransportManager, +}; + pub struct TransportManagerConfigMulticast { pub lease: Duration,
pub keep_alive: usize, @@ -59,9 +61,6 @@ pub struct TransportManagerStateMulticast { pub(crate) protocols: Arc<Mutex<HashMap<String, LinkManagerMulticast>>>, // Established transports pub(crate) transports: Arc<Mutex<HashMap<Locator, Arc<TransportMulticastInner>>>>, - // Shared memory - #[cfg(feature = "shared-memory")] - pub(super) shm: Arc<SharedMemoryMulticast>, } pub struct TransportManagerParamsMulticast { @@ -141,8 +140,6 @@ impl TransportManagerBuilderMulticast { let state = TransportManagerStateMulticast { protocols: Arc::new(Mutex::new(HashMap::new())), transports: Arc::new(Mutex::new(HashMap::new())), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryMulticast::make()?), }; let params = TransportManagerParamsMulticast { config, state }; @@ -155,7 +152,7 @@ impl Default for TransportManagerBuilderMulticast { fn default() -> TransportManagerBuilderMulticast { let link_tx = LinkTxConf::default(); #[cfg(feature = "shared-memory")] - let shm = SharedMemoryConf::default(); + let shm = ShmConf::default(); #[cfg(feature = "transport_compression")] let compression = CompressionMulticastConf::default(); @@ -261,7 +258,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend(endpoint::Parameters::iter(config))?; + .extend_from_iter(parameters::iter(config))?; } // Open the link @@ -269,7 +266,7 @@ impl TransportManager { super::establishment::open_link(self, link).await } - pub async fn get_transport_multicast(&self, zid: &ZenohId) -> Option<TransportMulticast> { + pub async fn get_transport_multicast(&self, zid: &ZenohIdProto) -> Option<TransportMulticast> { for t in zasynclock!(self.state.multicast.transports).values() { if t.get_peers().iter().any(|p| p.zid == *zid) { return Some(t.into()); diff --git a/io/zenoh-transport/src/multicast/mod.rs b/io/zenoh-transport/src/multicast/mod.rs index daf9b069ff..78d76bb6c8 100644 --- a/io/zenoh-transport/src/multicast/mod.rs +++ b/io/zenoh-transport/src/multicast/mod.rs @@ -14,23 +14,18 @@ pub(crate) mod establishment; pub(crate) mod link; pub(crate) mod manager; pub(crate) mod rx; -#[cfg(feature = "shared-memory")] -pub(crate) mod shm; pub(crate) mod transport; pub(crate) mod tx; -use super::common; -use crate::{ - multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +use std::{ + fmt::{self, Write}, + sync::{Arc, Weak}, }; + pub use manager::{ TransportManagerBuilderMulticast, TransportManagerConfigMulticast, TransportManagerParamsMulticast, }; -use std::{ - fmt::{self, Write}, - sync::{Arc, Weak}, -}; use transport::TransportMulticastInner; use zenoh_core::{zcondfeat, zread}; use zenoh_link::Link; @@ -41,6 +36,11 @@ use zenoh_protocol::{ }; use zenoh_result::{zerror, ZResult}; +use super::common; +use crate::{ + multicast::link::TransportLinkMulticast, TransportMulticastEventHandler, TransportPeer, +}; + /*************************************/ /* TRANSPORT MULTICAST */ /*************************************/ diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 6d662f2873..93dc3c727a 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::{TransportMulticastInner, TransportMulticastPeer}; -use crate::common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_protocol::{ core::{Locator, Priority, Reliability}, @@ -28,6 +24,12 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use
super::transport::{TransportMulticastInner, TransportMulticastPeer}; +use crate::common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, +}; + /*************************************/ /* TRANSPORT RX */ /*************************************/ @@ -42,7 +44,10 @@ impl TransportMulticastInner { #[cfg(feature = "shared-memory")] { if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader)?; + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + tracing::debug!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } @@ -145,7 +150,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( @@ -181,7 +186,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &peer.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/src/multicast/shm.rs b/io/zenoh-transport/src/multicast/shm.rs deleted file mode 100644 index 060198d927..0000000000 --- a/io/zenoh-transport/src/multicast/shm.rs +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm_mcast"; - -pub(crate) struct SharedMemoryMulticast { - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock<SharedMemoryReader>, -} - -unsafe impl Sync for SharedMemoryMulticast {} - -impl SharedMemoryMulticast { - pub fn make() -> ZResult<SharedMemoryMulticast> { - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::<Challenge>(); - let size = std::mem::size_of::<Challenge>(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let shmauth = SharedMemoryMulticast { - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index 155b6b5568..f0dfec4813 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -11,16 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::common::priority::{TransportPriorityRx, TransportPriorityTx}; -use super::link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::{ - multicast::{ - link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, - }, - TransportManager, TransportPeer, TransportPeerEventHandler, -}; use std::{ collections::HashMap, sync::{ @@ -29,17 +19,31 @@ use std::{ }, time::Duration, }; + use tokio_util::sync::CancellationToken; use zenoh_core::{zcondfeat,
zread, zwrite}; use zenoh_link::{Link, Locator}; -use zenoh_protocol::core::Resolution; -use zenoh_protocol::transport::{batch_size, Close, TransportMessage}; use zenoh_protocol::{ - core::{Bits, Field, Priority, WhatAmI, ZenohId}, - transport::{close, Join}, + core::{Bits, Field, Priority, Resolution, WhatAmI, ZenohIdProto}, + transport::{batch_size, close, Close, Join, TransportMessage}, }; use zenoh_result::{bail, ZResult}; use zenoh_task::TaskController; + +use super::{ + common::priority::{TransportPriorityRx, TransportPriorityTx}, + link::{TransportLinkMulticastConfigUniversal, TransportLinkMulticastUniversal}, +}; +#[cfg(feature = "shared-memory")] +use crate::shm::MulticastTransportShmConfig; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::{ + multicast::{ + link::TransportLinkMulticast, TransportConfigMulticast, TransportMulticastEventHandler, + }, + TransportManager, TransportPeer, TransportPeerEventHandler, +}; // use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; /*************************************/ @@ -49,7 +53,7 @@ use zenoh_task::TaskController; pub(super) struct TransportMulticastPeer { pub(super) version: u8, pub(super) locator: Locator, - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: WhatAmI, pub(super) resolution: Resolution, pub(super) lease: Duration, @@ -88,6 +92,8 @@ pub(crate) struct TransportMulticastInner { // Transport statistics #[cfg(feature = "stats")] pub(super) stats: Arc<TransportStats>, + #[cfg(feature = "shared-memory")] + pub(super) shm: Option<MulticastTransportShmConfig>, } impl TransportMulticastInner { @@ -109,6 +115,12 @@ impl TransportMulticastInner { #[cfg(feature = "stats")] let stats = Arc::new(TransportStats::new(Some(manager.get_stats().clone()))); + #[cfg(feature = "shared-memory")] + let shm = match manager.config.multicast.is_shm { + true => Some(MulticastTransportShmConfig), + false => None, + }; + let ti = TransportMulticastInner { manager, priority_tx: priority_tx.into_boxed_slice().into(), @@ -119,6 +131,8 @@ impl TransportMulticastInner { task_controller: TaskController::default(), #[cfg(feature = "stats")] stats, + #[cfg(feature = "shared-memory")] + shm, }; let link = TransportLinkMulticastUniversal::new(ti.clone(), config.link); diff --git a/io/zenoh-transport/src/multicast/tx.rs b/io/zenoh-transport/src/multicast/tx.rs index 3b58277402..775131703a 100644 --- a/io/zenoh-transport/src/multicast/tx.rs +++ b/io/zenoh-transport/src/multicast/tx.rs @@ -11,10 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportMulticastInner; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportMulticastInner; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + //noinspection ALL impl TransportMulticastInner { fn schedule_on_link(&self, msg: NetworkMessage) -> bool { @@ -53,12 +56,7 @@ impl TransportMulticastInner { pub(super) fn schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.manager.config.multicast.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.multicast.shm.reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index 7b87f038e5..c562e47135 100644 --- a/io/zenoh-transport/src/shm.rs +++
b/io/zenoh-transport/src/shm.rs @@ -11,69 +11,176 @@ // Contributors: // ZettaScale Zenoh Team, // -use tokio::sync::RwLock; +use std::collections::HashSet; + use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf, ZSlice, ZSliceKind}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_core::{zasyncread, zasyncwrite, zerror}; +use zenoh_core::zerror; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ - err::{ext::ErrBodyType, Err}, + err::Err, ext::ShmType, query::{ext::QueryBodyType, Query}, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryBufInfo, SharedMemoryReader}; +use zenoh_shm::{api::common::types::ProtocolID, reader::ShmReader, ShmBufInfo, ShmBufInner}; + +use crate::unicast::establishment::ext::shm::AuthSegment; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TransportShmConfig { + partner_protocols: HashSet<ProtocolID>, +} + +impl PartnerShmConfig for TransportShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool { + self.partner_protocols.contains(&protocol) + } +} + +impl TransportShmConfig { + pub fn new(partner_segment: AuthSegment) -> Self { + Self { + partner_protocols: partner_segment.protocols().iter().cloned().collect(), + } + } +} + +#[derive(Clone)] +pub struct MulticastTransportShmConfig; + +impl PartnerShmConfig for MulticastTransportShmConfig { + fn supports_protocol(&self, _protocol: ProtocolID) -> bool { + true + } +} + +pub fn map_zmsg_to_partner( + msg: &mut NetworkMessage, + partner_shm_cfg: &Option<impl PartnerShmConfig>, +) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_partner(partner_shm_cfg), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. }) => match payload { + RequestBody::Query(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::Response(Response { payload, .. }) => match payload { + ResponseBody::Reply(b) => b.map_to_partner(partner_shm_cfg), + ResponseBody::Err(b) => b.map_to_partner(partner_shm_cfg), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} + +pub fn map_zmsg_to_shmbuf(msg: &mut NetworkMessage, shmr: &ShmReader) -> ZResult<()> { + match &mut msg.body { + NetworkBody::Push(Push { payload, .. }) => match payload { + PushBody::Put(b) => b.map_to_shmbuf(shmr), + PushBody::Del(_) => Ok(()), + }, + NetworkBody::Request(Request { payload, .. }) => match payload { + RequestBody::Query(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::Response(Response { payload, ..
}) => match payload { + ResponseBody::Err(b) => b.map_to_shmbuf(shmr), + ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + }, + NetworkBody::ResponseFinal(_) + | NetworkBody::Interest(_) + | NetworkBody::Declare(_) + | NetworkBody::OAM(_) => Ok(()), + } +} -// Traits +pub trait PartnerShmConfig { + fn supports_protocol(&self, protocol: ProtocolID) -> bool; +} + +// Currently, there can be three forms of ZSlice: +// rawbuf - usual non-shm buffer +// shminfo - small SHM info that can be used to mount SHM buffer and get access to its contents +// shmbuf - mounted SHM buffer +// On RX and TX we need to do the following conversion: trait MapShm { - fn map_to_shminfo(&mut self) -> ZResult<bool>; - fn map_to_shmbuf(&mut self, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool>; + // RX: + // - shminfo -> shmbuf + // - rawbuf -> rawbuf (no changes) + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()>; + + // TX: + // - shmbuf -> shminfo if partner supports shmbuf's SHM protocol + // - shmbuf -> rawbuf if partner does not support shmbuf's SHM protocol + // - rawbuf -> rawbuf (no changes) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option<impl PartnerShmConfig>, + ) -> ZResult<()>; } -macro_rules! map_to_shminfo { - ($zbuf:expr, $ext_shm:expr) => {{ - let res = map_zbuf_to_shminfo($zbuf)?; - if res { - *$ext_shm = Some(ShmType::new()); +macro_rules! map_to_partner { + ($zbuf:expr, $ext_shm:expr, $partner_shm_cfg:expr) => {{ + match $partner_shm_cfg { + Some(shm_cfg) => { + let res = to_shm_partner($zbuf, shm_cfg)?; + if res { + *$ext_shm = Some(ShmType::new()); + } + } + None => { + to_non_shm_partner($zbuf); + } } - Ok(res) + + Ok(()) }}; } -macro_rules! map_to_shmbuf { +macro_rules! map_zbuf_to_shmbuf { ($zbuf:expr, $ext_shm:expr, $shmr:expr) => {{ if $ext_shm.is_some() { *$ext_shm = None; map_zbuf_to_shmbuf($zbuf, $shmr) } else { - Ok(false) + Ok(()) } }}; } // Impl - Put impl MapShm for Put { - fn map_to_shminfo(&mut self) -> ZResult<bool> { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option<impl PartnerShmConfig>, + ) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { let Self { payload, ext_shm, .. } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } // Impl - Query impl MapShm for Query { - fn map_to_shminfo(&mut self) -> ZResult<bool> { + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option<impl PartnerShmConfig>, + ) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. @@ -81,13 +188,13 @@ impl MapShm for Query { .. } = self { - map_to_shminfo!(payload, ext_shm) + map_to_partner!(payload, ext_shm, partner_shm_cfg) } else { - Ok(false) + Ok(()) } } - fn map_to_shmbuf(&mut self, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool> { + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { if let Self { ext_body: Some(QueryBodyType { payload, ext_shm, .. }), .. } = self { - map_to_shmbuf!(payload, ext_shm, shmr) + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } else { - Ok(false) + Ok(()) } } } // Impl - Reply impl MapShm for Reply { - fn map_to_shminfo(&mut self) -> ZResult<bool> { - let Self { - payload, ext_shm, .. - } = self; - map_to_shminfo!(payload, ext_shm) + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option<impl PartnerShmConfig>, + ) -> ZResult<()> { + match &mut self.payload { + PushBody::Put(put) => { + let Put { + payload, ext_shm, ..
+ } = put; + map_to_partner!(payload, ext_shm, partner_shm_cfg) + } + PushBody::Del(_) => Ok(()), + } } - fn map_to_shmbuf(&mut self, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool> { - let Self { - payload, ext_shm, .. - } = self; - map_to_shmbuf!(payload, ext_shm, shmr) + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { + match &mut self.payload { + PushBody::Put(put) => { + let Put { + payload, ext_shm, .. + } = put; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) + } + PushBody::Del(_) => Ok(()), + } } } // Impl - Err impl MapShm for Err { - fn map_to_shminfo(&mut self) -> ZResult<bool> { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. - } = self - { - map_to_shminfo!(payload, ext_shm) - } else { - Ok(false) - } + fn map_to_partner( + &mut self, + partner_shm_cfg: &Option<impl PartnerShmConfig>, + ) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_to_partner!(payload, ext_shm, partner_shm_cfg) } - fn map_to_shmbuf(&mut self, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool> { - if let Self { - ext_body: Some(ErrBodyType { - payload, ext_shm, .. - }), - .. - } = self - { - map_to_shmbuf!(payload, ext_shm, shmr) - } else { - Ok(false) - } - } -} - -// ShmBuf -> ShmInfo -pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult<bool> { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shminfo(), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. }) => match payload { - RequestBody::Query(b) => b.map_to_shminfo(), - RequestBody::Put(b) => b.map_to_shminfo(), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), - }, - NetworkBody::Response(Response { payload, .. }) => match payload { - ResponseBody::Reply(b) => b.map_to_shminfo(), - ResponseBody::Put(b) => b.map_to_shminfo(), - ResponseBody::Err(b) => b.map_to_shminfo(), - ResponseBody::Ack(_) => Ok(false), - }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + fn map_to_shmbuf(&mut self, shmr: &ShmReader) -> ZResult<()> { + let Self { + payload, ext_shm, .. + } = self; + map_zbuf_to_shmbuf!(payload, ext_shm, shmr) } } -// Mapping -pub fn map_zbuf_to_shminfo(zbuf: &mut ZBuf) -> ZResult<bool> { - let mut res = false; - for zs in zbuf.zslices_mut() { - if let Some(shmb) = zs.downcast_ref::<SharedMemoryBuf>() { - *zs = map_zslice_to_shminfo(shmb)?; - res = true; - } - } - Ok(res) +#[cold] +#[inline(never)] +pub fn shmbuf_to_rawbuf(shmb: &ShmBufInner) -> ZSlice { + // Convert shmb to raw buffer + // TODO: optimize this! We should not make an additional buffer copy here, + // but we need the serializer to serialize the SHM buffer as a raw buffer.
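+    // The copy below detaches the payload from the shared-memory segment:
+    // `as_ref()` exposes the mapped bytes, `to_vec()` clones them into an
+    // owned heap buffer, and `into()` wraps that buffer in a ZSlice, so a
+    // non-SHM partner receives a plain raw buffer.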
+ shmb.as_ref().to_vec().into() } #[cold] #[inline(never)] -pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult<ZSlice> { +pub fn shmbuf_to_shminfo(shmb: &ShmBufInner) -> ZResult<ZSlice> { // Serialize the shmb info let codec = Zenoh080::new(); let mut info = vec![]; @@ -194,75 +278,63 @@ pub fn map_zslice_to_shminfo(shmb: &SharedMemoryBuf) -> ZResult<ZSlice> { codec .write(&mut writer, &shmb.info) .map_err(|e| zerror!("{:?}", e))?; - // Increase the reference count so to keep the SharedMemoryBuf valid - shmb.inc_ref_count(); + // Increase the reference count so as to keep the ShmBufInner valid + unsafe { shmb.inc_ref_count() }; // Replace the content of the slice let mut zslice: ZSlice = info.into(); zslice.kind = ZSliceKind::ShmPtr; Ok(zslice) } -// ShmInfo -> ShmBuf -pub fn map_zmsg_to_shmbuf( - msg: &mut NetworkMessage, - shmr: &RwLock<SharedMemoryReader>, +fn to_shm_partner<ShmCfg: PartnerShmConfig>( + zbuf: &mut ZBuf, + partner_shm_cfg: &ShmCfg, ) -> ZResult<bool> { - match &mut msg.body { - NetworkBody::Push(Push { payload, .. }) => match payload { - PushBody::Put(b) => b.map_to_shmbuf(shmr), - PushBody::Del(_) => Ok(false), - }, - NetworkBody::Request(Request { payload, .. }) => match payload { - RequestBody::Query(b) => b.map_to_shmbuf(shmr), - RequestBody::Put(b) => b.map_to_shmbuf(shmr), - RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), - }, - NetworkBody::Response(Response { payload, .. }) => match payload { - ResponseBody::Put(b) => b.map_to_shmbuf(shmr), - ResponseBody::Err(b) => b.map_to_shmbuf(shmr), - ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), - ResponseBody::Ack(_) => Ok(false), - }, - NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), + let mut res = false; + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::<ShmBufInner>() { + if partner_shm_cfg.supports_protocol(shmb.info.shm_protocol) { + *zs = shmbuf_to_shminfo(shmb)?; + res = true; + } else { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } } + Ok(res) } -// Mapping -pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &RwLock<SharedMemoryReader>) -> ZResult<bool> { - let mut res = false; +fn to_non_shm_partner(zbuf: &mut ZBuf) { + for zs in zbuf.zslices_mut() { + if let Some(shmb) = zs.downcast_ref::<ShmBufInner>() { + // Replace the content of the slice with rawbuf + *zs = shmbuf_to_rawbuf(shmb) + } + } +} + +pub fn map_zbuf_to_shmbuf(zbuf: &mut ZBuf, shmr: &ShmReader) -> ZResult<()> { for zs in zbuf.zslices_mut().filter(|x| x.kind == ZSliceKind::ShmPtr) { - res |= map_zslice_to_shmbuf(zs, shmr)?; + map_zslice_to_shmbuf(zs, shmr)?; } - Ok(res) + Ok(()) } #[cold] #[inline(never)] -pub fn map_zslice_to_shmbuf( - zslice: &mut ZSlice, - shmr: &RwLock<SharedMemoryReader>, -) -> ZResult<bool> { - // Deserialize the shmb info into shm buff +pub fn map_zslice_to_shmbuf(zslice: &mut ZSlice, shmr: &ShmReader) -> ZResult<()> { let codec = Zenoh080::new(); let mut reader = zslice.reader(); - let shmbinfo: SharedMemoryBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; + // Deserialize the shminfo + let shmbinfo: ShmBufInfo = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - // First, try in read mode allowing concurrenct lookups - let r_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncread!(shmr) }) - }); - let smb = r_guard.try_read_shmbuf(&shmbinfo).or_else(|_| { - drop(r_guard); - let mut w_guard = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { zasyncwrite!(shmr) }) - }); - w_guard.read_shmbuf(&shmbinfo) - })?; + // Mount shmbuf + let smb =
shmr.read_shmbuf(&shmbinfo)?; // Replace the content of the slice - let zs: ZSlice = smb.into(); - *zslice = zs; + *zslice = smb.into(); - Ok(true) + Ok(()) } diff --git a/io/zenoh-transport/src/unicast/authentication.rs b/io/zenoh-transport/src/unicast/authentication.rs new file mode 100644 index 0000000000..0654085968 --- /dev/null +++ b/io/zenoh-transport/src/unicast/authentication.rs @@ -0,0 +1,54 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use zenoh_link::{LinkAuthId, LinkAuthType}; + +#[cfg(feature = "auth_usrpwd")] +use super::establishment::ext::auth::UsrPwdId; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum AuthId { + CertCommonName(String), + Username(String), + None, +} + +impl From<LinkAuthId> for AuthId { + fn from(lid: LinkAuthId) -> Self { + match (lid.get_type(), lid.get_value()) { + (LinkAuthType::Tls | LinkAuthType::Quic, Some(auth_value)) => { + AuthId::CertCommonName(auth_value.clone()) + } + _ => AuthId::None, + } + } +} + +#[cfg(feature = "auth_usrpwd")] +impl From<UsrPwdId> for AuthId { + fn from(user_password_id: UsrPwdId) -> Self { + match user_password_id.0 { + Some(username) => { + // Convert username from Vec<u8> to String + match std::str::from_utf8(&username) { + Ok(name) => AuthId::Username(name.to_owned()), + Err(e) => { + tracing::error!("Error extracting username: {}", e); + AuthId::None + } + } + } + None => AuthId::None, + } + } +} diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index a901aba6ec..64949357c6 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -11,23 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; -use crate::{ - common::batch::BatchConfig, - unicast::{ - establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, - link::{ - LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, - TransportLinkUnicastDirection, - }, - TransportConfigUnicast, - }, - TransportManager, -}; +use std::time::Duration; + use async_trait::async_trait; use rand::Rng; -use std::time::Duration; use tokio::sync::Mutex; use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZSlice}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; @@ -35,7 +22,7 @@ use zenoh_core::{zasynclock, zcondfeat, zerror}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::LinkUnicast; use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, + core::{Field, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, close::{self, Close}, @@ -44,6 +31,25 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +#[cfg(feature = "auth_usrpwd")] +use super::ext::auth::UsrPwdId; +#[cfg(feature = "shared-memory")] +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; +use crate::{ + common::batch::BatchConfig, + unicast::{ + establishment::{compute_sn, ext, AcceptFsm, Cookie, Zenoh080Cookie}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast,
TransportLinkUnicastConfig, + TransportLinkUnicastDirection, + }, + TransportConfigUnicast, + }, + TransportManager, +}; + pub(super) type AcceptError = (zenoh_result::Error, Option<u8>); struct StateTransport { @@ -76,24 +82,26 @@ struct RecvInitSynIn { mine_version: u8, } struct RecvInitSynOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option<AuthSegment>, } // InitAck struct SendInitAckIn { mine_version: u8, - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_whatami: WhatAmI, - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option<AuthSegment>, } struct SendInitAckOut { cookie_nonce: u64, + #[cfg(feature = "shared-memory")] + ext_shm: Option<AuthSegment>, } // OpenSyn @@ -101,17 +109,19 @@ struct RecvOpenSynIn { cookie_nonce: u64, } struct RecvOpenSynOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, other_lease: Duration, other_initial_sn: TransportSn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: UsrPwdId, } // OpenAck struct SendOpenAckIn { - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_lease: Duration, - other_zid: ZenohId, + other_zid: ZenohIdProto, } struct SendOpenAckOut { open_ack: OpenAck, @@ -126,7 +136,8 @@ struct AcceptLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + // Will be None if SHM operation is disabled by Config + ext_shm: Option<ext::shm::ShmFsm<'a>>, #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -152,6 +163,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept InitSyn: {}.
Received: {:?}", + self.link, + msg + ); + let init_syn = match msg.body { TransportBody::InitSyn(init_syn) => init_syn, _ => { @@ -167,9 +184,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Check if the version is supported if init_syn.version != input.mine_version { let e = zerror!( - "Rejecting InitSyn on {} because of unsupported Zenoh version from peer: {}", + "Rejecting InitSyn on {} because of unsupported Zenoh protocol version (expected: {}, received: {}) from: {}", self.link, - init_syn.zid + input.mine_version, + init_syn.version, + init_syn.zid, ); return Err((e.into(), Some(close::reason::INVALID))); } @@ -206,11 +225,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let ext_shm = self - .ext_shm - .recv_init_syn((&mut state.transport.ext_shm, init_syn.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + let ext_shm = match &self.ext_shm { + Some(my_shm) => my_shm + .recv_init_syn(init_syn.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + _ => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -265,14 +286,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_init_ack((&mut state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_init_ack(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + _ => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -347,7 +368,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let cookie: ZSlice = encrypted.into(); // Send the message on the link - let message: TransportMessage = InitAck { + let msg: TransportMessage = InitAck { version: input.mine_version, whatami: input.mine_whatami, zid: input.mine_zid, @@ -355,6 +376,7 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { batch_size: state.transport.batch_size, cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -365,11 +387,21 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { let _ = self .link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendInitAckOut { cookie_nonce }; + tracing::trace!( + "Establishment Accept InitAck: {}. Sent: {:?}", + self.link, + msg + ); + + let output = SendInitAckOut { + cookie_nonce, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -385,6 +417,12 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!( + "Establishment Accept OpenSyn: {}. Received: {:?}", + self.link, + msg + ); + let open_syn = match msg.body { TransportBody::OpenSyn(open_syn) => open_syn, TransportBody::Close(Close { reason, .. 
}) => { @@ -462,17 +500,21 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(my_shm) = self.ext_shm.as_ref() { + my_shm + .recv_open_syn((&mut state.transport.ext_shm, open_syn.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?; + } // Extension Auth - #[cfg(feature = "transport_auth")] - self.ext_auth + #[cfg(feature = "auth_usrpwd")] + let user_password_id = self + .ext_auth .recv_open_syn((&mut state.link.ext_auth, open_syn.ext_auth)) .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + .map_err(|e| (e, Some(close::reason::GENERIC)))? + .auth_id; // Extension MultiLink #[cfg(feature = "transport_multilink")] @@ -499,6 +541,8 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { other_whatami: cookie.whatami, other_lease: open_syn.lease, other_initial_sn: open_syn.initial_sn, + #[cfg(feature = "auth_usrpwd")] + other_auth_id: user_password_id, }; Ok((state, output)) } @@ -526,14 +570,14 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_open_ack(&mut state.transport.ext_shm) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(my_shm) => my_shm + .send_open_ack(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -568,10 +612,11 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Build OpenAck message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let open_ack = OpenAck { + let msg = OpenAck { lease: input.mine_lease, initial_sn: mine_initial_sn, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -580,8 +625,13 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { }; // Do not send the OpenAck right now since we might still incur in MAX_LINKS error + tracing::trace!( + "Establishment Accept OpenAck: {}. 
Sent: {:?}", + self.link, + msg + ); - let output = SendOpenAckOut { open_ack }; + let output = SendOpenAckOut { open_ack: msg }; Ok(output) } } @@ -605,7 +655,12 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - cipher: &manager.cipher, ext_qos: ext::qos::QoSFsm::new(), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "transport_auth")] @@ -642,7 +697,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .multilink .accept(manager.config.unicast.max_links > 1), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateAccept::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateAccept::new(), ext_lowlatency: ext::lowlatency::StateAccept::new( manager.config.unicast.is_lowlatency, ), @@ -706,8 +761,13 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => iack_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: osyn_out.other_auth_id, }; let a_config = TransportLinkUnicastConfig { @@ -732,7 +792,7 @@ pub(crate) async fn accept_link(link: LinkUnicast, manager: &TransportManager) - .await?; tracing::debug!( - "New transport link accepted from {} to {}: {}.", + "New transport link accepted from {} to {}: {}", osyn_out.other_zid, manager.config.zid, s_link, diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index e9916be7e6..4220f8e08b 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,23 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -// use super::properties::EstablishmentProperties; -use crate::unicast::establishment::ext; use std::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, writer::{DidntWrite, HasWriter, Writer}, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_protocol::core::{Resolution, WhatAmI, ZenohId}; +use zenoh_protocol::{ + core::{Resolution, WhatAmI, ZenohIdProto}, + transport::BatchSize, +}; + +use crate::unicast::establishment::ext; #[derive(Debug, PartialEq)] pub(crate) struct Cookie { - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, pub(crate) resolution: Resolution, - pub(crate) batch_size: u16, + pub(crate) batch_size: BatchSize, pub(crate) nonce: u64, // Extensions pub(crate) ext_qos: ext::qos::StateAccept, @@ -78,12 +82,12 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result<Cookie, Self::Error> { - let zid: ZenohId = self.read(&mut *reader)?; + let zid: ZenohIdProto = self.read(&mut *reader)?; let wai: u8 = self.read(&mut *reader)?; let whatami = WhatAmI::try_from(wai).map_err(|_| DidntRead)?; let resolution: u8 = self.read(&mut *reader)?; let resolution = Resolution::from(resolution); - let batch_size: u16 = self.read(&mut *reader)?; + let batch_size: BatchSize =
self.read(&mut *reader)?; let nonce: u64 = self.read(&mut *reader)?; // Extensions let ext_qos: ext::qos::StateAccept = self.read(&mut *reader)?; @@ -169,7 +173,7 @@ impl Cookie { let mut rng = rand::thread_rng(); Self { - zid: ZenohId::default(), + zid: ZenohIdProto::default(), whatami: WhatAmI::rand(), resolution: Resolution::rand(), batch_size: rng.gen(), @@ -191,10 +195,11 @@ impl Cookie { mod tests { #[test] fn codec_cookie() { - use super::*; use rand::{Rng, SeedableRng}; use zenoh_buffers::ZBuf; + use super::*; + const NUM_ITER: usize = 1_000; macro_rules! run_single { diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs index beab85d18a..8b7125de6d 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/mod.rs @@ -16,21 +16,19 @@ pub(crate) mod pubkey; #[cfg(feature = "auth_usrpwd")] pub(crate) mod usrpwd; -use crate::unicast::establishment::{AcceptFsm, OpenFsm}; +use std::{convert::TryInto, marker::PhantomData}; + use async_trait::async_trait; #[cfg(feature = "auth_pubkey")] pub use pubkey::*; use rand::{CryptoRng, Rng}; -use std::convert::TryInto; -use std::marker::PhantomData; use tokio::sync::{Mutex, RwLock}; #[cfg(feature = "auth_usrpwd")] pub use usrpwd::*; -use zenoh_buffers::reader::SiphonableReader; -use zenoh_buffers::ZBuf; use zenoh_buffers::{ - reader::{DidntRead, HasReader, Reader}, + reader::{DidntRead, HasReader, Reader, SiphonableReader}, writer::{DidntWrite, HasWriter, Writer}, + ZBuf, }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::Config; @@ -41,6 +39,8 @@ use zenoh_protocol::{ transport::{init, open}, }; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + pub(crate) mod id { #[cfg(feature = "auth_pubkey")] pub(crate) const PUBKEY: u8 = 0x1; @@ -475,6 +475,11 @@ impl<'a> OpenFsm for &'a AuthFsm<'a> { /*************************************/ /* ACCEPT */ /*************************************/ +pub(crate) struct RecvOpenSynOut { + #[cfg(feature = "auth_usrpwd")] + pub(crate) auth_id: UsrPwdId, +} + #[async_trait] impl<'a> AcceptFsm for &'a AuthFsm<'a> { type Error = ZError; @@ -571,7 +576,9 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } type RecvOpenSynIn = (&'a mut StateAccept, Option); - type RecvOpenSynOut = (); + + type RecvOpenSynOut = RecvOpenSynOut; + async fn recv_open_syn( self, input: Self::RecvOpenSynIn, @@ -599,19 +606,27 @@ impl<'a> AcceptFsm for &'a AuthFsm<'a> { } } + #[cfg(feature = "auth_usrpwd")] + let auth_id: UsrPwdId; + #[cfg(feature = "auth_usrpwd")] { match (self.usrpwd.as_ref(), state.usrpwd.as_mut()) { (Some(e), Some(s)) => { let x = ztake!(exts, id::USRPWD); - e.recv_open_syn((s, ztryinto!(x, S))).await?; + let username = e.recv_open_syn((s, ztryinto!(x, S))).await?; + auth_id = UsrPwdId(Some(username)); + } + (None, None) => { + auth_id = UsrPwdId(None); } - (None, None) => {} _ => bail!("{S} Invalid UsrPwd configuration."), } } - - Ok(()) + Ok(RecvOpenSynOut { + #[cfg(feature = "auth_usrpwd")] + auth_id, + }) } type SendOpenAckIn = &'a StateAccept; diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs index 9a7c3d8f32..5638a9ee33 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/pubkey.rs @@ -11,7 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use 
crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashSet, fmt, ops::Deref, path::Path}; + use async_trait::async_trait; use rand::Rng; use rsa::{ @@ -19,7 +20,6 @@ use rsa::{ traits::PublicKeyParts, BigUint, Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey, }; -use std::{collections::HashSet, fmt, ops::Deref, path::Path}; use tokio::sync::{Mutex, RwLock}; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -31,10 +31,13 @@ use zenoh_core::{bail, zasynclock, zasyncread, zerror, Error as ZError, Result a use zenoh_crypto::PseudoRng; use zenoh_protocol::common::{ZExtUnit, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; use zenoh_protocol::{zextunit, zextzbuf}; + use super::{id::PUBKEY, ZExtUnit, ZExtZBuf}; + pub(super) type InitSyn = zextzbuf!(PUBKEY, false); pub(super) type InitAck = zextzbuf!(PUBKEY, false); pub(super) type OpenSyn = zextzbuf!(PUBKEY, false); @@ -210,12 +213,14 @@ where /*************************************/ /* InitSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitSyn { pub(crate) alice_pubkey: ZPublicKey, } @@ -247,6 +252,7 @@ where /*************************************/ /* InitAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ public key ~ @@ -255,6 +261,7 @@ where /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct InitAck { pub(crate) bob_pubkey: ZPublicKey, pub(crate) nonce_encrypted_with_alice_pubkey: Vec, @@ -292,12 +299,14 @@ where /*************************************/ /* OpenSyn */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ciphered nonce~ /// +---------------+ /// /// ZExtZBuf +/// ``` pub(crate) struct OpenSyn { pub(crate) nonce_encrypted_with_bob_pubkey: Vec, } @@ -331,11 +340,13 @@ where /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// +---------------+ /// /// ZExtUnit +/// ``` pub(crate) struct AuthPubKeyFsm<'a> { inner: &'a RwLock, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs index 23560e307e..46d3f349b4 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/auth/usrpwd.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; +use std::{collections::HashMap, fmt}; + use async_trait::async_trait; use rand::{CryptoRng, Rng}; -use std::{collections::HashMap, fmt}; use tokio::sync::RwLock; use zenoh_buffers::{ reader::{DidntRead, HasReader, Reader}, @@ -26,10 +26,13 @@ use zenoh_core::{bail, zasyncread, zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::hmac; use zenoh_protocol::common::{ZExtUnit, ZExtZ64, ZExtZBuf}; +use crate::unicast::establishment::{ext::auth::id, AcceptFsm, OpenFsm}; + mod ext { - use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; use zenoh_protocol::{zextunit, zextz64, zextzbuf}; + use super::{id::USRPWD, ZExtUnit, ZExtZ64, ZExtZBuf}; + pub(super) type InitSyn = zextunit!(USRPWD, false); pub(super) type InitAck = zextz64!(USRPWD, false); pub(super) type 
OpenSyn = zextzbuf!(USRPWD, false);
@@ -159,6 +162,8 @@ impl StateOpen {
pub(crate) struct StateAccept {
    nonce: u64,
}
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct UsrPwdId(pub Option<Vec<u8>>);

impl StateAccept {
    pub(crate) fn new<R>(prng: &mut R) -> Self
    where
@@ -212,25 +217,30 @@ impl<'a> AuthUsrPwdFsm<'a> {
/*************************************/
/*             InitSyn               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
/// +---------------+
///
/// ZExtUnit
+/// ```
/*************************************/
/*             InitAck               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
/// ~     nonce     ~
/// +---------------+
///
/// ZExtZ64
+/// ```
/*************************************/
/*             OpenSyn               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
/// ~     user      ~
@@ -239,6 +249,7 @@ impl<'a> AuthUsrPwdFsm<'a> {
/// +---------------+
///
/// ZExtZBuf
+/// ```
struct OpenSyn {
    user: Vec<u8>,
    hmac: Vec<u8>,
}
@@ -273,11 +284,13 @@ where
/*************************************/
/*             OpenAck               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
/// +---------------+
///
/// ZExtUnit
+/// ```

#[async_trait]
impl<'a> OpenFsm for &'a AuthUsrPwdFsm<'a> {
@@ -403,7 +416,7 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> {
    }

    type RecvOpenSynIn = (&'a mut StateAccept, Option);
-   type RecvOpenSynOut = ();
+   type RecvOpenSynOut = Vec<u8>; // the user ID is returned when the OpenSyn is successfully validated
    async fn recv_open_syn(
        self,
        input: Self::RecvOpenSynIn,
@@ -433,8 +446,8 @@ impl<'a> AcceptFsm for &'a AuthUsrPwdFsm<'a> {
        if hmac != open_syn.hmac {
            bail!("{S} Invalid password.");
        }
-
-       Ok(())
+       let username = open_syn.user.to_owned();
+       Ok(username)
    }

    type SendOpenAckIn = &'a StateAccept;
@@ -451,10 +464,12 @@ mod tests {
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    async fn authenticator_usrpwd_config() {
        async fn inner() {
-           use super::AuthUsrPwd;
            use std::{fs::File, io::Write};
+
            use zenoh_config::UsrPwdConf;
+           use super::AuthUsrPwd;
+
            /* [CONFIG] */
            let f1 = "zenoh-test-auth-usrpwd.txt";
diff --git a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs
index 2b57eb85db..1d4e995af6 100644
--- a/io/zenoh-transport/src/unicast/establishment/ext/compression.rs
+++ b/io/zenoh-transport/src/unicast/establishment/ext/compression.rs
@@ -11,9 +11,9 @@
// Contributors:
//   ZettaScale Zenoh Team,
//
-use crate::unicast::establishment::{AcceptFsm, OpenFsm};
-use async_trait::async_trait;
use core::marker::PhantomData;
+
+use async_trait::async_trait;
use zenoh_buffers::{
    reader::{DidntRead, Reader},
    writer::{DidntWrite, Writer},
@@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080};
use zenoh_protocol::transport::{init, open};
use zenoh_result::Error as ZError;

+use crate::unicast::establishment::{AcceptFsm, OpenFsm};
+
// Extension Fsm
pub(crate) struct CompressionFsm<'a> {
    _a: PhantomData<&'a ()>,
diff --git a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs
index 9dda9175b1..ff1efc90b9 100644
--- a/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs
+++ b/io/zenoh-transport/src/unicast/establishment/ext/lowlatency.rs
@@ -11,9 +11,9 @@
// Contributors:
//   ZettaScale Zenoh Team,
//
-use crate::unicast::establishment::{AcceptFsm, OpenFsm};
-use async_trait::async_trait;
use 
core::marker::PhantomData; + +use async_trait::async_trait; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::transport::{init, open}; use zenoh_result::Error as ZError; +use crate::unicast::establishment::{AcceptFsm, OpenFsm}; + // Extension Fsm pub(crate) struct LowLatencyFsm<'a> { _a: PhantomData<&'a ()>, diff --git a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs index f8e74779cf..51c4170755 100644 --- a/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs +++ b/io/zenoh-transport/src/unicast/establishment/ext/multilink.rs @@ -11,10 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::unicast::establishment::{ - ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, - AcceptFsm, OpenFsm, -}; use async_trait::async_trait; use rand::{CryptoRng, Rng}; use rsa::{BigUint, RsaPrivateKey, RsaPublicKey}; @@ -28,25 +24,34 @@ use zenoh_core::{zerror, Error as ZError, Result as ZResult}; use zenoh_crypto::PseudoRng; use zenoh_protocol::transport::{init, open}; +use crate::unicast::establishment::{ + ext::auth::pubkey::{self, AuthPubKey, AuthPubKeyFsm, ZPublicKey}, + AcceptFsm, OpenFsm, +}; + const KEY_SIZE: usize = 512; // Extension Fsm pub(crate) struct MultiLink { - pubkey: RwLock, + pubkey: Option>, } impl MultiLink { - pub(crate) fn make(rng: &mut R) -> ZResult + pub(crate) fn make(rng: &mut R, is_multilink: bool) -> ZResult where R: Rng + CryptoRng, { - let pri_key = RsaPrivateKey::new(rng, KEY_SIZE)?; - let pub_key = RsaPublicKey::from(&pri_key); - let mut auth = AuthPubKey::new(pub_key.into(), pri_key.into()); - auth.disable_lookup(); - Ok(Self { - pubkey: RwLock::new(auth), - }) + if is_multilink { + let pri_key = RsaPrivateKey::new(rng, KEY_SIZE)?; + let pub_key = RsaPublicKey::from(&pri_key); + let mut auth = AuthPubKey::new(pub_key.into(), pri_key.into()); + auth.disable_lookup(); + Ok(Self { + pubkey: Some(RwLock::new(auth)), + }) + } else { + Ok(Self { pubkey: None }) + } } pub(crate) fn open(&self, is_multilink: bool) -> StateOpen { @@ -69,13 +74,16 @@ impl MultiLink { pub(crate) fn fsm<'a>(&'a self, prng: &'a Mutex) -> MultiLinkFsm<'a> { MultiLinkFsm { - fsm: AuthPubKeyFsm::new(&self.pubkey, prng), + fsm: self + .pubkey + .is_some() + .then(|| AuthPubKeyFsm::new(self.pubkey.as_ref().unwrap(), prng)), } } } pub(crate) struct MultiLinkFsm<'a> { - fsm: AuthPubKeyFsm<'a>, + fsm: Option>, } /*************************************/ @@ -101,16 +109,12 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendInitSynIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_init_syn(&pubkey.0) - .await? 
- .map(|x| x.transmute()); + let r = fsm.send_init_syn(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } @@ -123,9 +127,9 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { const S: &str = "MultiLink extension - Recv InitAck."; let (state, mut ext) = input; - let mut pubkey = match state.pubkey.take() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (mut pubkey, fsm) = match (state.pubkey.take(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { @@ -136,8 +140,7 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { .read(&mut reader) .map_err(|_| zerror!("{S} Decoding error."))?; - self.fsm - .recv_init_ack((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_init_ack((&mut pubkey.0, Some(ext.transmute()))) .await?; state.pubkey = Some((pubkey.0, init_ack.bob_pubkey)); @@ -155,16 +158,12 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendOpenSynIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_open_syn(&pubkey.0) - .await? - .map(|x| x.transmute()); + let r = fsm.send_open_syn(&pubkey.0).await?.map(|x| x.transmute()); Ok(r) } @@ -175,15 +174,14 @@ impl<'a> OpenFsm for &'a MultiLinkFsm<'a> { input: Self::RecvOpenAckIn, ) -> Result { let (state, mut ext) = input; - let pubkey = match state.pubkey.as_mut() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (pubkey, fsm) = match (state.pubkey.as_mut(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { Some(ext) => { - self.fsm - .recv_open_ack((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_open_ack((&mut pubkey.0, Some(ext.transmute()))) .await?; } None => { @@ -279,9 +277,9 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { const S: &str = "MultiLink extension - Recv InitSyn."; let (state, mut ext) = input; - let mut pubkey = match state.pubkey.take() { - Some(pubkey) => pubkey, - None => return Ok(()), + let (mut pubkey, fsm) = match (state.pubkey.take(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(()), }; match ext.take() { @@ -292,8 +290,7 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { .read(&mut reader) .map_err(|_| zerror!("{S} Decoding error."))?; - self.fsm - .recv_init_syn((&mut pubkey.0, Some(ext.transmute()))) + fsm.recv_init_syn((&mut pubkey.0, Some(ext.transmute()))) .await?; state.pubkey = Some((pubkey.0, init_syn.alice_pubkey)); @@ -312,16 +309,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> { self, input: Self::SendInitAckIn, ) -> Result { - let pubkey = match input.pubkey.as_ref() { - Some(pubkey) => pubkey, - None => return Ok(None), + let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) { + (Some(pubkey), Some(fsm)) => (pubkey, fsm), + _ => return Ok(None), }; - let r = self - .fsm - .send_init_ack(&pubkey.0) - .await? 
-            .map(|x| x.transmute());
+        let r = fsm.send_init_ack(&pubkey.0).await?.map(|x| x.transmute());
        Ok(r)
    }
@@ -332,13 +325,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> {
        input: Self::RecvOpenSynIn,
    ) -> Result {
        let (state, ext) = input;
-       let pubkey = match state.pubkey.as_mut() {
-           Some(pubkey) => pubkey,
-           None => return Ok(()),
+       let (pubkey, fsm) = match (state.pubkey.as_mut(), self.fsm.as_ref()) {
+           (Some(pubkey), Some(fsm)) => (pubkey, fsm),
+           _ => return Ok(()),
        };

-       self.fsm
-           .recv_open_syn((&mut pubkey.0, ext.map(|x| x.transmute())))
+       fsm.recv_open_syn((&mut pubkey.0, ext.map(|x| x.transmute())))
            .await
    }
@@ -348,16 +340,12 @@ impl<'a> AcceptFsm for &'a MultiLinkFsm<'a> {
        self,
        input: Self::SendOpenAckIn,
    ) -> Result {
-       let pubkey = match input.pubkey.as_ref() {
-           Some(pubkey) => pubkey,
-           None => return Ok(None),
+       let (pubkey, fsm) = match (input.pubkey.as_ref(), self.fsm.as_ref()) {
+           (Some(pubkey), Some(fsm)) => (pubkey, fsm),
+           _ => return Ok(None),
        };

-       let r = self
-           .fsm
-           .send_open_ack(&pubkey.0)
-           .await?
-           .map(|x| x.transmute());
+       let r = fsm.send_open_ack(&pubkey.0).await?.map(|x| x.transmute());
        Ok(r)
    }
}
diff --git a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs
index 4626ec5998..f749073805 100644
--- a/io/zenoh-transport/src/unicast/establishment/ext/qos.rs
+++ b/io/zenoh-transport/src/unicast/establishment/ext/qos.rs
@@ -11,9 +11,9 @@
// Contributors:
//   ZettaScale Zenoh Team,
//
-use crate::unicast::establishment::{AcceptFsm, OpenFsm};
-use async_trait::async_trait;
use core::marker::PhantomData;
+
+use async_trait::async_trait;
use zenoh_buffers::{
    reader::{DidntRead, Reader},
    writer::{DidntWrite, Writer},
@@ -22,6 +22,8 @@ use zenoh_codec::{RCodec, WCodec, Zenoh080};
use zenoh_protocol::transport::{init, open};
use zenoh_result::Error as ZError;

+use crate::unicast::establishment::{AcceptFsm, OpenFsm};
+
// Extension Fsm
pub(crate) struct QoSFsm<'a> {
    _a: PhantomData<&'a ()>,
diff --git a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs
index 2aec0cf508..025aaaef44 100644
--- a/io/zenoh-transport/src/unicast/establishment/ext/shm.rs
+++ b/io/zenoh-transport/src/unicast/establishment/ext/shm.rs
@@ -11,31 +11,117 @@
// Contributors:
//   ZettaScale Zenoh Team,
//
-use crate::unicast::{
-    establishment::{AcceptFsm, OpenFsm},
-    shared_memory_unicast::{Challenge, SharedMemoryUnicast},
-};
+use std::ops::Deref;
+
use async_trait::async_trait;
-use std::convert::TryInto;
+use rand::{Rng, SeedableRng};
use zenoh_buffers::{
    reader::{DidntRead, HasReader, Reader},
    writer::{DidntWrite, HasWriter, Writer},
};
use zenoh_codec::{RCodec, WCodec, Zenoh080};
-use zenoh_core::zasyncwrite;
+use zenoh_core::bail;
+use zenoh_crypto::PseudoRng;
use zenoh_protocol::transport::{init, open};
-use zenoh_result::{zerror, Error as ZError};
-use zenoh_shm::SharedMemoryBufInfo;
+use zenoh_result::{zerror, Error as ZError, ZResult};
+use zenoh_shm::{api::common::types::ProtocolID, posix_shm::array::ArrayInSHM};
+
+use crate::unicast::establishment::{AcceptFsm, OpenFsm};
+
+/*************************************/
+/*             Segment               */
+/*************************************/
+const AUTH_SEGMENT_PREFIX: &str = "auth";
+
+pub(crate) type AuthSegmentID = u32;
+pub(crate) type AuthChallenge = u64;
+
+const LEN_INDEX: usize = 0;
+const CHALLENGE_INDEX: usize = 1;
+const ID_START_INDEX: usize = 2;
+
+#[derive(Debug)]
+pub struct AuthSegment {
+    array: ArrayInSHM,
+}
+
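[Editorial aside — not part of the patch.] The `AuthSegment` above is just a small shared-memory array laid out as `[protocol-count, challenge, protocol IDs...]`: one side publishes a random challenge in it, and the other side proves SHM reachability by reading that challenge back and echoing it during establishment. The sketch below illustrates only that layout and check; it is runnable plain Rust in which a `Vec<u64>` stands in for the real `ArrayInSHM` segment, and the helpers `encode_segment`/`read_challenge` are invented for the example.

```rust
// Stand-in sketch: a Vec<u64> plays the role of the "auth" SHM segment.
// The index layout mirrors LEN_INDEX / CHALLENGE_INDEX / ID_START_INDEX above.
const LEN_INDEX: usize = 0;
const CHALLENGE_INDEX: usize = 1;
const ID_START_INDEX: usize = 2;

/// Lay the segment out as `[protocol-count, challenge, protocol IDs...]`,
/// the same shape AuthSegment::create() writes into shared memory.
fn encode_segment(challenge: u64, protocols: &[u32]) -> Vec<u64> {
    let mut seg = vec![0u64; ID_START_INDEX + protocols.len()];
    seg[LEN_INDEX] = protocols.len() as u64;
    seg[CHALLENGE_INDEX] = challenge;
    for (i, p) in protocols.iter().enumerate() {
        seg[ID_START_INDEX + i] = *p as u64;
    }
    seg
}

/// What the remote peer does after opening the segment: read the challenge
/// so it can be echoed back in the next handshake message.
fn read_challenge(seg: &[u64]) -> u64 {
    seg[CHALLENGE_INDEX]
}

fn main() {
    // Alice publishes a segment with a random challenge (PseudoRng in the patch).
    let challenge = 0xC0FFEE_u64;
    let alice_segment = encode_segment(challenge, &[0, 1]);

    // Bob "maps" Alice's segment and echoes the challenge he found there.
    let echoed = read_challenge(&alice_segment);

    // Alice enables SHM only if the echo matches — the same comparison as
    // `challenge != init_ack.alice_challenge` in the FSM code below.
    assert_eq!(echoed, challenge);
    println!("SHM verified; protocols: {:?}", &alice_segment[ID_START_INDEX..]);
}
```

The design point mirrored here: the challenge can only be read by a process that genuinely mapped the segment, so a correct echo proves both peers share the same memory domain, which is what lets this patch drop the old buffer-based probing via `SharedMemoryBufInfo`.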
+impl AuthSegment {
+    pub fn create(challenge: AuthChallenge, shm_protocols: &[ProtocolID]) -> ZResult<Self> {
+        let array = ArrayInSHM::::create(
+            ID_START_INDEX + shm_protocols.len(),
+            AUTH_SEGMENT_PREFIX,
+        )?;
+        unsafe {
+            (*array.elem_mut(LEN_INDEX)) = shm_protocols.len() as AuthChallenge;
+            (*array.elem_mut(CHALLENGE_INDEX)) = challenge;
+            for elem in ID_START_INDEX..array.elem_count() {
+                (*array.elem_mut(elem)) = shm_protocols[elem - ID_START_INDEX] as u64;
+            }
+        };
+        Ok(Self { array })
+    }
+
+    pub fn open(id: AuthSegmentID) -> ZResult<Self> {
+        let array = ArrayInSHM::open(id, AUTH_SEGMENT_PREFIX)?;
+        Ok(Self { array })
+    }
+
+    pub fn challenge(&self) -> AuthChallenge {
+        unsafe { *self.array.elem(CHALLENGE_INDEX) }
+    }
+
+    pub fn protocols(&self) -> Vec<ProtocolID> {
+        let mut result = vec![];
+        for elem in ID_START_INDEX..self.array.elem_count() {
+            result.push(unsafe { *self.array.elem(elem) as u32 });
+        }
+        result
+    }
+
+    pub fn id(&self) -> AuthSegmentID {
+        self.array.id()
+    }
+}
+
+/*************************************/
+/*           Authenticator           */
+/*************************************/
+pub(crate) struct AuthUnicast {
+    segment: AuthSegment,
+}
+
+impl Deref for AuthUnicast {
+    type Target = AuthSegment;
+
+    fn deref(&self) -> &Self::Target {
+        &self.segment
+    }
+}
+
+impl AuthUnicast {
+    pub fn new(shm_protocols: &[ProtocolID]) -> ZResult<Self> {
+        // Create a challenge for session establishment
+        let mut prng = PseudoRng::from_entropy();
+        let nonce = prng.gen();
+
+        // allocate SHM segment with challenge
+        let segment = AuthSegment::create(nonce, shm_protocols)?;
+
+        Ok(Self { segment })
+    }
+}

/*************************************/
/*             InitSyn               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
-/// ~ ShmMemBufInfo ~
+/// ~  Segment id   ~
/// +---------------+
+/// ```
pub(crate) struct InitSyn {
-    pub(crate) alice_info: SharedMemoryBufInfo,
+    pub(crate) alice_segment: AuthSegmentID,
}

// Codec
@@ -46,7 +132,7 @@ where
    type Output = Result<(), DidntWrite>;

    fn write(self, writer: &mut W, x: &InitSyn) -> Self::Output {
-       self.write(&mut *writer, &x.alice_info)?;
+       self.write(&mut *writer, &x.alice_segment)?;
        Ok(())
    }
}
@@ -58,23 +144,25 @@ where
    type Error = DidntRead;

    fn read(self, reader: &mut R) -> Result<InitSyn, Self::Error> {
-       let alice_info: SharedMemoryBufInfo = self.read(&mut *reader)?;
-       Ok(InitSyn { alice_info })
+       let alice_segment = self.read(&mut *reader)?;
+       Ok(InitSyn { alice_segment })
    }
}

/*************************************/
/*             InitAck               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// +-+-+-+-+-+-+-+-+
/// ~   challenge   ~
/// +---------------+
-/// ~ ShmMemBufInfo ~
+/// ~  Segment id   ~
/// +---------------+
+/// ```
struct InitAck {
    alice_challenge: u64,
-    bob_info: SharedMemoryBufInfo,
+    bob_segment: AuthSegmentID,
}

impl WCodec<&InitAck, &mut W> for Zenoh080
@@ -85,7 +173,7 @@ where
    fn write(self, writer: &mut W, x: &InitAck) -> Self::Output {
        self.write(&mut *writer, x.alice_challenge)?;
-       self.write(&mut *writer, &x.bob_info)?;
+       self.write(&mut *writer, &x.bob_segment)?;
        Ok(())
    }
}
@@ -98,10 +186,10 @@ where
    fn read(self, reader: &mut R) -> Result<InitAck, Self::Error> {
        let alice_challenge: u64 = self.read(&mut *reader)?;
-       let bob_info: SharedMemoryBufInfo = self.read(&mut *reader)?;
+       let bob_segment = self.read(&mut *reader)?;
        Ok(InitAck {
            alice_challenge,
-           bob_info,
+           bob_segment,
        })
    }
}

/*************************************/
/*             OpenSyn               */
/*************************************/
+/// ```text
/// 7 6 5 4 3 2 1 0
/// 
+-+-+-+-+-+-+-+-+ /// ~ challenge ~ /// +---------------+ +/// ``` /*************************************/ /* OpenAck */ /*************************************/ +/// ```text /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ ack ~ /// +---------------+ +/// ``` // Extension Fsm pub(crate) struct ShmFsm<'a> { - inner: &'a SharedMemoryUnicast, + inner: &'a AuthUnicast, } impl<'a> ShmFsm<'a> { - pub(crate) const fn new(inner: &'a SharedMemoryUnicast) -> Self { + pub(crate) const fn new(inner: &'a AuthUnicast) -> Self { Self { inner } } } @@ -136,18 +228,29 @@ impl<'a> ShmFsm<'a> { /*************************************/ /* OPEN */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct StateOpen { - is_shm: bool, + // false by default, will be switched to true at the end of open_ack + negotiated_to_use_shm: bool, } impl StateOpen { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } + pub(crate) const fn new() -> Self { + Self { + negotiated_to_use_shm: false, + } } - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm + pub(crate) const fn negotiated_to_use_shm(&self) -> bool { + self.negotiated_to_use_shm + } + + #[cfg(test)] + pub(crate) fn rand() -> Self { + let mut rng = rand::thread_rng(); + Self { + negotiated_to_use_shm: rng.gen_bool(0.5), + } } } @@ -159,16 +262,12 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { type SendInitSynOut = Option; async fn send_init_syn( self, - state: Self::SendInitSynIn, + _state: Self::SendInitSynIn, ) -> Result { const S: &str = "Shm extension - Send InitSyn."; - if !state.is_shm() { - return Ok(None); - } - let init_syn = InitSyn { - alice_info: self.inner.challenge.info.clone(), + alice_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -181,22 +280,16 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { Ok(Some(init::ext::Shm::new(buff.into()))) } - type RecvInitAckIn = (&'a mut StateOpen, Option); - type RecvInitAckOut = Challenge; + type RecvInitAckIn = Option; + type RecvInitAckOut = Option; async fn recv_init_ack( self, - input: Self::RecvInitAckIn, + mut input: Self::RecvInitAckIn, ) -> Result { const S: &str = "Shm extension - Recv InitAck."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.take() else { + return Ok(None); }; // Decode the extension @@ -204,18 +297,11 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_ack): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); }; // Alice challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Bob has correctly read Alice challenge if challenge != init_ack.alice_challenge { @@ -225,35 +311,22 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { init_ack.alice_challenge, challenge ); - state.is_shm = false; - return Ok(0); + return Ok(None); } - // Read Bob's SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_ack.bob_info) { + // Read Bob's SHM Segment + let bob_segment = match AuthSegment::open(init_ack.bob_segment) { Ok(buff) => buff, Err(e) => { tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); - 
} - }; - - // Bob challenge as seen by Alice - let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - state.is_shm = false; - return Ok(0); + return Ok(None); } }; - let bob_challenge = u64::from_le_bytes(bytes); - Ok(bob_challenge) + Ok(Some(bob_segment)) } - type SendOpenSynIn = (&'a StateOpen, Self::RecvInitAckOut); + type SendOpenSynIn = &'a Self::RecvInitAckOut; type SendOpenSynOut = Option; async fn send_open_syn( self, @@ -261,12 +334,9 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { ) -> Result { // const S: &str = "Shm extension - Send OpenSyn."; - let (state, bob_challenge) = input; - if !state.is_shm() { - return Ok(None); - } - - Ok(Some(open::ext::Shm::new(bob_challenge))) + Ok(input + .as_ref() + .map(|val| open::ext::Shm::new(val.challenge()))) } type RecvOpenAckIn = (&'a mut StateOpen, Option); @@ -278,22 +348,17 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenAck."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; if ext.value != 1 { tracing::trace!("{} Invalid value.", S); - state.is_shm = false; return Ok(()); } - state.is_shm = true; + state.negotiated_to_use_shm = true; Ok(()) } } @@ -302,27 +367,7 @@ impl<'a> OpenFsm for &'a ShmFsm<'a> { /* ACCEPT */ /*************************************/ -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) struct StateAccept { - is_shm: bool, -} - -impl StateAccept { - pub(crate) const fn new(is_shm: bool) -> Self { - Self { is_shm } - } - - pub(crate) const fn is_shm(&self) -> bool { - self.is_shm - } - - #[cfg(test)] - pub(crate) fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - Self::new(rng.gen_bool(0.5)) - } -} +pub(crate) type StateAccept = StateOpen; // Codec impl WCodec<&StateAccept, &mut W> for Zenoh080 @@ -332,8 +377,8 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &StateAccept) -> Self::Output { - let is_shm = u8::from(x.is_shm); - self.write(&mut *writer, is_shm)?; + let negotiated_to_use_shm = u8::from(x.negotiated_to_use_shm); + self.write(&mut *writer, negotiated_to_use_shm)?; Ok(()) } } @@ -345,9 +390,11 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let is_shm: u8 = self.read(&mut *reader)?; - let is_shm = is_shm == 1; - Ok(StateAccept { is_shm }) + let negotiated_to_use_shm: u8 = self.read(&mut *reader)?; + let negotiated_to_use_shm: bool = negotiated_to_use_shm == 1; + Ok(StateAccept { + negotiated_to_use_shm, + }) } } @@ -355,22 +402,16 @@ where impl<'a> AcceptFsm for &'a ShmFsm<'a> { type Error = ZError; - type RecvInitSynIn = (&'a mut StateAccept, Option); - type RecvInitSynOut = Challenge; + type RecvInitSynIn = Option; + type RecvInitSynOut = Option; async fn recv_init_syn( self, input: Self::RecvInitSynIn, ) -> Result { const S: &str = "Shm extension - Recv InitSyn."; - let (state, mut ext) = input; - if !state.is_shm() { - return Ok(0); - } - - let Some(ext) = ext.take() else { - state.is_shm = false; - return Ok(0); + let Some(ext) = input.as_ref() else { + return Ok(None); }; // Decode the extension @@ -378,35 +419,22 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { let mut reader = ext.value.reader(); let Ok(init_syn): Result = codec.read(&mut reader) else { tracing::trace!("{} Decoding error.", S); - state.is_shm = false; - return Ok(0); + bail!(""); }; - // Read Alice's 
SharedMemoryBuf - let shm_buff = match zasyncwrite!(self.inner.reader).read_shmbuf(&init_syn.alice_info) { + // Read Alice's SHM Segment + let alice_segment = match AuthSegment::open(init_syn.alice_segment) { Ok(buff) => buff, Err(e) => { tracing::trace!("{} {}", S, e); - state.is_shm = false; - return Ok(0); + return Ok(None); } }; - // Alice challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = match shm_buff.as_slice().try_into() { - Ok(bytes) => bytes, - Err(_) => { - tracing::trace!("{} Failed to read remote Shm.", S); - state.is_shm = false; - return Ok(0); - } - }; - let alice_challenge = u64::from_le_bytes(bytes); - - Ok(alice_challenge) + Ok(Some(alice_segment)) } - type SendInitAckIn = (&'a StateAccept, Self::RecvInitSynOut); + type SendInitAckIn = &'a Self::RecvInitSynOut; type SendInitAckOut = Option; async fn send_init_ack( self, @@ -414,14 +442,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { ) -> Result { const S: &str = "Shm extension - Send InitAck."; - let (state, alice_challenge) = input; - if !state.is_shm() { + let Some(alice_segment) = input.as_ref() else { return Ok(None); - } + }; let init_syn = InitAck { - alice_challenge, - bob_info: self.inner.challenge.info.clone(), + alice_challenge: alice_segment.challenge(), + bob_segment: self.inner.id(), }; let codec = Zenoh080::new(); @@ -443,23 +470,13 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { const S: &str = "Shm extension - Recv OpenSyn."; let (state, mut ext) = input; - if !state.is_shm() { - return Ok(()); - } let Some(ext) = ext.take() else { - state.is_shm = false; return Ok(()); }; // Bob challenge as seen by Bob - let bytes: [u8; std::mem::size_of::()] = self - .inner - .challenge - .as_slice() - .try_into() - .map_err(|e| zerror!("{}", e))?; - let challenge = u64::from_le_bytes(bytes); + let challenge = self.inner.challenge(); // Verify that Alice has correctly read Bob challenge let bob_challnge = ext.value; @@ -470,26 +487,25 @@ impl<'a> AcceptFsm for &'a ShmFsm<'a> { bob_challnge, challenge ); - state.is_shm = false; return Ok(()); } + state.negotiated_to_use_shm = true; + Ok(()) } - type SendOpenAckIn = &'a mut StateAccept; + type SendOpenAckIn = &'a StateAccept; type SendOpenAckOut = Option; async fn send_open_ack( self, - state: Self::SendOpenAckIn, + input: Self::SendOpenAckIn, ) -> Result { // const S: &str = "Shm extension - Send OpenAck."; - if !state.is_shm() { - return Ok(None); - } - - state.is_shm = true; - Ok(Some(open::ext::Shm::new(1))) + Ok(match input.negotiated_to_use_shm { + true => Some(open::ext::Shm::new(1)), + false => None, + }) } } diff --git a/io/zenoh-transport/src/unicast/establishment/mod.rs b/io/zenoh-transport/src/unicast/establishment/mod.rs index f79aa826d0..ca46b40ed1 100644 --- a/io/zenoh-transport/src/unicast/establishment/mod.rs +++ b/io/zenoh-transport/src/unicast/establishment/mod.rs @@ -16,7 +16,6 @@ pub(super) mod cookie; pub mod ext; pub(crate) mod open; -use crate::common::seq_num; use async_trait::async_trait; use cookie::*; use sha3::{ @@ -24,10 +23,12 @@ use sha3::{ Shake128, }; use zenoh_protocol::{ - core::{Field, Resolution, ZenohId}, + core::{Field, Resolution, ZenohIdProto}, transport::TransportSn, }; +use crate::common::seq_num; + /*************************************/ /* TRAITS */ /*************************************/ @@ -100,7 +101,11 @@ pub trait AcceptFsm { /*************************************/ /* FUNCTIONS */ /*************************************/ -pub(super) fn compute_sn(zid1: ZenohId, zid2: ZenohId, resolution: Resolution) -> 
TransportSn { +pub(super) fn compute_sn( + zid1: ZenohIdProto, + zid2: ZenohIdProto, + resolution: Resolution, +) -> TransportSn { // Create a random yet deterministic initial_sn. // In case of multilink it's important that the same initial_sn is used for every connection attempt. // Instead of storing the state everywhere, we make sure that the we always compute the same initial_sn. diff --git a/io/zenoh-transport/src/unicast/establishment/open.rs b/io/zenoh-transport/src/unicast/establishment/open.rs index bb5db2336e..a9e797228e 100644 --- a/io/zenoh-transport/src/unicast/establishment/open.rs +++ b/io/zenoh-transport/src/unicast/establishment/open.rs @@ -11,29 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use crate::unicast::shared_memory_unicast::Challenge; -use crate::{ - common::batch::BatchConfig, - unicast::{ - establishment::{compute_sn, ext, OpenFsm}, - link::{ - LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, - TransportLinkUnicastDirection, - }, - TransportConfigUnicast, TransportUnicast, - }, - TransportManager, -}; -use async_trait::async_trait; use std::time::Duration; + +use async_trait::async_trait; use zenoh_buffers::ZSlice; #[cfg(feature = "transport_auth")] use zenoh_core::zasynclock; use zenoh_core::{zcondfeat, zerror}; use zenoh_link::LinkUnicast; use zenoh_protocol::{ - core::{Field, Resolution, WhatAmI, ZenohId}, + core::{Field, Resolution, WhatAmI, ZenohIdProto}, transport::{ batch_size, close, BatchSize, Close, InitSyn, OpenSyn, TransportBody, TransportMessage, TransportSn, @@ -41,6 +28,25 @@ use zenoh_protocol::{ }; use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use super::ext::shm::AuthSegment; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; +use crate::{ + common::batch::BatchConfig, + unicast::{ + establishment::{compute_sn, ext, OpenFsm}, + link::{ + LinkUnicastWithOpenAck, TransportLinkUnicast, TransportLinkUnicastConfig, + TransportLinkUnicastDirection, + }, + TransportConfigUnicast, TransportUnicast, + }, + TransportManager, +}; + type OpenError = (zenoh_result::Error, Option); struct StateTransport { @@ -71,31 +77,33 @@ struct State { // InitSyn struct SendInitSynIn { mine_version: u8, - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_whatami: WhatAmI, } // InitAck struct RecvInitAckOut { - other_zid: ZenohId, + other_zid: ZenohIdProto, other_whatami: WhatAmI, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } // OpenSyn struct SendOpenSynIn { - mine_zid: ZenohId, + mine_zid: ZenohIdProto, mine_lease: Duration, - other_zid: ZenohId, + other_zid: ZenohIdProto, other_cookie: ZSlice, #[cfg(feature = "shared-memory")] - ext_shm: Challenge, + ext_shm: Option, } struct SendOpenSynOut { mine_initial_sn: TransportSn, + #[cfg(feature = "shared-memory")] + ext_shm: Option, } // OpenAck @@ -110,7 +118,7 @@ struct OpenLink<'a> { #[cfg(feature = "transport_multilink")] ext_mlink: ext::multilink::MultiLinkFsm<'a>, #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm<'a>, + ext_shm: Option>, #[cfg(feature = "transport_auth")] ext_auth: ext::auth::AuthFsm<'a>, ext_lowlatency: ext::lowlatency::LowLatencyFsm<'a>, @@ -138,14 +146,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm + 
#[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext) => ext .send_init_syn(&state.transport.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -191,6 +199,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { batch_size: state.transport.batch_size, resolution: state.transport.resolution, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -199,6 +208,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { } .into(); + tracing::trace!("Establishment Open InitSyn: {}. Sent: {:?}", link, msg); + let _ = link .send(&msg) .await @@ -220,6 +231,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open InitAck: {}. Received: {:?}", link, msg); + let init_ack = match msg.body { TransportBody::InitAck(init_ack) => init_ack, TransportBody::Close(Close { reason, .. }) => { @@ -295,11 +308,13 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - let shm_challenge = self - .ext_shm - .recv_init_ack((&mut state.transport.ext_shm, init_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + let shm_segment = match self.ext_shm.as_ref() { + Some(ext) => ext + .recv_init_ack(init_ack.ext_shm) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))?, + None => None, + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -333,7 +348,7 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { other_whatami: init_ack.whatami, other_cookie: init_ack.cookie, #[cfg(feature = "shared-memory")] - ext_shm: shm_challenge, + ext_shm: shm_segment, }; Ok(output) } @@ -354,14 +369,14 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .map_err(|e| (e, Some(close::reason::GENERIC)))?; // Extension Shm - let ext_shm = zcondfeat!( - "shared-memory", - self.ext_shm - .send_open_syn((&state.transport.ext_shm, input.ext_shm)) + #[cfg(feature = "shared-memory")] + let ext_shm = match self.ext_shm.as_ref() { + Some(ext_shm) => ext_shm + .send_open_syn(&input.ext_shm) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?, - None - ); + None => None, + }; // Extension Auth let ext_auth = zcondfeat!( @@ -403,11 +418,12 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Build and send an OpenSyn message let mine_initial_sn = compute_sn(input.mine_zid, input.other_zid, state.transport.resolution); - let message: TransportMessage = OpenSyn { + let msg: TransportMessage = OpenSyn { lease: input.mine_lease, initial_sn: mine_initial_sn, cookie: input.other_cookie, ext_qos, + #[cfg(feature = "shared-memory")] ext_shm, ext_auth, ext_mlink, @@ -417,11 +433,17 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .into(); let _ = link - .send(&message) + .send(&msg) .await .map_err(|e| (e, Some(close::reason::GENERIC)))?; - let output = SendOpenSynOut { mine_initial_sn }; + tracing::trace!("Establishment Open OpenSyn: {}. Sent: {:?}", link, msg); + + let output = SendOpenSynOut { + mine_initial_sn, + #[cfg(feature = "shared-memory")] + ext_shm: input.ext_shm, + }; Ok(output) } @@ -438,6 +460,8 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { .await .map_err(|e| (e, Some(close::reason::INVALID)))?; + tracing::trace!("Establishment Open OpenAck: {}. 
Received: {:?}", link, msg); + let open_ack = match msg.body { TransportBody::OpenAck(open_ack) => open_ack, TransportBody::Close(Close { reason, .. }) => { @@ -471,10 +495,11 @@ impl<'a, 'b: 'a> OpenFsm for &'a mut OpenLink<'b> { // Extension Shm #[cfg(feature = "shared-memory")] - self.ext_shm - .recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) - .await - .map_err(|e| (e, Some(close::reason::GENERIC)))?; + if let Some(ext) = self.ext_shm.as_ref() { + ext.recv_open_ack((&mut state.transport.ext_shm, open_ack.ext_shm)) + .await + .map_err(|e| (e, Some(close::reason::GENERIC)))? + }; // Extension Auth #[cfg(feature = "transport_auth")] @@ -531,7 +556,12 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] ext_mlink: manager.state.unicast.multilink.fsm(&manager.prng), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::ShmFsm::new(&manager.state.unicast.shm), + ext_shm: manager + .state + .unicast + .auth_shm + .as_ref() + .map(ext::shm::ShmFsm::new), #[cfg(feature = "transport_auth")] ext_auth: manager.state.unicast.authenticator.fsm(&manager.prng), ext_lowlatency: ext::lowlatency::LowLatencyFsm::new(), @@ -555,7 +585,7 @@ pub(crate) async fn open_link( .multilink .open(manager.config.unicast.max_links > 1), #[cfg(feature = "shared-memory")] - ext_shm: ext::shm::StateOpen::new(manager.config.unicast.is_shm), + ext_shm: ext::shm::StateOpen::new(), ext_lowlatency: ext::lowlatency::StateOpen::new(manager.config.unicast.is_lowlatency), }, @@ -619,8 +649,13 @@ pub(crate) async fn open_link( #[cfg(feature = "transport_multilink")] multilink: state.transport.ext_mlink.multilink(), #[cfg(feature = "shared-memory")] - is_shm: state.transport.ext_shm.is_shm(), + shm: match state.transport.ext_shm.negotiated_to_use_shm() { + true => osyn_out.ext_shm.map(TransportShmConfig::new), + false => None, + }, is_lowlatency: state.transport.ext_lowlatency.is_lowlatency(), + #[cfg(feature = "auth_usrpwd")] + auth_id: UsrPwdId(None), }; let o_config = TransportLinkUnicastConfig { diff --git a/io/zenoh-transport/src/unicast/establishment/properties.rs b/io/zenoh-transport/src/unicast/establishment/properties.rs deleted file mode 100644 index e259b650ab..0000000000 --- a/io/zenoh-transport/src/unicast/establishment/properties.rs +++ /dev/null @@ -1,132 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::{ - convert::TryFrom, - ops::{Deref, DerefMut}, -}; -use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_protocol::core::Property; -use zenoh_result::{bail, zerror, Error as ZError, ZResult}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct EstablishmentProperties(Vec); - -impl Deref for EstablishmentProperties { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for EstablishmentProperties { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl EstablishmentProperties { - pub(super) fn new() -> Self { - EstablishmentProperties(vec![]) - } - - pub(super) fn insert(&mut self, p: Property) -> ZResult<()> { - if self.0.iter().any(|x| x.key == p.key) { - bail!("Property {} already exists", p.key) - } - self.0.push(p); - Ok(()) - } - - pub(super) fn remove(&mut self, key: u64) -> Option { - self.0 - .iter() - .position(|x| x.key == key) - .map(|i| self.0.remove(i)) - } -} - -impl TryFrom<&EstablishmentProperties> for Attachment { - type Error = ZError; - - fn try_from(eps: &EstablishmentProperties) -> Result { - if eps.is_empty() { - bail!("Can not create an attachment with zero properties") - } - - let mut zbuf = ZBuf::empty(); - let mut writer = zbuf.writer(); - let codec = Zenoh080::new(); - - codec - .write(&mut writer, eps.0.as_slice()) - .map_err(|_| zerror!(""))?; - - let attachment = Attachment::new(zbuf); - Ok(attachment) - } -} - -impl TryFrom> for EstablishmentProperties { - type Error = ZError; - - fn try_from(mut ps: Vec) -> Result { - let mut eps = EstablishmentProperties::new(); - for p in ps.drain(..) 
{ - eps.insert(p)?; - } - - Ok(eps) - } -} - -impl TryFrom<&Attachment> for EstablishmentProperties { - type Error = ZError; - - fn try_from(att: &Attachment) -> Result { - let mut reader = att.buffer.reader(); - let codec = Zenoh080::new(); - - let ps: Vec = codec.read(&mut reader).map_err(|_| zerror!(""))?; - EstablishmentProperties::try_from(ps) - } -} - -impl EstablishmentProperties { - #[cfg(test)] - pub fn rand() -> Self { - use rand::Rng; - - const MIN: usize = 1; - const MAX: usize = 8; - - let mut rng = rand::thread_rng(); - - let mut eps = EstablishmentProperties::new(); - for _ in MIN..=MAX { - loop { - let key: u64 = rng.gen(); - let mut value = vec![0u8; rng.gen_range(MIN..=MAX)]; - rng.fill(&mut value[..]); - let p = Property { key, value }; - if eps.insert(p).is_ok() { - break; - } - } - } - - eps - } -} diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index 9a8f7f3dbc..736360db63 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; -use std::fmt; -use std::sync::Arc; +use std::{fmt, sync::Arc}; + use zenoh_buffers::{BBuf, ZSlice, ZSliceBuffer}; use zenoh_core::zcondfeat; use zenoh_link::{Link, LinkUnicast}; use zenoh_protocol::transport::{BatchSize, Close, OpenAck, TransportMessage}; use zenoh_result::{zerror, ZResult}; +use crate::common::batch::{BatchConfig, Decode, Encode, Finalize, RBatch, WBatch}; + #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(crate) enum TransportLinkUnicastDirection { Inbound, @@ -66,9 +67,7 @@ impl TransportLinkUnicast { .batch .is_compression .then_some(BBuf::with_capacity( - lz4_flex::block::get_maximum_output_size( - self.config.batch.max_buffer_size() - ), + lz4_flex::block::get_maximum_output_size(self.config.batch.mtu as usize), )), None ), @@ -209,7 +208,7 @@ impl TransportLinkUnicastRx { pub async fn recv_batch(&mut self, buff: C) -> ZResult where C: Fn() -> T + Copy, - T: ZSliceBuffer + 'static, + T: AsMut<[u8]> + ZSliceBuffer + 'static, { const ERR: &str = "Read error from link: "; @@ -222,19 +221,19 @@ impl TransportLinkUnicastRx { // Read the bytes let slice = into - .as_mut_slice() + .as_mut() .get_mut(len.len()..len.len() + l) .ok_or_else(|| zerror!("{ERR}{self}. Invalid batch length or buffer size."))?; self.link.read_exact(slice).await?; len.len() + l } else { // Read the bytes - self.link.read(into.as_mut_slice()).await? + self.link.read(into.as_mut()).await? }; // tracing::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); - let buffer = ZSlice::make(Arc::new(into), 0, end) + let buffer = ZSlice::new(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. 
ZSlice index(es) out of bounds"))?; let mut batch = RBatch::new(self.batch, buffer); batch diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index ff4b8c3036..3ba1cd724f 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -11,24 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; -#[cfg(feature = "stats")] -use crate::stats::TransportStats; -use crate::unicast::link::TransportLinkUnicast; -use crate::unicast::link::TransportLinkUnicastRx; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; + use tokio::sync::RwLock; use tokio_util::sync::CancellationToken; use zenoh_buffers::{writer::HasWriter, ZSlice}; use zenoh_codec::*; use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link::LinkUnicast; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency}; +use zenoh_protocol::transport::{KeepAlive, TransportBodyLowLatency, TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; use zenoh_runtime::ZRuntime; +use super::transport::TransportUnicastLowlatency; +#[cfg(feature = "stats")] +use crate::stats::TransportStats; +use crate::unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx}; + pub(crate) async fn send_with_link( link: &LinkUnicast, msg: TransportMessageLowLatency, @@ -153,11 +152,7 @@ impl TransportUnicastLowlatency { // The pool of buffers let pool = { - let mtu = if is_streamed { - link_rx.batch.mtu as usize - } else { - link_rx.batch.max_buffer_size() - }; + let mtu = link_rx.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; @@ -180,7 +175,7 @@ impl TransportUnicastLowlatency { } // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); c_transport.read_messages(zslice, &link_rx.link).await?; } diff --git a/io/zenoh-transport/src/unicast/lowlatency/rx.rs b/io/zenoh-transport/src/unicast/lowlatency/rx.rs index 4be94cc1a0..3dd499000d 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/rx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/rx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_buffers::{ reader::{HasReader, Reader}, ZSlice, @@ -22,6 +21,8 @@ use zenoh_link::LinkUnicast; use zenoh_protocol::{network::NetworkMessage, transport::TransportMessageLowLatency}; use zenoh_result::{zerror, ZResult}; +use super::transport::TransportUnicastLowlatency; + /*************************************/ /* TRANSPORT RX */ /*************************************/ @@ -35,8 +36,11 @@ impl TransportUnicastLowlatency { if let Some(callback) = callback.as_ref() { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader)?; + if self.config.shm.is_some() { + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + tracing::debug!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } callback.handle_message(msg) diff --git a/io/zenoh-transport/src/unicast/lowlatency/transport.rs b/io/zenoh-transport/src/unicast/lowlatency/transport.rs index 2c52df4810..c602dcf806 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/transport.rs +++ 
b/io/zenoh-transport/src/unicast/lowlatency/transport.rs @@ -11,33 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + sync::{Arc, RwLock as SyncRwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{WhatAmI, ZenohIdProto}, + network::NetworkMessage, + transport::{close, Close, TransportBodyLowLatency, TransportMessageLowLatency, TransportSn}, +}; +use zenoh_result::{zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ unicast::{ + authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicast}, transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, TransportConfigUnicast, }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::sync::{Arc, RwLock as SyncRwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock}; -use tokio_util::sync::CancellationToken; -use tokio_util::task::TaskTracker; -use zenoh_core::{zasynclock, zasyncread, zasyncwrite, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::transport::TransportBodyLowLatency; -use zenoh_protocol::transport::TransportMessageLowLatency; -use zenoh_protocol::transport::{Close, TransportSn}; -use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, - transport::close, -}; -use zenoh_result::{zerror, ZResult}; /*************************************/ /* LOW-LATENCY TRANSPORT */ @@ -183,17 +184,32 @@ impl TransportUnicastTrait for TransportUnicastLowlatency { vec![] } - fn get_zid(&self) -> ZenohId { + fn get_zid(&self) -> ZenohIdProto { self.config.zid } + fn get_auth_ids(&self) -> Vec { + // Convert LinkUnicast auth id to AuthId + let mut auth_ids: Vec = vec![]; + let handle = tokio::runtime::Handle::current(); + let guard = + tokio::task::block_in_place(|| handle.block_on(async { zasyncread!(self.link) })); + if let Some(val) = guard.as_ref() { + auth_ids.push(val.link.get_auth_id().to_owned().into()); + } + // Convert usrpwd auth id to AuthId + #[cfg(feature = "auth_usrpwd")] + auth_ids.push(self.config.auth_id.clone().into()); + auth_ids + } + fn get_whatami(&self) -> WhatAmI { self.config.whatami } #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { diff --git a/io/zenoh-transport/src/unicast/lowlatency/tx.rs b/io/zenoh-transport/src/unicast/lowlatency/tx.rs index 38751eb61d..90304a196d 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/tx.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/tx.rs @@ -11,7 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastLowlatency; use zenoh_protocol::{ network::NetworkMessage, transport::{TransportBodyLowLatency, TransportMessageLowLatency}, @@ -20,6 +19,10 @@ use zenoh_protocol::{ use zenoh_result::bail; use zenoh_result::ZResult; +use super::transport::TransportUnicastLowlatency; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastLowlatency { #[allow(unused_mut)] // When feature "shared-memory" is not enabled #[allow(clippy::let_and_return)] // When feature "stats" is not enabled @@ -27,12 +30,7 @@ impl TransportUnicastLowlatency { 
pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { bail!("Failed SHM conversion: {}", e); } } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index 1423fec900..bff221323e 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -11,22 +11,6 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "shared-memory")] -use super::shared_memory_unicast::SharedMemoryUnicast; -use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; -#[cfg(feature = "transport_auth")] -use crate::unicast::establishment::ext::auth::Auth; -#[cfg(feature = "transport_multilink")] -use crate::unicast::establishment::ext::multilink::MultiLink; -use crate::{ - unicast::{ - lowlatency::transport::TransportUnicastLowlatency, - transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, - universal::transport::TransportUnicastUniversal, - TransportConfigUnicast, TransportUnicast, - }, - TransportManager, TransportPeer, -}; use std::{ collections::HashMap, sync::{ @@ -35,20 +19,40 @@ use std::{ }, time::Duration, }; + use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; #[cfg(feature = "transport_compression")] use zenoh_config::CompressionUnicastConf; #[cfg(feature = "shared-memory")] -use zenoh_config::SharedMemoryConf; +use zenoh_config::ShmConf; use zenoh_config::{Config, LinkTxConf, QoSUnicastConf, TransportUnicastConf}; use zenoh_core::{zasynclock, zcondfeat}; use zenoh_crypto::PseudoRng; use zenoh_link::*; use zenoh_protocol::{ - core::{endpoint, ZenohId}, + core::{parameters, ZenohIdProto}, transport::{close, TransportSn}, }; use zenoh_result::{bail, zerror, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::ShmReader; + +#[cfg(feature = "shared-memory")] +use super::establishment::ext::shm::AuthUnicast; +use super::{link::LinkUnicastWithOpenAck, transport_unicast_inner::InitTransportResult}; +#[cfg(feature = "transport_auth")] +use crate::unicast::establishment::ext::auth::Auth; +#[cfg(feature = "transport_multilink")] +use crate::unicast::establishment::ext::multilink::MultiLink; +use crate::{ + unicast::{ + lowlatency::transport::TransportUnicastLowlatency, + transport_unicast_inner::{InitTransportError, TransportUnicastTrait}, + universal::transport::TransportUnicastUniversal, + TransportConfigUnicast, TransportUnicast, + }, + TransportManager, TransportPeer, +}; /*************************************/ /* TRANSPORT CONFIG */ @@ -75,16 +79,17 @@ pub struct TransportManagerStateUnicast { // Established listeners pub(super) protocols: Arc>>, // Established transports - pub(super) transports: Arc>>>, + pub(super) transports: Arc>>>, // Multilink #[cfg(feature = "transport_multilink")] pub(super) multilink: Arc, // Active authenticators #[cfg(feature = "transport_auth")] pub(super) authenticator: Arc, - // Shared memory + // SHM probing + // Option will be None if SHM is disabled by Config #[cfg(feature = "shared-memory")] - pub(super) shm: Arc, + pub(super) auth_shm: Option, } pub struct TransportManagerParamsUnicast { @@ -211,6 +216,7 @@ impl TransportManagerBuilderUnicast { pub fn build( self, #[allow(unused)] 
prng: &mut PseudoRng, // Required for #[cfg(feature = "transport_multilink")] + #[cfg(feature = "shared-memory")] shm_reader: &ShmReader, ) -> ZResult { if self.is_qos && self.is_lowlatency { bail!("'qos' and 'lowlatency' options are incompatible"); @@ -237,11 +243,16 @@ impl TransportManagerBuilderUnicast { protocols: Arc::new(AsyncMutex::new(HashMap::new())), transports: Arc::new(AsyncMutex::new(HashMap::new())), #[cfg(feature = "transport_multilink")] - multilink: Arc::new(MultiLink::make(prng)?), - #[cfg(feature = "shared-memory")] - shm: Arc::new(SharedMemoryUnicast::make()?), + multilink: Arc::new(MultiLink::make(prng, config.max_links > 1)?), #[cfg(feature = "transport_auth")] authenticator: Arc::new(self.authenticator), + #[cfg(feature = "shared-memory")] + auth_shm: match self.is_shm { + true => Some(AuthUnicast::new( + shm_reader.supported_protocols().as_slice(), + )?), + false => None, + }, }; let params = TransportManagerParamsUnicast { config, state }; @@ -256,7 +267,7 @@ impl Default for TransportManagerBuilderUnicast { let link_tx = LinkTxConf::default(); let qos = QoSUnicastConf::default(); #[cfg(feature = "shared-memory")] - let shm = SharedMemoryConf::default(); + let shm = ShmConf::default(); #[cfg(feature = "transport_compression")] let compression = CompressionUnicastConf::default(); @@ -288,11 +299,6 @@ impl TransportManager { TransportManagerBuilderUnicast::default() } - #[cfg(feature = "shared-memory")] - pub(crate) fn shm(&self) -> &Arc { - &self.state.unicast.shm - } - pub async fn close_unicast(&self) { tracing::trace!("TransportManagerUnicast::clear())"); @@ -381,7 +387,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend(endpoint::Parameters::iter(config))?; + .extend_from_iter(parameters::iter(config))?; }; manager.new_listener(endpoint).await } @@ -506,7 +512,7 @@ impl TransportManager { link: LinkUnicastWithOpenAck, other_initial_sn: TransportSn, other_lease: Duration, - mut guard: AsyncMutexGuard<'_, HashMap>>, + mut guard: AsyncMutexGuard<'_, HashMap>>, ) -> InitTransportResult { macro_rules! 
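// [editor's note — illustrative call-site, not part of this patch] The unicast
// builder now takes the manager-level `ShmReader` so the SHM establishment
// extension can advertise which SHM protocols this process can actually read;
// `auth_shm` stays `None` when SHM is disabled in the config. A sketch, with
// the `.shm(...)` builder method name assumed:
//
//     let mut prng = PseudoRng::from_entropy();
//     let params = TransportManager::config_unicast()
//         .shm(&ShmConf::default())           // hypothetical: enable/disable SHM
//         .build(&mut prng, &shm_reader)?;    // reader feeds AuthUnicast::new(...)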
link_error { ($s:expr, $reason:expr) => { @@ -595,14 +601,14 @@ impl TransportManager { "shared-memory", { tracing::debug!( - "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {}, multilink: {}, lowlatency: {}", + "New transport opened between {} and {} - whatami: {}, sn resolution: {:?}, initial sn: {:?}, qos: {}, shm: {:?}, multilink: {}, lowlatency: {}", self.config.zid, config.zid, config.whatami, config.sn_resolution, config.tx_initial_sn, config.is_qos, - config.is_shm, + config.shm, is_multilink, config.is_lowlatency ); @@ -697,7 +703,7 @@ impl TransportManager { if let Some(config) = self.config.endpoints.get(endpoint.protocol().as_str()) { endpoint .config_mut() - .extend(endpoint::Parameters::iter(config))?; + .extend_from_iter(parameters::iter(config))?; }; // Create a new link associated by calling the Link Manager @@ -706,7 +712,7 @@ impl TransportManager { super::establishment::open::open_link(link, self).await } - pub async fn get_transport_unicast(&self, peer: &ZenohId) -> Option { + pub async fn get_transport_unicast(&self, peer: &ZenohIdProto) -> Option { zasynclock!(self.state.unicast.transports) .get(peer) .map(|t| TransportUnicast(Arc::downgrade(t))) @@ -719,7 +725,7 @@ impl TransportManager { .collect() } - pub(super) async fn del_transport_unicast(&self, peer: &ZenohId) -> ZResult<()> { + pub(super) async fn del_transport_unicast(&self, peer: &ZenohIdProto) -> ZResult<()> { zasynclock!(self.state.unicast.transports) .remove(peer) .ok_or_else(|| { diff --git a/io/zenoh-transport/src/unicast/mod.rs b/io/zenoh-transport/src/unicast/mod.rs index 55226f287c..4539135fe9 100644 --- a/io/zenoh-transport/src/unicast/mod.rs +++ b/io/zenoh-transport/src/unicast/mod.rs @@ -11,42 +11,47 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod authentication; pub mod establishment; pub(crate) mod link; pub(crate) mod lowlatency; pub(crate) mod manager; -pub(crate) mod transport_unicast_inner; -pub(crate) mod universal; - #[cfg(feature = "test")] pub mod test_helpers; +pub(crate) mod transport_unicast_inner; +pub(crate) mod universal; -#[cfg(feature = "shared-memory")] -pub(crate) mod shared_memory_unicast; - -use self::transport_unicast_inner::TransportUnicastTrait; +use std::{ + fmt, + sync::{Arc, Weak}, +}; -use super::{TransportPeer, TransportPeerEventHandler}; #[cfg(feature = "transport_multilink")] use establishment::ext::auth::ZPublicKey; pub use manager::*; -use std::fmt; -use std::sync::{Arc, Weak}; use zenoh_core::zcondfeat; use zenoh_link::Link; -use zenoh_protocol::network::NetworkMessage; use zenoh_protocol::{ - core::{Bits, WhatAmI, ZenohId}, + core::{Bits, WhatAmI, ZenohIdProto}, + network::NetworkMessage, transport::{close, TransportSn}, }; use zenoh_result::{zerror, ZResult}; +use self::transport_unicast_inner::TransportUnicastTrait; +use super::{TransportPeer, TransportPeerEventHandler}; +#[cfg(feature = "shared-memory")] +use crate::shm::TransportShmConfig; +use crate::unicast::authentication::AuthId; +#[cfg(feature = "auth_usrpwd")] +use crate::unicast::establishment::ext::auth::UsrPwdId; + /*************************************/ /* TRANSPORT UNICAST */ /*************************************/ #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct TransportConfigUnicast { - pub(crate) zid: ZenohId, + pub(crate) zid: ZenohIdProto, pub(crate) whatami: WhatAmI, pub(crate) sn_resolution: Bits, pub(crate) tx_initial_sn: TransportSn, @@ -54,8 +59,10 @@ pub(crate) struct TransportConfigUnicast { #[cfg(feature 
= "transport_multilink")] pub(crate) multilink: Option, #[cfg(feature = "shared-memory")] - pub(crate) is_shm: bool, + pub(crate) shm: Option, pub(crate) is_lowlatency: bool, + #[cfg(feature = "auth_usrpwd")] + pub(crate) auth_id: UsrPwdId, } /// [`TransportUnicast`] is the transport handler returned @@ -72,7 +79,7 @@ impl TransportUnicast { } #[inline(always)] - pub fn get_zid(&self) -> ZResult { + pub fn get_zid(&self) -> ZResult { let transport = self.get_inner()?; Ok(transport.get_zid()) } @@ -115,6 +122,11 @@ impl TransportUnicast { Ok(transport.get_links()) } + pub fn get_auth_ids(&self) -> ZResult> { + let transport = self.get_inner()?; + Ok(transport.get_auth_ids()) + } + #[inline(always)] pub fn schedule(&self, message: NetworkMessage) -> ZResult<()> { let transport = self.get_inner()?; diff --git a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs b/io/zenoh-transport/src/unicast/shared_memory_unicast.rs deleted file mode 100644 index 881e6886d2..0000000000 --- a/io/zenoh-transport/src/unicast/shared_memory_unicast.rs +++ /dev/null @@ -1,57 +0,0 @@ -// -// Copyright (c) 2022 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use rand::{Rng, SeedableRng}; -use tokio::sync::RwLock; -use zenoh_core::zerror; -use zenoh_crypto::PseudoRng; -use zenoh_result::ZResult; -use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager, SharedMemoryReader}; - -pub(crate) type Challenge = u64; -const NAME: &str = "zshm"; - -/*************************************/ -/* Authenticator */ -/*************************************/ -pub(crate) struct SharedMemoryUnicast { - // Rust guarantees that fields are dropped in the order of declaration. - // Buffer needs to be dropped before the manager. 
- pub(crate) challenge: SharedMemoryBuf, - pub(crate) _manager: SharedMemoryManager, - pub(crate) reader: RwLock, -} - -unsafe impl Sync for SharedMemoryUnicast {} - -impl SharedMemoryUnicast { - pub fn make() -> ZResult { - // Create a challenge for session establishment - let mut prng = PseudoRng::from_entropy(); - let nonce = prng.gen::(); - let size = std::mem::size_of::(); - - let mut _manager = SharedMemoryManager::make(format!("{NAME}.{nonce}"), size)?; - - let mut challenge = _manager.alloc(size).map_err(|e| zerror!("{e}"))?; - let slice = unsafe { challenge.as_mut_slice() }; - slice[0..size].copy_from_slice(&nonce.to_le_bytes()); - - let shmauth = SharedMemoryUnicast { - challenge, - _manager, - reader: RwLock::new(SharedMemoryReader::new()), - }; - Ok(shmauth) - } -} diff --git a/io/zenoh-transport/src/unicast/test_helpers.rs b/io/zenoh-transport/src/unicast/test_helpers.rs index 42ed6db927..6d25ae0d77 100644 --- a/io/zenoh-transport/src/unicast/test_helpers.rs +++ b/io/zenoh-transport/src/unicast/test_helpers.rs @@ -11,9 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; use zenoh_core::zcondfeat; +use crate::{unicast::TransportManagerBuilderUnicast, TransportManager}; + pub fn make_transport_manager_builder( #[cfg(feature = "transport_multilink")] max_links: usize, #[cfg(feature = "shared-memory")] with_shm: bool, diff --git a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs index 1e3389ff75..0a84e5e753 100644 --- a/io/zenoh-transport/src/unicast/transport_unicast_inner.rs +++ b/io/zenoh-transport/src/unicast/transport_unicast_inner.rs @@ -12,22 +12,23 @@ // ZettaScale Zenoh Team, // -use crate::{ - unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, - TransportPeerEventHandler, -}; -use async_trait::async_trait; use std::{fmt::DebugStruct, sync::Arc, time::Duration}; + +use async_trait::async_trait; use tokio::sync::MutexGuard as AsyncMutexGuard; use zenoh_link::Link; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, transport::TransportSn, }; use zenoh_result::ZResult; use super::link::{LinkUnicastWithOpenAck, MaybeOpenAck}; +use crate::{ + unicast::{link::TransportLinkUnicast, TransportConfigUnicast}, + TransportPeerEventHandler, +}; pub(crate) type LinkError = (zenoh_result::Error, TransportLinkUnicast, u8); pub(crate) type TransportError = (zenoh_result::Error, Arc, u8); @@ -57,10 +58,11 @@ pub(crate) trait TransportUnicastTrait: Send + Sync { fn set_callback(&self, callback: Arc); async fn get_alive(&self) -> AsyncMutexGuard<'_, bool>; - fn get_zid(&self) -> ZenohId; + fn get_zid(&self) -> ZenohIdProto; fn get_whatami(&self) -> WhatAmI; fn get_callback(&self) -> Option>; fn get_links(&self) -> Vec; + fn get_auth_ids(&self) -> Vec; #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool; fn is_qos(&self) -> bool; diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 44a12be4ac..fff842c255 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -11,6 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + +use tokio_util::{sync::CancellationToken, task::TaskTracker}; +use zenoh_buffers::ZSliceBuffer; +use zenoh_protocol::transport::{KeepAlive, TransportMessage}; +use zenoh_result::{zerror, ZResult}; +use 
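// [editor's note — context, not part of this patch] The deleted
// `SharedMemoryUnicast` above implemented SHM detection as a nonce challenge:
// each side allocated a tiny SHM segment, wrote a random u64 into it, and the
// peer proved SHM access by reading the nonce back during establishment. The
// replacement (`AuthUnicast`, see manager.rs earlier in this patch) instead
// advertises the SHM protocol IDs the local reader supports:
//
//     // roughly what the new builder code does when SHM is enabled
//     let auth = AuthUnicast::new(shm_reader.supported_protocols().as_slice())?;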
zenoh_sync::{RecyclingObject, RecyclingObjectPool}; +#[cfg(feature = "stats")] +use {crate::common::stats::TransportStats, std::sync::Arc}; + use super::transport::TransportUnicastUniversal; use crate::{ common::{ @@ -23,14 +33,6 @@ use crate::{ }, unicast::link::{TransportLinkUnicast, TransportLinkUnicastRx, TransportLinkUnicastTx}, }; -use std::time::Duration; -use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh_buffers::ZSliceBuffer; -use zenoh_protocol::transport::{KeepAlive, TransportMessage}; -use zenoh_result::{zerror, ZResult}; -use zenoh_sync::{RecyclingObject, RecyclingObjectPool}; -#[cfg(feature = "stats")] -use {crate::common::stats::TransportStats, std::sync::Arc}; #[derive(Clone)] pub(super) struct TransportLinkUnicastUniversal { @@ -60,7 +62,8 @@ impl TransportLinkUnicastUniversal { }, queue_size: transport.manager.config.queue_size, wait_before_drop: transport.manager.config.wait_before_drop, - backoff: transport.manager.config.queue_backoff, + batching_enabled: transport.manager.config.batching, + batching_time_limit: transport.manager.config.queue_backoff, }; // The pipeline @@ -236,7 +239,7 @@ async fn rx_task( where T: ZSliceBuffer + 'static, F: Fn() -> T, - RecyclingObject: ZSliceBuffer, + RecyclingObject: AsMut<[u8]> + ZSliceBuffer, { let batch = link .recv_batch(|| pool.try_take().unwrap_or_else(|| pool.alloc())) @@ -245,7 +248,7 @@ async fn rx_task( } // The pool of buffers - let mtu = link.batch.max_buffer_size(); + let mtu = link.batch.mtu as usize; let mut n = rx_buffer_size / mtu; if rx_buffer_size % mtu != 0 { n += 1; diff --git a/io/zenoh-transport/src/unicast/universal/reliability.rs b/io/zenoh-transport/src/unicast/universal/reliability.rs index b3637bee27..7aece8d077 100644 --- a/io/zenoh-transport/src/unicast/universal/reliability.rs +++ b/io/zenoh-transport/src/unicast/universal/reliability.rs @@ -11,15 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::convert::TryInto; -use std::fmt; - -use super::common::seq_num::SeqNum; -use super::core::u64; +use std::{convert::TryInto, fmt}; use zenoh_result::{ZError, ZErrorKind, ZResult}; use zenoh_util::zerror; +use super::{common::seq_num::SeqNum, core::u64}; + pub(super) struct ReliabilityQueue { sn: SeqNum, index: usize, @@ -249,9 +247,10 @@ impl fmt::Debug for ReliabilityQueue { #[cfg(test)] mod tests { - use super::*; use rand::{thread_rng, Rng}; + use super::*; + #[test] fn reliability_queue_simple() { let size = 2; diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 027a11c796..afd8e114d7 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -11,16 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; -use crate::{ - common::{ - batch::{Decode, RBatch}, - priority::TransportChannelRx, - }, - unicast::transport_unicast_inner::TransportUnicastTrait, - TransportPeerEventHandler, -}; use std::sync::MutexGuard; + use zenoh_core::{zlock, zread}; use zenoh_link::Link; use zenoh_protocol::{ @@ -30,6 +22,16 @@ use zenoh_protocol::{ }; use zenoh_result::{bail, zerror, ZResult}; +use super::transport::TransportUnicastUniversal; +use crate::{ + common::{ + batch::{Decode, RBatch}, + priority::TransportChannelRx, + }, + unicast::transport_unicast_inner::TransportUnicastTrait, + TransportPeerEventHandler, +}; + /*************************************/ /* TRANSPORT RX */ /*************************************/ @@ 
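// [editor's note — illustrative arithmetic, not part of this patch] The RX
// buffer pool above is now sized from the link MTU, with a manual ceiling
// division so the pool always covers at least `rx_buffer_size` bytes:
//
//     let mtu = link.batch.mtu as usize;        // e.g. 65_535
//     let mut n = rx_buffer_size / mtu;         // whole buffers
//     if rx_buffer_size % mtu != 0 {
//         n += 1;                               // round up for the remainder
//     }
//     // e.g. rx_buffer_size = 1_000_000, mtu = 65_535  =>  n = 16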
-42,8 +44,11 @@ impl TransportUnicastUniversal { ) -> ZResult<()> { #[cfg(feature = "shared-memory")] { - if self.config.is_shm { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.state.unicast.shm.reader)?; + if self.config.shm.is_some() { + if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) { + tracing::debug!("Error receiving SHM buffer: {e}"); + return Ok(()); + } } } callback.handle_message(msg) @@ -77,7 +82,7 @@ impl TransportUnicastUniversal { let priority = ext_qos.priority(); let c = if self.is_qos() { &self.priority_rx[priority as usize] - } else if priority == Priority::default() { + } else if priority == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( @@ -120,7 +125,7 @@ impl TransportUnicastUniversal { let c = if self.is_qos() { &self.priority_rx[qos.priority() as usize] - } else if qos.priority() == Priority::default() { + } else if qos.priority() == Priority::DEFAULT { &self.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/src/unicast/universal/transport.rs b/io/zenoh-transport/src/unicast/universal/transport.rs index aa14a64bda..47f2ff344c 100644 --- a/io/zenoh-transport/src/unicast/universal/transport.rs +++ b/io/zenoh-transport/src/unicast/universal/transport.rs @@ -11,11 +11,29 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{ + fmt::DebugStruct, + sync::{Arc, RwLock}, + time::Duration, +}; + +use async_trait::async_trait; +use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; +use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; +use zenoh_link::Link; +use zenoh_protocol::{ + core::{Priority, WhatAmI, ZenohIdProto}, + network::NetworkMessage, + transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, +}; +use zenoh_result::{bail, zerror, ZResult}; + #[cfg(feature = "stats")] use crate::stats::TransportStats; use crate::{ common::priority::{TransportPriorityRx, TransportPriorityTx}, unicast::{ + authentication::AuthId, link::{LinkUnicastWithOpenAck, TransportLinkUnicastDirection}, transport_unicast_inner::{AddLinkResult, TransportUnicastTrait}, universal::link::TransportLinkUnicastUniversal, @@ -23,19 +41,6 @@ use crate::{ }, TransportManager, TransportPeerEventHandler, }; -use async_trait::async_trait; -use std::fmt::DebugStruct; -use std::sync::{Arc, RwLock}; -use std::time::Duration; -use tokio::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; -use zenoh_core::{zasynclock, zcondfeat, zread, zwrite}; -use zenoh_link::Link; -use zenoh_protocol::{ - core::{Priority, WhatAmI, ZenohId}, - network::NetworkMessage, - transport::{close, Close, PrioritySn, TransportMessage, TransportSn}, -}; -use zenoh_result::{bail, zerror, ZResult}; macro_rules! 
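// [editor's note — not part of this patch] Note the RX-side policy change in
// the rx.rs hunk above: a failed SHM-descriptor resolution used to propagate
// as an error (tearing the transport down); it is now logged at debug level
// and the offending message is dropped, keeping the session alive:
//
//     if let Err(e) = crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shmr) {
//         tracing::debug!("Error receiving SHM buffer: {e}");
//         return Ok(());   // drop this message only; the transport survives
//     }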
zlinkindex { ($guard:expr, $link:expr) => { @@ -320,7 +325,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zasynclock!(self.alive) } - fn get_zid(&self) -> ZenohId { + fn get_zid(&self) -> ZenohIdProto { self.config.zid } @@ -330,7 +335,7 @@ impl TransportUnicastTrait for TransportUnicastUniversal { #[cfg(feature = "shared-memory")] fn is_shm(&self) -> bool { - self.config.is_shm + self.config.shm.is_some() } fn is_qos(&self) -> bool { @@ -381,6 +386,19 @@ impl TransportUnicastTrait for TransportUnicastUniversal { zread!(self.links).iter().map(|l| l.link.link()).collect() } + fn get_auth_ids(&self) -> Vec { + // Convert LinkUnicast auth ids to AuthId + #[allow(unused_mut)] + let mut auth_ids: Vec = zread!(self.links) + .iter() + .map(|l| l.link.link.get_auth_id().to_owned().into()) + .collect(); + // Convert usrpwd auth id to AuthId + #[cfg(feature = "auth_usrpwd")] + auth_ids.push(self.config.auth_id.clone().into()); + auth_ids + } + /*************************************/ /* TX */ /*************************************/ diff --git a/io/zenoh-transport/src/unicast/universal/tx.rs b/io/zenoh-transport/src/unicast/universal/tx.rs index ffc162c0b4..f7754489ef 100644 --- a/io/zenoh-transport/src/unicast/universal/tx.rs +++ b/io/zenoh-transport/src/unicast/universal/tx.rs @@ -11,10 +11,13 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::transport::TransportUnicastUniversal; use zenoh_core::zread; use zenoh_protocol::network::NetworkMessage; +use super::transport::TransportUnicastUniversal; +#[cfg(feature = "shared-memory")] +use crate::shm::map_zmsg_to_partner; + impl TransportUnicastUniversal { fn schedule_on_link(&self, msg: NetworkMessage) -> bool { macro_rules! zpush { @@ -61,12 +64,7 @@ impl TransportUnicastUniversal { pub(crate) fn internal_schedule(&self, mut msg: NetworkMessage) -> bool { #[cfg(feature = "shared-memory")] { - let res = if self.config.is_shm { - crate::shm::map_zmsg_to_shminfo(&mut msg) - } else { - crate::shm::map_zmsg_to_shmbuf(&mut msg, &self.manager.shm().reader) - }; - if let Err(e) = res { + if let Err(e) = map_zmsg_to_partner(&mut msg, &self.config.shm) { tracing::trace!("Failed SHM conversion: {}", e); return false; } diff --git a/io/zenoh-transport/tests/endpoints.rs b/io/zenoh-transport/tests/endpoints.rs index 6269f78cb9..f4ddbd6ec4 100644 --- a/io/zenoh-transport/tests/endpoints.rs +++ b/io/zenoh-transport/tests/endpoints.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::{ - core::{WhatAmI, ZenohId}, + core::{WhatAmI, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -73,7 +74,7 @@ async fn run(endpoints: &[EndPoint]) { // Create the transport manager let sm = TransportManager::builder() .whatami(WhatAmI::Peer) - .zid(ZenohId::try_from([1]).unwrap()) + .zid(ZenohIdProto::try_from([1]).unwrap()) .build(Arc::new(SH)) .unwrap(); @@ -317,13 +318,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("tls/localhost:{}", 7070).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -396,13 +397,13 @@ AXVFFIgCSluyrolaD6CWD9MqOex4YOfJR2bNxI7lFvuK4AwjyUJzT1U1HXib17mM let mut endpoint: EndPoint = format!("quic/localhost:{}", 
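// [editor's note — illustrative usage, not part of this patch] With
// `get_auth_ids` implemented above, callers holding a `TransportUnicast` can
// inspect how the peer authenticated: one `AuthId` per link (e.g. a TLS
// certificate identity) plus, when `auth_usrpwd` is enabled, the user/password
// identity. Sketch (assumes `AuthId: Debug`):
//
//     let auth_ids = transport.get_auth_ids()?;   // ZResult<Vec<AuthId>>
//     for id in &auth_ids {
//         println!("peer authenticated as {id:?}");
//     }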
7080).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); let endpoints = vec![endpoint]; diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index e046c96958..129f79d55e 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -24,11 +24,13 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -143,8 +145,8 @@ mod tests { endpoint: &EndPoint, ) -> (TransportMulticastPeer, TransportMulticastPeer) { // Define peer01 and peer02 IDs - let peer01_id = ZenohId::try_from([1]).unwrap(); - let peer02_id = ZenohId::try_from([2]).unwrap(); + let peer01_id = ZenohIdProto::try_from([1]).unwrap(); + let peer02_id = ZenohIdProto::try_from([2]).unwrap(); // Create the peer01 transport manager let peer01_handler = Arc::new(SHPeer::default()); @@ -168,13 +170,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -186,10 +194,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -204,10 +210,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -235,7 +239,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -245,7 +249,7 @@ mod tests { // Close the peer02 transport println!("Closing transport 
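// [editor's note — not part of this patch] The test diffs from here on
// systematically wrap async accessors in `ztimeout!` so a stalled runtime
// fails the test instead of hanging CI. `ztimeout!` comes from zenoh-core and
// is conceptually a timeout-plus-unwrap around an async expression:
//
//     macro_rules! ztimeout {
//         ($f:expr) => {
//             tokio::time::timeout(TIMEOUT, $f).await.unwrap()
//         };
//     }
//
// where `TIMEOUT` is the per-test constant already defined in these files.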
with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; @@ -262,11 +266,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -352,7 +356,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 87422daf2a..0ffefb59b2 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -25,11 +25,13 @@ mod tests { }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -142,8 +144,8 @@ mod tests { endpoint: &EndPoint, ) -> (TransportMulticastPeer, TransportMulticastPeer) { // Define peer01 and peer02 IDs - let peer01_id = ZenohId::try_from([1]).unwrap(); - let peer02_id = ZenohId::try_from([2]).unwrap(); + let peer01_id = ZenohIdProto::try_from([1]).unwrap(); + let peer02_id = ZenohIdProto::try_from([2]).unwrap(); // Create the peer01 transport manager let peer01_handler = Arc::new(SHPeer::default()); @@ -165,13 +167,19 @@ mod tests { // Open transport -> This should be accepted println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer01_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer01_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer01_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer01_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer01_manager.get_transports_multicast()) + ); println!("Opening transport with {endpoint}"); let _ = ztimeout!(peer02_manager.open_transport_multicast(endpoint.clone())).unwrap(); - assert!(!peer02_manager.get_transports_multicast().await.is_empty()); - println!("\t{:?}", peer02_manager.get_transports_multicast().await); + assert!(!ztimeout!(peer02_manager.get_transports_multicast()).is_empty()); + println!( + "\t{:?}", + ztimeout!(peer02_manager.get_transports_multicast()) + ); // Wait to for peer 01 and 02 to join each other ztimeout!(async { @@ -183,10 +191,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer01_transport = peer01_manager - .get_transport_multicast(&peer02_id) - .await - .unwrap(); + let peer01_transport = + ztimeout!(peer01_manager.get_transport_multicast(&peer02_id)).unwrap(); println!( "\tPeer01 peers: {:?}", peer01_transport.get_peers().unwrap() @@ -201,10 +207,8 @@ mod tests { tokio::time::sleep(SLEEP_COUNT).await; } }); - let peer02_transport = peer02_manager - .get_transport_multicast(&peer01_id) - .await - .unwrap(); + let peer02_transport = + 
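// [editor's note — not part of this patch] Another recurring substitution in
// these tests: `Priority::default()` / `NodeIdType::default()` become the
// associated consts `Priority::DEFAULT` / `NodeIdType::DEFAULT`, and
// `Encoding::default()` becomes the explicit `Encoding::empty()`. Associated
// consts also work in const positions where trait calls cannot, e.g.:
//
//     const CHANNEL: Channel = Channel {
//         priority: Priority::DEFAULT,          // fine in a const
//         reliability: Reliability::BestEffort,
//     };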
ztimeout!(peer02_manager.get_transport_multicast(&peer01_id)).unwrap(); println!( "\tPeer02 peers: {:?}", peer02_transport.get_peers().unwrap() @@ -232,7 +236,7 @@ mod tests { // Close the peer01 transport println!("Closing transport with {endpoint}"); ztimeout!(peer01.transport.close()).unwrap(); - assert!(peer01.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer01.manager.get_transports_multicast()).is_empty()); ztimeout!(async { while !peer02.transport.get_peers().unwrap().is_empty() { tokio::time::sleep(SLEEP_COUNT).await; @@ -242,7 +246,7 @@ mod tests { // Close the peer02 transport println!("Closing transport with {endpoint}"); ztimeout!(peer02.transport.close()).unwrap(); - assert!(peer02.manager.get_transports_multicast().await.is_empty()); + assert!(ztimeout!(peer02.manager.get_transports_multicast()).is_empty()); // Wait a little bit tokio::time::sleep(SLEEP).await; @@ -259,11 +263,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -349,7 +353,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/transport_whitelist.rs b/io/zenoh-transport/tests/transport_whitelist.rs index a859a1c0c9..121db5b5d6 100644 --- a/io/zenoh-transport/tests/transport_whitelist.rs +++ b/io/zenoh-transport/tests/transport_whitelist.rs @@ -12,10 +12,11 @@ // ZettaScale Zenoh Team, // use std::{any::Any, convert::TryFrom, iter::FromIterator, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{EndPoint, ZenohId}, + core::{EndPoint, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; @@ -67,7 +68,7 @@ impl TransportPeerEventHandler for SCRouter { async fn run(endpoints: &[EndPoint]) { // Define client and router IDs - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); // Create the router transport manager println!(">>> Transport Whitelist [1a1]"); diff --git a/io/zenoh-transport/tests/unicast_authenticator.rs b/io/zenoh-transport/tests/unicast_authenticator.rs index a232584cff..87f2174598 100644 --- a/io/zenoh-transport/tests/unicast_authenticator.rs +++ b/io/zenoh-transport/tests/unicast_authenticator.rs @@ -12,19 +12,18 @@ // ZettaScale Zenoh Team, // use std::{any::Any, sync::Arc, time::Duration}; + use zenoh_core::{zasyncwrite, ztimeout}; use zenoh_link::Link; use zenoh_protocol::{ - core::{EndPoint, WhatAmI, ZenohId}, + core::{EndPoint, WhatAmI, ZenohIdProto}, network::NetworkMessage, }; use zenoh_result::ZResult; use zenoh_transport::{ - multicast::TransportMulticast, unicast::establishment::ext::auth::Auth, - TransportMulticastEventHandler, -}; -use zenoh_transport::{ - unicast::TransportUnicast, DummyTransportPeerEventHandler, TransportEventHandler, + multicast::TransportMulticast, + unicast::{establishment::ext::auth::Auth, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; @@ -112,7 +111,7 @@ async fn 
auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { }; // Create the transport transport manager for the client 01 - let client01_id = ZenohId::try_from([2]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); let n = BigUint::from_bytes_le(&[ 0x41, 0x74, 0xc6, 0x40, 0x18, 0x63, 0xbd, 0x59, 0xe6, 0x0d, 0xe9, 0x23, 0x3e, 0x95, 0xca, @@ -171,7 +170,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the transport transport manager for the client 02 - let client02_id = ZenohId::try_from([3]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); let n = BigUint::from_bytes_le(&[ 0xd1, 0x36, 0xcf, 0x94, 0xda, 0x04, 0x7e, 0x9f, 0x53, 0x39, 0xb8, 0x7b, 0x53, 0x3a, 0xe6, @@ -230,7 +229,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the transport transport manager for the client 03 with the same key as client 02 - let client03_id = ZenohId::try_from([4]).unwrap(); + let client03_id = ZenohIdProto::try_from([4]).unwrap(); let mut auth = Auth::empty(); auth.set_pubkey(Some(AuthPubKey::new( client02_pub_key.clone().into(), @@ -250,7 +249,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { .unwrap(); // Create the router transport manager - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterAuthenticator::new()); let n = BigUint::from_bytes_le(&[ 0x31, 0xd1, 0xfc, 0x7e, 0x70, 0x5f, 0xd7, 0xe3, 0xcc, 0xa4, 0xca, 0xcb, 0x38, 0x84, 0x2f, @@ -291,10 +290,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { ]; let router_pri_key = RsaPrivateKey::from_components(n, e, d, primes).unwrap(); let mut auth_pubkey = AuthPubKey::new(router_pub_key.into(), router_pri_key.into()); - auth_pubkey - .add_pubkey(client01_pub_key.into()) - .await - .unwrap(); + ztimeout!(auth_pubkey.add_pubkey(client01_pub_key.into())).unwrap(); let mut auth = Auth::empty(); auth.set_pubkey(Some(auth_pubkey)); let unicast = make_basic_transport_manager_builder( @@ -315,7 +311,7 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add the locator on the router ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); println!("Transport Authenticator PubKey [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator PubKey [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -344,10 +340,10 @@ async fn auth_pubkey(endpoint: &EndPoint, lowlatency_transport: bool) { // Add client02 pubkey to the router let router_auth_handle = router_manager.get_auth_handle_unicast(); - zasyncwrite!(router_auth_handle.get_pubkey().unwrap()) - .add_pubkey(client02_pub_key.into()) - .await - .unwrap(); + ztimeout!( + zasyncwrite!(router_auth_handle.get_pubkey().unwrap()).add_pubkey(client02_pub_key.into()) + ) + .unwrap(); /* [3b] */ // Open a first transport from client02 to the router @@ -418,11 +414,11 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { }; /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); let user01 = "user01".to_string(); let password01 = "password01".to_string(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); let user02 = "invalid".to_string(); let password02 = 
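// [editor's note — illustrative, not part of this patch] In the TLS/QUIC
// endpoint setup just below (and throughout these tests), `extend` becomes
// `extend_from_iter` from the `parameters` API, which accepts borrowed
// `(&str, &str)` pairs; the per-entry `to_owned()` allocations are replaced by
// a plain `.copied()`:
//
//     endpoint.config_mut().extend_from_iter(
//         [
//             (TLS_SERVER_CERTIFICATE_RAW, cert),
//             (TLS_SERVER_PRIVATE_KEY_RAW, key),
//         ]
//         .iter()
//         .copied(),   // yields (&str, &str) without allocating Strings
//     )?;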
"invalid".to_string(); @@ -431,17 +427,13 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { let password03 = "password03".to_string(); /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterAuthenticator::new()); // Create the router transport manager let mut auth_usrpwd_router = AuthUsrPwd::new(None); - auth_usrpwd_router - .add_user(user01.clone().into(), password01.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user01.clone().into(), password01.clone().into())) .unwrap(); - auth_usrpwd_router - .add_user(user03.clone().into(), password03.clone().into()) - .await + ztimeout!(auth_usrpwd_router.add_user(user03.clone().into(), password03.clone().into())) .unwrap(); let mut auth_router = Auth::empty(); auth_router.set_usrpwd(Some(auth_usrpwd_router)); @@ -520,7 +512,7 @@ async fn auth_usrpwd(endpoint: &EndPoint, lowlatency_transport: bool) { println!("Transport Authenticator UserPassword [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Authenticator UserPassword [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Authenticator UserPassword [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -802,14 +794,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 8030).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -902,14 +894,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 8040).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_CERTIFICATE_RAW, cert), (TLS_SERVER_PRIVATE_KEY_RAW, key), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index 91784a3497..e5015c3d25 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -13,21 +13,23 @@ // #[cfg(feature = "transport_compression")] mod tests { - use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::ext::{NodeIdType, QoSType}, @@ -49,8 +51,8 @@ mod tests { const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; - const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; + const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { @@ -167,8 +169,8 @@ mod tests { TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = 
ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_handler = Arc::new(SHRouter::default()); @@ -216,10 +218,7 @@ mod tests { let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -291,11 +290,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -358,13 +357,12 @@ mod tests { { let c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await - .unwrap() - .get_stats() - .map(|s| s.report()) - .unwrap(); + let r_stats = + ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) + .unwrap() + .get_stats() + .map(|s| s.report()) + .unwrap(); println!("\tRouter: {:?}", r_stats); } @@ -433,7 +431,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -455,7 +453,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -480,7 +478,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -502,7 +500,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index 829ae0dea6..183f8a7163 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -10,16 +10,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use tokio::sync::Barrier; use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, @@ -103,8 +108,8 @@ impl TransportPeerEventHandler for MHPeer { async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec) { /* [Peers] */ - let peer_id01 = ZenohId::try_from([2]).unwrap(); - let peer_id02 = ZenohId::try_from([3]).unwrap(); + let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); + let peer_id02 = ZenohIdProto::try_from([3]).unwrap(); // Create the peer01 transport manager let peer_sh01 = 
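// [editor's note — not part of this patch] On the constant change near the top
// of the unicast_compression diff above: `MSG_SIZE_LOWLATENCY` shrinks from
// `[1_024, 65000]` to an alias of `MSG_SIZE_NOFRAG`, presumably because the
// lowlatency transport has no fragmentation layer, so test payloads must fit
// within a single batch:
//
//     const MSG_SIZE_NOFRAG: [usize; 1] = [1_024];
//     const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // no fragmentation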
Arc::new(SHPeer::new()); @@ -145,7 +150,7 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Transport Peer 01b] => Getting endpoints: {c_end01:?} {locs:?}"); assert_eq!(c_end01.len(), locs.len()); @@ -173,11 +178,8 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Waiting... OK"); // Verify that the transport has been correctly open - assert_eq!(peer01_manager.get_transports_unicast().await.len(), 1); - let s02 = peer01_manager - .get_transport_unicast(&c_zid02) - .await - .unwrap(); + assert_eq!(ztimeout!(peer01_manager.get_transports_unicast()).len(), 1); + let s02 = ztimeout!(peer01_manager.get_transport_unicast(&c_zid02)).unwrap(); assert_eq!( s02.get_links().unwrap().len(), c_end01.len() + c_end02.len() @@ -186,13 +188,13 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Transport Peer 02b] => Getting endpoints: {c_end02:?} {locs:?}"); assert_eq!(c_end02.len(), locs.len()); @@ -276,13 +278,10 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec Transports: {:?}", - peer02_manager.get_transports_unicast().await + ztimeout!(peer02_manager.get_transports_unicast()) ); - assert_eq!(peer02_manager.get_transports_unicast().await.len(), 1); - let s01 = peer02_manager - .get_transport_unicast(&c_zid01) - .await - .unwrap(); + assert_eq!(ztimeout!(peer02_manager.get_transports_unicast()).len(), 1); + let s01 = ztimeout!(peer02_manager.get_transport_unicast(&c_zid01)).unwrap(); assert_eq!( s01.get_links().unwrap().len(), c_end01.len() + c_end02.len() @@ -291,13 +290,13 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::{ @@ -36,8 +38,8 @@ const MSG_DEFRAG_BUF: usize = 128_000; async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_manager = TransportManager::builder() @@ -64,21 +66,18 @@ async fn run(endpoint: &EndPoint, channel: Channel, msg_size: usize) { println!("Opening transport with {endpoint}"); let _ = ztimeout!(client_manager.open_transport_unicast(endpoint.clone())).unwrap(); - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Create the message to send let message: NetworkMessage = Push { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + 
encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -138,11 +137,11 @@ async fn transport_unicast_defragmentation_tcp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -171,11 +170,11 @@ async fn transport_unicast_defragmentation_ws_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -206,11 +205,11 @@ async fn transport_unicast_defragmentation_unixpipe_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_intermittent.rs b/io/zenoh-transport/tests/unicast_intermittent.rs index 1eb268770c..a2cb1e2d12 100644 --- a/io/zenoh-transport/tests/unicast_intermittent.rs +++ b/io/zenoh-transport/tests/unicast_intermittent.rs @@ -11,16 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::io::Write; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + io::Write, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, @@ -143,7 +148,7 @@ impl TransportPeerEventHandler for SCClient { async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterIntermittent); // Create the router transport manager @@ -163,9 +168,9 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); - let client03_id = ZenohId::try_from([4]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); + let client03_id = ZenohIdProto::try_from([4]).unwrap(); // Create the transport transport manager for the first client let counter = Arc::new(AtomicUsize::new(0)); @@ -220,7 +225,7 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Add a listener to the router println!("\nTransport Intermittent [1a1]"); let _ = ztimeout!(router_manager.add_listener(endpoint.clone())).unwrap(); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Intermittent [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -228,7 +233,10 @@ 
async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Open a transport from client01 to the router let c_ses1 = ztimeout!(client01_manager.open_transport_unicast(endpoint.clone())).unwrap(); assert_eq!(c_ses1.get_links().unwrap().len(), 1); - assert_eq!(client01_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(client01_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses1.get_zid().unwrap(), router_id); /* [3] */ @@ -244,7 +252,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses2 = ztimeout!(c_client02_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses2.get_links().unwrap().len(), 1); - assert_eq!(c_client02_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client02_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses2.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -269,7 +280,10 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) let c_ses3 = ztimeout!(c_client03_manager.open_transport_unicast(c_endpoint.clone())).unwrap(); assert_eq!(c_ses3.get_links().unwrap().len(), 1); - assert_eq!(c_client03_manager.get_transports_unicast().await.len(), 1); + assert_eq!( + ztimeout!(c_client03_manager.get_transports_unicast()).len(), + 1 + ); assert_eq!(c_ses3.get_zid().unwrap(), c_router_id); tokio::time::sleep(SLEEP).await; @@ -291,13 +305,13 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) // Create the message to send let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -361,15 +375,15 @@ async fn transport_intermittent(endpoint: &EndPoint, lowlatency_transport: bool) /* [5] */ // Close the open transport on the client println!("Transport Intermittent [5a1]"); - for s in client01_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client01_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a2]"); - for s in client02_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client02_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } println!("Transport Intermittent [5a3]"); - for s in client03_manager.get_transports_unicast().await.iter() { + for s in ztimeout!(client03_manager.get_transports_unicast()).iter() { ztimeout!(s.close()).unwrap(); } diff --git a/io/zenoh-transport/tests/unicast_multilink.rs b/io/zenoh-transport/tests/unicast_multilink.rs index d69a30ac9d..6fc0864fe2 100644 --- a/io/zenoh-transport/tests/unicast_multilink.rs +++ b/io/zenoh-transport/tests/unicast_multilink.rs @@ -14,9 +14,10 @@ #[cfg(feature = "transport_multilink")] mod tests { use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; - use zenoh_protocol::core::{WhatAmI, ZenohId}; + use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{ multicast::TransportMulticast, 
unicast::TransportUnicast, DummyTransportPeerEventHandler, @@ -76,7 +77,7 @@ mod tests { async fn multilink_transport(endpoint: &EndPoint) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterOpenClose); // Create the router transport manager @@ -91,8 +92,8 @@ mod tests { .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); // Create the transport transport manager for the first client let unicast = TransportManager::config_unicast() @@ -134,7 +135,7 @@ mod tests { println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -148,7 +149,7 @@ mod tests { assert!(res.is_ok()); let c_ses1 = res.unwrap(); println!("Transport Open Close [1d1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [1d2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -188,7 +189,7 @@ mod tests { assert!(res.is_ok()); let c_ses2 = res.unwrap(); println!("Transport Open Close [2b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [2b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses2.get_zid().unwrap(), router_id); @@ -224,7 +225,7 @@ mod tests { println!("Transport Open Close [3a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [3b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [3b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses1.get_zid().unwrap(), router_id); @@ -254,7 +255,7 @@ mod tests { println!("Transport Open Close [4a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [4b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [4b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -284,7 +285,7 @@ mod tests { assert!(res.is_ok()); let c_ses3 = res.unwrap(); println!("Transport Open Close [5b1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [5b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses3.get_zid().unwrap(), router_id); @@ -316,7 +317,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [6b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6b2]: {transports:?}"); assert_eq!(transports.len(), 1); assert_eq!(c_ses4.get_zid().unwrap(), router_id); @@ -332,7 +333,7 @@ mod tests { println!("Transport Open Close 
[6d2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [6e1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [6e2]: {transports:?}"); assert_eq!(transports.len(), 1); @@ -340,7 +341,7 @@ mod tests { println!("Transport Open Close [6f1]"); ztimeout!(async { tokio::time::sleep(SLEEP).await; - let transports = router_manager.get_transports_unicast().await; + let transports = ztimeout!(router_manager.get_transports_unicast()); assert_eq!(transports.len(), 2); let s = transports .iter() @@ -358,7 +359,7 @@ mod tests { println!("Transport Open Close [7a2]: {res:?}"); assert!(res.is_err()); println!("Transport Open Close [7b1]"); - let transports = client03_manager.get_transports_unicast().await; + let transports = ztimeout!(client03_manager.get_transports_unicast()); println!("Transport Open Close [7b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -373,7 +374,7 @@ mod tests { println!("Transport Open Close [8b2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [8c1]"); - let transports = client01_manager.get_transports_unicast().await; + let transports = ztimeout!(client01_manager.get_transports_unicast()); println!("Transport Open Close [8c2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -400,7 +401,7 @@ mod tests { assert!(res.is_ok()); let c_ses4 = res.unwrap(); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 1); println!("Transport Open Close [9c1]"); @@ -434,7 +435,7 @@ mod tests { println!("Transport Open Close [9a2]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [9b1]"); - let transports = client02_manager.get_transports_unicast().await; + let transports = ztimeout!(client02_manager.get_transports_unicast()); println!("Transport Open Close [9b2]: {transports:?}"); assert_eq!(transports.len(), 0); @@ -611,14 +612,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 18030).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -709,14 +710,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 18040).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_openclose.rs b/io/zenoh-transport/tests/unicast_openclose.rs index a671de14a8..8909d74402 100644 --- a/io/zenoh-transport/tests/unicast_openclose.rs +++ b/io/zenoh-transport/tests/unicast_openclose.rs @@ -12,9 +12,10 @@ // ZettaScale Zenoh Team, // use std::{convert::TryFrom, sync::Arc, time::Duration}; + use zenoh_core::ztimeout; use zenoh_link::EndPoint; -use zenoh_protocol::core::{WhatAmI, ZenohId}; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; use zenoh_result::ZResult; use zenoh_transport::{ multicast::TransportMulticast, @@ -22,7 +23,6 @@ use 
zenoh_transport::{ DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; - #[cfg(target_os = "linux")] #[cfg(any(feature = "transport_tcp", feature = "transport_udp"))] use zenoh_util::net::get_ipv4_ipaddrs; @@ -90,7 +90,7 @@ async fn openclose_transport( lowlatency_transport: bool, ) { /* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([1]).unwrap(); let router_handler = Arc::new(SHRouterOpenClose); // Create the router transport manager @@ -110,8 +110,8 @@ async fn openclose_transport( .unwrap(); /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - let client02_id = ZenohId::try_from([3]).unwrap(); + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + let client02_id = ZenohIdProto::try_from([3]).unwrap(); // Create the transport manager for the first client let unicast = make_transport_manager_builder( @@ -152,7 +152,7 @@ async fn openclose_transport( println!("Transport Open Close [1a1]: {res:?}"); assert!(res.is_ok()); println!("Transport Open Close [1a2]"); - let locators = router_manager.get_listeners().await; + let locators = ztimeout!(router_manager.get_listeners()); println!("Transport Open Close [1a2]: {locators:?}"); assert_eq!(locators.len(), 1); @@ -640,14 +640,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -738,14 +738,14 @@ R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, ca), (TLS_SERVER_PRIVATE_KEY_RAW, key), (TLS_SERVER_CERTIFICATE_RAW, cert), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); diff --git a/io/zenoh-transport/tests/unicast_priorities.rs b/io/zenoh-transport/tests/unicast_priorities.rs index 3a594cfea3..708a9fad3b 100644 --- a/io/zenoh-transport/tests/unicast_priorities.rs +++ b/io/zenoh-transport/tests/unicast_priorities.rs @@ -11,23 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::any::Any; -use std::convert::TryFrom; -use std::fmt::Write as _; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; +use std::{ + any::Any, + convert::TryFrom, + fmt::Write as _, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + use zenoh_core::ztimeout; use zenoh_link::Link; -use zenoh_protocol::network::NetworkBody; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::{ ext::{NodeIdType, QoSType}, Push, }, - NetworkMessage, + NetworkBody, NetworkMessage, }, zenoh::Put, }; @@ -196,8 +200,8 @@ async fn open_transport_unicast( TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager
let router_handler = Arc::new(SHRouter::new()); @@ -227,10 +231,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -289,11 +290,11 @@ async fn single_run(router_handler: Arc<SHRouter>, client_transport: TransportUn wire_expr: "test".into(), ext_qos: QoSType::new(*p, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index d04a54c63a..8c06a17f6d 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -13,7 +13,6 @@ // #[cfg(feature = "shared-memory")] mod tests { - use rand::{Rng, SeedableRng}; use std::{ any::Any, convert::TryFrom, @@ -23,20 +22,28 @@ mod tests { }, time::Duration, }; + use zenoh_buffers::buffer::SplitBuffer; use zenoh_core::ztimeout; - use zenoh_crypto::PseudoRng; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::ext::{NodeIdType, QoSType}, NetworkBody, NetworkMessage, Push, }, zenoh::{PushBody, Put}, }; - use zenoh_result::{zerror, ZResult}; - use zenoh_shm::{SharedMemoryBuf, SharedMemoryManager}; + use zenoh_result::ZResult; + use zenoh_shm::{ api::{ protocol_implementations::posix::{ posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, }, provider::shm_provider::{BlockOn, GarbageCollect, ShmProviderBuilder}, }, ShmBufInner, }; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, @@ -44,7 +51,6 @@ mod tests { const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); - const USLEEP: Duration = Duration::from_micros(100); const MSG_COUNT: usize = 1_000; const MSG_SIZE: usize = 1_024; @@ -109,11 +115,10 @@ mod tests { NetworkBody::Push(m) => match m.payload { PushBody::Put(Put { payload, ..
}) => { for zs in payload.zslices() { - if self.is_shm && zs.downcast_ref::<SharedMemoryBuf>().is_none() { - panic!("Expected SharedMemoryBuf: {:?}", zs); - } else if !self.is_shm && zs.downcast_ref::<SharedMemoryBuf>().is_some() - { - panic!("Not Expected SharedMemoryBuf: {:?}", zs); + if self.is_shm && zs.downcast_ref::<ShmBufInner>().is_none() { + panic!("Expected ShmBufInner: {:?}", zs); + } else if !self.is_shm && zs.downcast_ref::<ShmBufInner>().is_some() { + panic!("Not Expected ShmBufInner: {:?}", zs); } } payload.contiguous().into_owned() @@ -148,26 +153,20 @@ mod tests { println!("Transport SHM [0a]: {endpoint:?}"); // Define client and router IDs - let peer_shm01 = ZenohId::try_from([1]).unwrap(); - let peer_shm02 = ZenohId::try_from([2]).unwrap(); - let peer_net01 = ZenohId::try_from([3]).unwrap(); - - let mut tries = 100; - let mut prng = PseudoRng::from_entropy(); - let mut shm01 = loop { - // Create the SharedMemoryManager - if let Ok(shm01) = SharedMemoryManager::make( - format!("peer_shm01_{}_{}", endpoint.protocol(), prng.gen::<usize>()), - 2 * MSG_SIZE, - ) { - break Ok(shm01); - } - tries -= 1; - if tries == 0 { - break Err(zerror!("Unable to create SharedMemoryManager!")); - } - } - .unwrap(); + let peer_shm01 = ZenohIdProto::try_from([1]).unwrap(); + let peer_shm02 = ZenohIdProto::try_from([2]).unwrap(); + let peer_net01 = ZenohIdProto::try_from([3]).unwrap(); + + // create SHM provider + let backend = PosixShmProviderBackend::builder() + .with_size(2 * MSG_SIZE) + .unwrap() + .res() + .unwrap(); + let shm01 = ShmProviderBuilder::builder() + .protocol_id::<POSIX_PROTOCOL_ID>() + .backend(backend) + .res(); // Create a peer manager with shared-memory authenticator enabled let peer_shm01_handler = Arc::new(SHPeer::new(true)); @@ -229,45 +228,35 @@ mod tests { // Retrieve the transports println!("Transport SHM [2a]"); - let peer_shm02_transport = peer_shm01_manager - .get_transport_unicast(&peer_shm02) - .await - .unwrap(); + let peer_shm02_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_shm02)).unwrap(); assert!(peer_shm02_transport.is_shm().unwrap()); println!("Transport SHM [2b]"); - let peer_net01_transport = peer_shm01_manager - .get_transport_unicast(&peer_net01) - .await - .unwrap(); + let peer_net01_transport = + ztimeout!(peer_shm01_manager.get_transport_unicast(&peer_net01)).unwrap(); assert!(!peer_net01_transport.is_shm().unwrap()); + let layout = shm01.alloc(MSG_SIZE).into_layout().unwrap(); + // Send the message println!("Transport SHM [3a]"); // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::<BlockOn<GarbageCollect>>()).unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -296,26 +285,19 @@ mod tests { // The msg count for (msg_count, _) in (0..MSG_COUNT).enumerate() { // Create the message to
send - let mut sbuf = ztimeout!(async { - loop { - match shm01.alloc(MSG_SIZE) { - Ok(sbuf) => break sbuf, - Err(_) => tokio::time::sleep(USLEEP).await, - } - } - }); - let bs = unsafe { sbuf.as_mut_slice() }; - bs[0..8].copy_from_slice(&msg_count.to_le_bytes()); + let mut sbuf = + ztimeout!(layout.alloc().with_policy::<BlockOn<GarbageCollect>>()).unwrap(); + sbuf[0..8].copy_from_slice(&msg_count.to_le_bytes()); let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 5cd8224fc2..4f529c3b74 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -13,15 +13,20 @@ // #[cfg(target_family = "unix")] mod tests { - use std::any::Any; - use std::convert::TryFrom; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Arc; - use std::time::Duration; + use std::{ + any::Any, + convert::TryFrom, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, + }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ - core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohId}, + core::{CongestionControl, Encoding, EndPoint, Priority, WhatAmI, ZenohIdProto}, network::{ push::ext::{NodeIdType, QoSType}, NetworkMessage, Push, @@ -42,12 +47,12 @@ mod tests { // Transport Handler for the router struct SHPeer { - zid: ZenohId, + zid: ZenohIdProto, count: Arc<AtomicUsize>, } impl SHPeer { - fn new(zid: ZenohId) -> Self { + fn new(zid: ZenohIdProto) -> Self { Self { zid, count: Arc::new(AtomicUsize::new(0)), @@ -70,11 +75,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -131,8 +136,8 @@ mod tests { async fn transport_simultaneous(endpoint01: Vec<EndPoint>, endpoint02: Vec<EndPoint>) { /* [Peers] */ - let peer_id01 = ZenohId::try_from([2]).unwrap(); - let peer_id02 = ZenohId::try_from([3]).unwrap(); + let peer_id01 = ZenohIdProto::try_from([2]).unwrap(); + let peer_id02 = ZenohIdProto::try_from([3]).unwrap(); // Create the peer01 transport manager let peer_sh01 = Arc::new(SHPeer::new(peer_id01)); @@ -160,7 +165,7 @@ mod tests { println!("[Simultaneous 01a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer01_manager.get_listeners().await; + let locs = ztimeout!(peer01_manager.get_listeners()); println!("[Simultaneous 01b] => Getting endpoints: {endpoint01:?} {locs:?}"); assert_eq!(endpoint01.len(), locs.len()); @@ -170,7 +175,7 @@ mod tests { println!("[Simultaneous 02a] => Adding endpoint {e:?}: {res:?}"); assert!(res.is_ok()); } - let locs = peer02_manager.get_listeners().await; + let locs = ztimeout!(peer02_manager.get_listeners()); println!("[Simultaneous 02b] => Getting endpoints: {endpoint02:?} {locs:?}");
assert_eq!(endpoint02.len(), locs.len()); diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs new file mode 100644 index 0000000000..5c62235371 --- /dev/null +++ b/io/zenoh-transport/tests/unicast_time.rs @@ -0,0 +1,521 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + convert::TryFrom, + sync::Arc, + time::{Duration, Instant}, +}; + +use zenoh_core::ztimeout; +use zenoh_link::EndPoint; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; +use zenoh_result::ZResult; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, + DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, + TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, +}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +#[cfg(test)] +#[derive(Default)] +struct SHRouterOpenClose; + +impl TransportEventHandler for SHRouterOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult<Arc<dyn TransportPeerEventHandler>> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult<Arc<dyn TransportMulticastEventHandler>> { + panic!(); + } +} + +// Transport Handler for the client +struct SHClientOpenClose {} + +impl SHClientOpenClose { + fn new() -> Self { + Self {} + } +} + +impl TransportEventHandler for SHClientOpenClose { + fn new_unicast( + &self, + _peer: TransportPeer, + _transport: TransportUnicast, + ) -> ZResult<Arc<dyn TransportPeerEventHandler>> { + Ok(Arc::new(DummyTransportPeerEventHandler)) + } + + fn new_multicast( + &self, + _transport: TransportMulticast, + ) -> ZResult<Arc<dyn TransportMulticastEventHandler>> { + panic!(); + } +} + +async fn time_transport( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + lowlatency_transport: bool, +) { + if lowlatency_transport { + println!(">>> Low latency transport"); + } else { + println!(">>> Universal transport"); + } + /* [ROUTER] */ + let router_id = ZenohIdProto::try_from([1]).unwrap(); + + let router_handler = Arc::new(SHRouterOpenClose); + // Create the router transport manager + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let router_manager = TransportManager::builder() + .whatami(WhatAmI::Router) + .zid(router_id) + .unicast(unicast) + .build(router_handler.clone()) + .unwrap(); + + /* [CLIENT] */ + let client01_id = ZenohIdProto::try_from([2]).unwrap(); + + // Create the transport manager for the first client + let unicast = make_transport_manager_builder( + #[cfg(feature = "transport_multilink")] + 1, + #[cfg(feature = "shared-memory")] + false, + lowlatency_transport, + ) + .max_sessions(1); + let client01_manager = TransportManager::builder() + .whatami(WhatAmI::Client) + .zid(client01_id) + .unicast(unicast) +
.build(Arc::new(SHClientOpenClose::new())) + .unwrap(); + + /* [1] */ + // Add the locator on the router + let start = Instant::now(); + ztimeout!(router_manager.add_listener(listen_endpoint.clone())).unwrap(); + println!("Add listener {}: {:#?}", listen_endpoint, start.elapsed()); + + // Open a transport from the client to the router + let start = Instant::now(); + let c_ses1 = + ztimeout_expected!(client01_manager.open_transport_unicast(connect_endpoint.clone())) + .unwrap(); + println!( + "Open transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been open on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let s = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); + + match s { + Some(s) => { + let links = s.get_links().unwrap(); + assert_eq!(links.len(), 1); + break; + } + None => tokio::time::sleep(SLEEP).await, + } + } + }); + + /* [2] */ + // Close the open transport on the client + let start = Instant::now(); + ztimeout!(c_ses1.close()).unwrap(); + println!( + "Close transport {}: {:#?}", + connect_endpoint, + start.elapsed() + ); + + // Verify that the transport has been closed also on the router + ztimeout!(async { + loop { + let transports = ztimeout!(router_manager.get_transports_unicast()); + let index = transports + .iter() + .find(|s| s.get_zid().unwrap() == client01_id); + if index.is_none() { + break; + } + tokio::time::sleep(SLEEP).await; + } + }); + + /* [3] */ + let start = Instant::now(); + ztimeout!(router_manager.del_listener(listen_endpoint)).unwrap(); + println!( + "Delete listener {}: {:#?}", + listen_endpoint, + start.elapsed() + ); + + ztimeout!(async { + while !router_manager.get_listeners().await.is_empty() { + tokio::time::sleep(SLEEP).await; + } + }); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; + + ztimeout!(router_manager.close()); + ztimeout!(client01_manager.close()); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, false).await +} + +async fn time_lowlatency_transport(endpoint: &EndPoint) { + time_transport(endpoint, endpoint, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_transport() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_transport() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + 
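The new `unicast_time.rs` tests above follow one pattern worth noting: operations that are expected to complete promptly (adding a listener, opening a transport) are wrapped in the `ztimeout_expected!` macro, which bounds them with the short `TIMEOUT_EXPECTED` (5 s) rather than the general 60 s `TIMEOUT` used by `ztimeout!`, so a hang is reported quickly. Below is a minimal, self-contained sketch of that pattern, assuming a tokio dev-dependency with the `macros` and `time` features; the constant and macro mirror the diff, while `quick_op` and the test itself are hypothetical stand-ins:

```rust
use std::time::Duration;

const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5);

// Bound a future that is expected to finish promptly; panic if it times out
// (mirrors the `ztimeout_expected!` macro introduced in unicast_time.rs).
macro_rules! ztimeout_expected {
    ($f:expr) => {
        tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap()
    };
}

#[tokio::test]
async fn expected_to_be_fast() {
    // `quick_op` is a hypothetical stand-in for an operation under test,
    // e.g. opening a transport to a listening router.
    async fn quick_op() -> u32 {
        42
    }
    assert_eq!(ztimeout_expected!(quick_op()), 42);
}
```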
+#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_ws")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_ws_only_with_lowlatency_transport() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_transport() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" + .parse() + .unwrap(); + time_lowlatency_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unix_only() { + zenoh_util::try_init_log_from_env(); + let f1 = "zenoh-test-unix-socket-9.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_transport(&endpoint).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only() { + use zenoh_link::tls::config::*; + zenoh_util::try_init_log_from_env(); + // NOTE: this is an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain.
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 
+T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT 
+He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_transport(&endpoint).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only() { + zenoh_util::try_init_log_from_env(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); + time_lowlatency_transport(&endpoint).await; +} diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index 4ddacef6bc..1c5d749b59 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -11,21 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::fmt::Write as _; use std::{ any::Any, convert::TryFrom, + fmt::Write as _, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; + use zenoh_core::ztimeout; use zenoh_link::Link; use zenoh_protocol::{ core::{ - Channel, CongestionControl, Encoding, EndPoint, Priority, Reliability, WhatAmI, ZenohId, + Channel, CongestionControl, Encoding, EndPoint, Priority, 
Reliability, WhatAmI, + ZenohIdProto, }, network::{ push::ext::{NodeIdType, QoSType}, @@ -230,13 +232,13 @@ const SLEEP_COUNT: Duration = Duration::from_millis(10); const MSG_COUNT: usize = 1_000; const MSG_SIZE_ALL: [usize; 2] = [1_024, 131_072]; -const MSG_SIZE_LOWLATENCY: [usize; 2] = [1_024, 65000]; #[cfg(any( feature = "transport_tcp", feature = "transport_udp", feature = "transport_unixsock-stream", ))] const MSG_SIZE_NOFRAG: [usize; 1] = [1_024]; +const MSG_SIZE_LOWLATENCY: [usize; 1] = MSG_SIZE_NOFRAG; // Transport Handler for the router struct SHRouter { @@ -353,8 +355,8 @@ async fn open_transport_unicast( TransportUnicast, ) { // Define client and router IDs - let client_id = ZenohId::try_from([1]).unwrap(); - let router_id = ZenohId::try_from([2]).unwrap(); + let client_id = ZenohIdProto::try_from([1]).unwrap(); + let router_id = ZenohIdProto::try_from([2]).unwrap(); // Create the router transport manager let router_handler = Arc::new(SHRouter::default()); @@ -400,10 +402,7 @@ async fn open_transport_unicast( let _ = ztimeout!(client_manager.open_transport_unicast(e.clone())).unwrap(); } - let client_transport = client_manager - .get_transport_unicast(&router_id) - .await - .unwrap(); + let client_transport = ztimeout!(client_manager.get_transport_unicast(&router_id)).unwrap(); // Return the handlers ( @@ -475,11 +474,11 @@ async fn test_transport( wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::default(), + ext_nodeid: NodeIdType::DEFAULT, payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -543,9 +542,7 @@ async fn run_single( { let c_stats = client_transport.get_stats().unwrap().report(); println!("\tClient: {:?}", c_stats); - let r_stats = router_manager - .get_transport_unicast(&client_manager.config.zid) - .await + let r_stats = ztimeout!(router_manager.get_transport_unicast(&client_manager.config.zid)) .unwrap() .get_stats() .map(|s| s.report()) @@ -618,7 +615,7 @@ async fn transport_unicast_tcp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -640,7 +637,7 @@ async fn transport_unicast_tcp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -665,7 +662,7 @@ async fn transport_unicast_udp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -687,7 +684,7 @@ async fn transport_unicast_udp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -711,7 +708,7 @@ async fn transport_unicast_unix_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -737,7 +734,7 @@ async fn transport_unicast_unix_only_with_lowlatency_transport() { // Define the reliability and congestion 
control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -764,11 +761,11 @@ async fn transport_unicast_ws_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -794,11 +791,11 @@ async fn transport_unicast_ws_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -827,7 +824,7 @@ async fn transport_unicast_unixpipe_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -853,7 +850,7 @@ async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { @@ -880,7 +877,7 @@ async fn transport_unicast_tcp_udp() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -912,7 +909,7 @@ async fn transport_unicast_tcp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -946,7 +943,7 @@ async fn transport_unicast_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -983,7 +980,7 @@ async fn transport_unicast_tcp_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1008,25 +1005,25 @@ async fn transport_unicast_tls_only_server() { let mut endpoint: EndPoint = format!("tls/localhost:{}", 16070).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1053,25 +1050,25 @@ async fn transport_unicast_quic_only_server() { let mut endpoint: EndPoint = format!("quic/localhost:{}", 16080).parse().unwrap(); endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), (TLS_SERVER_PRIVATE_KEY_RAW, SERVER_KEY), ] .iter() - .map(|(k, v)| ((*k).to_owned(), 
(*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1101,7 +1098,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut client_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1109,7 +1106,7 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1117,7 +1114,7 @@ async fn transport_unicast_tls_only_mutual_success() { let mut server_endpoint: EndPoint = ("tls/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1125,17 +1122,17 @@ async fn transport_unicast_tls_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1163,6 +1160,7 @@ async fn transport_unicast_tls_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { use std::vec; + use zenoh_link::tls::config::*; zenoh_util::try_init_log_from_env(); @@ -1171,18 +1169,14 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .extend( - [(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)] - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) + .extend_from_iter([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("tls/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1190,17 +1184,17 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { (TLS_CLIENT_AUTH, "true"), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1241,7 +1235,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); client_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1253,7 +1247,7 @@ fn 
transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1261,7 +1255,7 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("tls/localhost:10463").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1269,17 +1263,17 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::Reliable, }, Channel { - priority: Priority::default(), + priority: Priority::DEFAULT, reliability: Reliability::BestEffort, }, Channel { @@ -1320,7 +1314,7 @@ async fn transport_unicast_quic_only_mutual_success() { let mut client_endpoint: EndPoint = ("quic/localhost:10461").parse().unwrap(); client_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), (TLS_CLIENT_CERTIFICATE_RAW, CLIENT_CERT), @@ -1328,7 +1322,7 @@ async fn transport_unicast_quic_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1336,7 +1330,7 @@ async fn transport_unicast_quic_only_mutual_success() { let mut server_endpoint: EndPoint = ("quic/localhost:10461").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1344,7 +1338,7 @@ async fn transport_unicast_quic_only_mutual_success() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1382,6 +1376,7 @@ async fn transport_unicast_quic_only_mutual_success() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { use std::vec; + use zenoh_link::quic::config::*; zenoh_util::try_init_log_from_env(); @@ -1390,18 +1385,14 @@ async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { let mut client_endpoint: EndPoint = ("quic/localhost:10462").parse().unwrap(); client_endpoint .config_mut() - .extend( - [(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)] - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) + .extend_from_iter([(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA)].iter().copied()) .unwrap(); // Define the locator let mut server_endpoint: EndPoint = ("quic/localhost:10462").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1409,7 +1400,7 @@ async fn transport_unicast_quic_only_mutual_no_client_certs_failure() { (TLS_CLIENT_AUTH, "true"), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control @@ -1460,7 +1451,7 @@ fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { let mut client_endpoint: EndPoint = ("quic/localhost:10463").parse().unwrap(); client_endpoint .config_mut() - .extend( + .extend_from_iter( [ 
(TLS_ROOT_CA_CERTIFICATE_RAW, SERVER_CA), // Using the SERVER_CERT and SERVER_KEY in the client to simulate the case the client has @@ -1472,7 +1463,7 @@ fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); @@ -1480,7 +1471,7 @@ fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { let mut server_endpoint: EndPoint = ("quic/localhost:10463").parse().unwrap(); server_endpoint .config_mut() - .extend( + .extend_from_iter( [ (TLS_ROOT_CA_CERTIFICATE_RAW, CLIENT_CA), (TLS_SERVER_CERTIFICATE_RAW, SERVER_CERT), @@ -1488,7 +1479,7 @@ fn transport_unicast_quic_only_mutual_wrong_client_certs_failure() { (TLS_CLIENT_AUTH, client_auth), ] .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), + .copied(), ) .unwrap(); // Define the reliability and congestion control diff --git a/plugins/zenoh-backend-example/Cargo.toml b/plugins/zenoh-backend-example/Cargo.toml index 5ca4d3096b..9f548e1187 100644 --- a/plugins/zenoh-backend-example/Cargo.toml +++ b/plugins/zenoh-backend-example/Cargo.toml @@ -20,7 +20,7 @@ edition = { workspace = true } publish = false [features] -default = ["dynamic_plugin", "zenoh/default"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -29,16 +29,13 @@ name = "zenoh_backend_example" crate-type = ["cdylib"] [dependencies] -async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } futures = { workspace = true } git-version = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } +tokio = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true } -zenoh-core = { workspace = true } +zenoh = { workspace = true, features = ["default"] } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } -zenoh-util = { workspace = true } async-trait = { workspace = true } zenoh_backend_traits = { workspace = true } diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index aef889c64e..b9e670b799 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -11,20 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::RwLock; +use std::collections::{hash_map::Entry, HashMap}; + use async_trait::async_trait; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; -use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; +use tokio::sync::RwLock; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, prelude::*, time::Timestamp}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, VolumeInstance, }; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; -use zenoh_result::ZResult; #[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(ExampleBackend); @@ -71,12 +68,6 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult<Box<dyn Storage>> { Ok(Box::<ExampleStorage>::default()) } - fn incoming_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>> { - None - } - fn outgoing_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>> { - None - } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index f2b8a4a1eb..766f52d609 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml
+++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -27,16 +27,16 @@ description = "Zenoh: traits to be implemented by backends libraries" maintenance = { status = "actively-developed" } [dependencies] -async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true } +zenoh = { workspace = true, features = ["unstable", "internal"] } zenoh-result = { workspace = true } zenoh-util = { workspace = true } schemars = { workspace = true } zenoh-plugin-trait = { workspace = true } const_format = { workspace = true } +either = { workspace = true } [features] default = [] diff --git a/plugins/zenoh-backend-traits/src/config.rs b/plugins/zenoh-backend-traits/src/config.rs index ca97e4791f..e440e3014e 100644 --- a/plugins/zenoh-backend-traits/src/config.rs +++ b/plugins/zenoh-backend-traits/src/config.rs @@ -11,15 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{convert::TryFrom, time::Duration}; + use const_format::concatcp; use derive_more::{AsMut, AsRef}; +use either::Either; use schemars::JsonSchema; use serde_json::{Map, Value}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::{key_expr::keyexpr, prelude::OwnedKeyExpr, Result as ZResult}; +use zenoh::{ + key_expr::{keyexpr, OwnedKeyExpr}, + Result as ZResult, +}; use zenoh_plugin_trait::{PluginStartArgs, StructVersion}; use zenoh_result::{bail, zerror, Error}; +use zenoh_util::LibSearchDirs; #[derive(JsonSchema, Debug, Clone, AsMut, AsRef)] pub struct PluginConfig { @@ -27,7 +32,9 @@ pub struct PluginConfig { pub name: String, #[schemars(with = "Option<bool>")] pub required: bool, - pub backend_search_dirs: Option<Vec<String>>, + // REVIEW: This is inconsistent with `plugins_loading/search_dirs` + #[schemars(with = "Option<Vec<Either<HashMap<String, String>, String>>>")] + pub backend_search_dirs: LibSearchDirs, #[schemars(with = "Map<String, Value>")] pub volumes: Vec<VolumeConfig>, #[schemars(with = "Map<String, Value>")] @@ -158,16 +165,18 @@ impl<S: Into<String> + AsRef<str>, V: AsObject> TryFrom<(S, &V)> for PluginConfi }) .unwrap_or(Ok(true))?; let backend_search_dirs = match value.get("backend_search_dirs") { - Some(serde_json::Value::String(path)) => Some(vec![path.clone()]), + Some(serde_json::Value::String(path)) => LibSearchDirs::from_paths(&[path.clone()]), Some(serde_json::Value::Array(paths)) => { - let mut result = Vec::with_capacity(paths.len()); + let mut specs = Vec::with_capacity(paths.len()); for path in paths { - let path = if let serde_json::Value::String(path) = path {path} else {bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref())}; - result.push(path.clone()); + let serde_json::Value::String(path) = path else { + bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref()); + }; + specs.push(path.clone()); } - Some(result) + LibSearchDirs::from_specs(&specs)? } - None => None, + None => LibSearchDirs::default(), _ => bail!("`backend_search_dirs` field of {}'s configuration must be a string or array of strings", name.as_ref()) }; let volumes = match value.get("volumes") { diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index f185aaa259..a75d934050 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -29,11 +29,9 @@ //! ``` //! use std::sync::Arc; //! use async_trait::async_trait; -//! use zenoh::prelude::r#async::*; -//! use zenoh::time::Timestamp; +//!
use zenoh::{key_expr::OwnedKeyExpr, prelude::*, time::Timestamp, internal::Value}; //! use zenoh_backend_traits::*; //! use zenoh_backend_traits::config::*; -//! use zenoh::Result as ZResult; //! //! #[no_mangle] //! pub fn create_volume(config: VolumeConfig) -> ZResult<Box<dyn Volume>> { @@ -68,16 +66,6 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } -//! -//! fn incoming_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>> { -//! // No interception point for incoming data (on PUT operations) -//! None -//! } -//! -//! fn outgoing_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>> { -//! // No interception point for outgoing data (on GET operations) -//! None -//! } //! } //! //! // Your Storage implementation @@ -135,12 +123,12 @@ use async_trait::async_trait; use const_format::concatcp; -use std::sync::Arc; -use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; -use zenoh::queryable::ReplyBuilder; -use zenoh::time::Timestamp; -use zenoh::value::Value; -pub use zenoh::Result as ZResult; +use zenoh::{ + internal::Value, + key_expr::{keyexpr, OwnedKeyExpr}, + time::Timestamp, + Result as ZResult, +}; use zenoh_plugin_trait::{PluginControl, PluginInstance, PluginStatusRec, StructVersion}; use zenoh_util::concat_enabled_features; @@ -209,14 +197,6 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult<Box<dyn Storage>>; - - /// Returns an interceptor that will be called before pushing any data - /// into a storage created by this backend. `None` can be returned for no interception point. - fn incoming_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>; - - /// Returns an interceptor that will be called before sending any reply - /// to a query from a storage created by this backend. `None` can be returned for no interception point. - fn outgoing_data_interceptor(&self) -> Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>; } pub type VolumeInstance = Box<dyn Volume + 'static>; @@ -231,7 +211,7 @@ impl StructVersion for VolumeInstance { } impl PluginControl for VolumeInstance { - fn plugins_status(&self, _names: &zenoh::prelude::keyexpr) -> Vec<PluginStatusRec> { + fn plugins_status(&self, _names: &keyexpr) -> Vec<PluginStatusRec> { Vec::new() } } @@ -245,7 +225,7 @@ pub trait Storage: Send + Sync { /// on the administration space for this storage. fn get_admin_status(&self) -> serde_json::Value; - /// Function called for each incoming data ([`Sample`]) to be stored in this storage. + /// Function called for each incoming data ([`Sample`](zenoh::sample::Sample)) to be stored in this storage. /// A key can be `None` if it matches the `strip_prefix` exactly. /// In order to avoid data loss, the storage must store the `value` and `timestamp` associated with the `None` key /// in a manner suitable for the given backend technology @@ -281,49 +261,3 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult<Vec<(Option<OwnedKeyExpr>, Timestamp)>>; } - -/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the -/// OutgoingDataInterceptor (if any) before sending the reply -pub struct Query { - q: zenoh::queryable::Query, - interceptor: Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>, -} - -impl Query { - pub fn new( - q: zenoh::queryable::Query, - interceptor: Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>, - ) -> Query { - Query { q, interceptor } - } - - /// The full [`Selector`] of this Query.
- #[inline(always)] - pub fn selector(&self) -> Selector<'_> { - self.q.selector() - } - - /// The key selector part of this Query. - #[inline(always)] - pub fn key_expr(&self) -> &KeyExpr<'static> { - self.q.key_expr() - } - - /// This Query's selector parameters. - #[inline(always)] - pub fn parameters(&self) -> &str { - self.q.parameters() - } - - /// Sends a Sample as a reply to this Query - pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { - // Call outgoing intercerceptor - let sample = if let Some(ref interceptor) = self.interceptor { - interceptor(sample) - } else { - sample - }; - // Send reply - self.q.reply(Ok(sample)) - } -} diff --git a/plugins/zenoh-plugin-example/Cargo.toml b/plugins/zenoh-plugin-example/Cargo.toml index 7cb0ad66f6..5341adcf8c 100644 --- a/plugins/zenoh-plugin-example/Cargo.toml +++ b/plugins/zenoh-plugin-example/Cargo.toml @@ -20,7 +20,7 @@ edition = { workspace = true } publish = false [features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -34,14 +34,18 @@ name = "zenoh_plugin_example" crate-type = ["cdylib"] [dependencies] -async-std = { workspace = true, features = ["default"] } const_format = { workspace = true } -zenoh-util = {workspace = true } +zenoh-util = { workspace = true } futures = { workspace = true } +lazy_static = { workspace = true } git-version = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } +tokio = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } -zenoh-core = { workspace = true } +zenoh = { workspace = true, features = [ + "default", + "plugins", + "internal", + "unstable", +] } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index 304a3c6338..b7c494946d 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -13,20 +13,58 @@ // #![recursion_limit = "256"] -use futures::select; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::{ - atomic::{AtomicBool, Ordering::Relaxed}, - Arc, Mutex, +use std::{ + borrow::Cow, + collections::HashMap, + convert::TryFrom, + future::Future, + sync::{ + atomic::{AtomicBool, Ordering::Relaxed}, + Arc, Mutex, + }, }; + +use futures::select; use tracing::{debug, info}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; -use zenoh::runtime::Runtime; -use zenoh_core::zlock; +use zenoh::{ + internal::{ + bail, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + zlock, + }, + key_expr::{keyexpr, KeyExpr}, + prelude::ZResult, + sample::Sample, + session::SessionDeclarations, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; -use zenoh_result::{bail, ZResult}; + +const WORKER_THREAD_NUM: usize = 2; +const MAX_BLOCK_THREAD_NUM: usize = 50; +lazy_static::lazy_static! 
{ + // The global runtime is used in the dynamic plugins, where we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM) + .enable_all() + .build() + .expect("Unable to create runtime"); +} +#[inline(always)] +fn spawn_runtime(task: impl Future<Output = ()> + Send + 'static) { + // Check whether able to get the current runtime + match tokio::runtime::Handle::try_current() { + Ok(rt) => { + // Able to get the current runtime (standalone binary), spawn on the current runtime + rt.spawn(task); + } + Err(_) => { + // Unable to get the current runtime (dynamic plugins), spawn on the global runtime + TOKIO_RUNTIME.spawn(task); + } + } +} // The struct implementing the Plugin and ZenohPlugin traits pub struct ExamplePlugin {} @@ -42,7 +80,7 @@ const DEFAULT_SELECTOR: &str = "demo/example/**"; impl ZenohPlugin for ExamplePlugin {} impl Plugin for ExamplePlugin { type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = zenoh::internal::plugins::RunningPlugin; // A mandatory const to define, in case the plugin is built as a standalone executable const DEFAULT_NAME: &'static str = "example"; @@ -67,8 +105,7 @@ impl Plugin for ExamplePlugin { // a flag to end the plugin's loop when the plugin is removed from the config let flag = Arc::new(AtomicBool::new(true)); - // spawn the task running the plugin's loop - async_std::task::spawn(run(runtime.clone(), selector, flag.clone())); + spawn_runtime(run(runtime.clone(), selector, flag.clone())); // return a RunningPlugin to zenohd Ok(Box::new(RunningPlugin(Arc::new(Mutex::new( RunningPluginInner { @@ -111,11 +148,7 @@ impl RunningPluginTrait for RunningPlugin { match KeyExpr::try_from(selector.clone()) { Err(e) => tracing::error!("{}", e), Ok(selector) => { - async_std::task::spawn(run( - guard.runtime.clone(), - selector, - guard.flag.clone(), - )); + spawn_runtime(run(guard.runtime.clone(), selector, guard.flag.clone())); } } return Ok(None); @@ -143,7 +176,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc<AtomicBool>) { zenoh_util::try_init_log_from_env(); // create a zenoh Session that shares the same Runtime as zenohd - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); // the HashMap used as a storage by this example of a storage plugin let mut stored: HashMap<String, Sample> = HashMap::new(); @@ -152,11 +185,11 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc<AtomicBool>) { // This storage plugin subscribes to the selector and will store in HashMap the received samples debug!("Create Subscriber on {}", selector); - let sub = session.declare_subscriber(&selector).res().await.unwrap(); + let sub = session.declare_subscriber(&selector).await.unwrap(); // This storage plugin declares a Queryable that will reply to queries with the samples stored in the HashMap debug!("Create Queryable on {}", selector); - let queryable = session.declare_queryable(&selector).res().await.unwrap(); + let queryable = session.declare_queryable(&selector).await.unwrap(); // Plugin's event loop, while the flag is true while flag.load(Relaxed) { @@ -164,16 +197,17 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc<AtomicBool>) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - info!("Received data ('{}': '{}')", sample.key_expr, sample.value); -
stored.insert(sample.key_expr.to_string(), sample); + let payload = sample.payload().deserialize::>().unwrap_or_else(|e| Cow::from(e.to_string())); + info!("Received data ('{}': '{}')", sample.key_expr(), payload); + stored.insert(sample.key_expr().to_string(), sample); }, // on query received by the Queryable query = queryable.recv_async() => { let query = query.unwrap(); info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { - if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply(Ok(sample.clone())).res().await.unwrap(); + if query.key_expr().intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { + query.reply_sample(sample.clone()).await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/Cargo.toml b/plugins/zenoh-plugin-rest/Cargo.toml index 989bd1b86d..ba35c43c86 100644 --- a/plugins/zenoh-plugin-rest/Cargo.toml +++ b/plugins/zenoh-plugin-rest/Cargo.toml @@ -24,7 +24,7 @@ categories = ["network-programming", "web-programming::http-server"] description = "The zenoh REST plugin" [features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -33,23 +33,26 @@ crate-type = ["cdylib", "rlib"] [dependencies] anyhow = { workspace = true, features = ["default"] } -async-std = { workspace = true, features = ["default", "attributes"] } base64 = { workspace = true } const_format = { workspace = true } -zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } http-types = { workspace = true } lazy_static = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } schemars = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } tide = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +tokio = { workspace = true } +zenoh = { workspace = true, features = [ + "plugins", + "default", + "internal", + "unstable", +] } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } [build-dependencies] rustc_version = { workspace = true } @@ -71,4 +74,4 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1)" diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index e7d44d8ce4..aefdfd4f86 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -11,11 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Command}; use std::time::Duration; -use zenoh::prelude::r#async::*; -use zenoh::publication::CongestionControl; -use zenoh::{config::Config, key_expr::keyexpr}; + +use clap::{arg, Command}; +use zenoh::{ + config::Config, + key_expr::keyexpr, + qos::{CongestionControl, QoSBuilderTrait}, + session::SessionDeclarations, +}; const HTML: &str = r#"
@@ -30,30 +34,26 @@ if(typeof(EventSource) !== "undefined") { } "#; -#[async_std::main] +#[tokio::main] async fn main() { // initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let config = parse_args(); let key = keyexpr::new("demo/sse").unwrap(); let value = "Pub from sse server!"; println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring Queryable on '{key}'..."); - let queryable = session.declare_queryable(key).res().await.unwrap(); + let queryable = session.declare_queryable(key).await.unwrap(); - async_std::task::spawn({ - let receiver = queryable.receiver.clone(); + tokio::task::spawn({ + let receiver = queryable.handler().clone(); async move { while let Ok(request) = receiver.recv_async().await { - request - .reply(Ok(Sample::new(key, HTML))) - .res() - .await - .unwrap(); + request.reply(key, HTML).await.unwrap(); } } }); @@ -64,7 +64,6 @@ async fn main() { let publisher = session .declare_publisher(&event_key) .congestion_control(CongestionControl::Block) - .res() .await .unwrap(); @@ -75,12 +74,8 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher - .put(Value::from(value).encoding(KnownEncoding::TextPlain.into())) - .res() - .await - .unwrap(); - async_std::task::sleep(Duration::from_secs(1)).await; + publisher.put(value).await.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; } } @@ -115,13 +110,15 @@ fn parse_args() -> Config { config .connect .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if let Some(values) = args.get_many::<&String>("listen") { config .listen .endpoints - .extend(values.into_iter().map(|v| v.parse().unwrap())) + .set(values.into_iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if args.get_flag("no-multicast-scouting") { config.scouting.multicast.set_enabled(Some(false)).unwrap(); diff --git a/plugins/zenoh-plugin-rest/src/config.rs b/plugins/zenoh-plugin-rest/src/config.rs index 4ef28e9cf6..d215b8a5a7 100644 --- a/plugins/zenoh-plugin-rest/src/config.rs +++ b/plugins/zenoh-plugin-rest/src/config.rs @@ -11,18 +11,28 @@ // Contributors: // ZettaScale Zenoh Team, // -use schemars::JsonSchema; -use serde::de::{Unexpected, Visitor}; -use serde::{de, Deserialize, Deserializer}; use std::fmt; +use schemars::JsonSchema; +use serde::{ + de, + de::{Unexpected, Visitor}, + Deserialize, Deserializer, +}; + const DEFAULT_HTTP_INTERFACE: &str = "[::]"; +pub const DEFAULT_WORK_THREAD_NUM: usize = 2; +pub const DEFAULT_MAX_BLOCK_THREAD_NUM: usize = 50; #[derive(JsonSchema, Deserialize, serde::Serialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub struct Config { #[serde(deserialize_with = "deserialize_http_port")] pub http_port: String, + #[serde(default = "default_work_thread_num")] + pub work_thread_num: usize, + #[serde(default = "default_max_block_thread_num")] + pub max_block_thread_num: usize, #[serde(default, deserialize_with = "deserialize_path")] __path__: Option>, __required__: Option, @@ -43,6 +53,14 @@ where deserializer.deserialize_any(HttpPortVisitor) } +fn default_work_thread_num() -> usize { + DEFAULT_WORK_THREAD_NUM +} + +fn default_max_block_thread_num() -> usize { + DEFAULT_MAX_BLOCK_THREAD_NUM +} + struct HttpPortVisitor; impl<'de> Visitor<'de> for HttpPortVisitor { diff --git a/plugins/zenoh-plugin-rest/src/lib.rs 
b/plugins/zenoh-plugin-rest/src/lib.rs index f683e3992a..eb65a991d6 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -17,28 +17,43 @@ //! This crate is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use async_std::prelude::FutureExt; -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; +use std::{ + borrow::Cow, + convert::TryFrom, + future::Future, + str::FromStr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use base64::Engine; use futures::StreamExt; use http_types::Method; -use std::convert::TryFrom; -use std::str::FromStr; -use std::sync::Arc; -use tide::http::Mime; -use tide::sse::Sender; -use tide::{Request, Response, Server, StatusCode}; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::r#async::*; -use zenoh::properties::Properties; -use zenoh::query::{QueryConsolidation, Reply}; -use zenoh::runtime::Runtime; -use zenoh::selector::TIME_RANGE_KEY; -use zenoh::Session; +use serde::{Deserialize, Serialize}; +use tide::{http::Mime, sse::Sender, Request, Response, Server, StatusCode}; +use tokio::time::timeout; +use zenoh::{ + bytes::{Encoding, ZBytes}, + internal::{ + bail, + plugins::{RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + zerror, + }, + key_expr::{keyexpr, KeyExpr}, + prelude::*, + query::{Parameters, QueryConsolidation, Reply, Selector, ZenohParameters}, + sample::{Sample, SampleKind}, + session::{Session, SessionDeclarations}, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; -use zenoh_result::{bail, zerror, ZResult}; mod config; pub use config::Config; +use zenoh::{bytes::EncodingBuilderTrait, query::ReplyError}; const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); lazy_static::lazy_static! { @@ -46,93 +61,129 @@ lazy_static::lazy_static! { } const RAW_KEY: &str = "_raw"; -fn value_to_json(value: Value) -> String { - // @TODO: transcode to JSON when implemented in Value - match &value.encoding { - p if p.starts_with(KnownEncoding::TextPlain) - || p.starts_with(KnownEncoding::AppXWwwFormUrlencoded) => - { - // convert to Json string for special characters escaping - serde_json::json!(value.to_string()).to_string() - } - p if p.starts_with(KnownEncoding::AppProperties) => { - // convert to Json string for special characters escaping - serde_json::json!(*Properties::from(value.to_string())).to_string() - } - p if p.starts_with(KnownEncoding::AppJson) - || p.starts_with(KnownEncoding::AppInteger) - || p.starts_with(KnownEncoding::AppFloat) => - { - value.to_string() +lazy_static::lazy_static! 
{ + static ref WORKER_THREAD_NUM: AtomicUsize = AtomicUsize::new(config::DEFAULT_WORK_THREAD_NUM); + static ref MAX_BLOCK_THREAD_NUM: AtomicUsize = AtomicUsize::new(config::DEFAULT_MAX_BLOCK_THREAD_NUM); + // The global runtime is used in the dynamic plugins, which we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM.load(Ordering::SeqCst)) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM.load(Ordering::SeqCst)) + .enable_all() + .build() + .expect("Unable to create runtime"); +} +#[inline(always)] +pub(crate) fn blockon_runtime(task: F) -> F::Output { + // Check whether able to get the current runtime + match tokio::runtime::Handle::try_current() { + Ok(rt) => { + // Able to get the current runtime (standalone binary), use the current runtime + tokio::task::block_in_place(|| rt.block_on(task)) } - _ => { - format!(r#""{}""#, b64_std_engine.encode(value.payload.contiguous())) + Err(_) => { + // Unable to get the current runtime (dynamic plugins), reuse the global runtime + tokio::task::block_in_place(|| TOKIO_RUNTIME.block_on(task)) } } } -fn sample_to_json(sample: Sample) -> String { - let encoding = sample.value.encoding.to_string(); - format!( - r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, - sample.key_expr.as_str(), - value_to_json(sample.value), - encoding, - if let Some(ts) = sample.timestamp { - ts.to_string() - } else { - "None".to_string() +#[derive(Serialize, Deserialize)] +struct JSONSample { + key: String, + value: serde_json::Value, + encoding: String, + timestamp: Option, +} + +pub fn base64_encode(data: &[u8]) -> String { + use base64::engine::general_purpose; + general_purpose::STANDARD.encode(data) +} + +fn payload_to_json(payload: &ZBytes, encoding: &Encoding) -> serde_json::Value { + match payload.is_empty() { + // If the value is empty return a JSON null + true => serde_json::Value::Null, + // if it is not check the encoding + false => { + match encoding { + // If it is a JSON try to deserialize as json, if it fails fallback to base64 + &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { + payload + .deserialize::() + .unwrap_or_else(|e| { + tracing::warn!("Encoding is JSON but data is not JSON, converting to base64, Error: {e:?}"); + serde_json::Value::String(base64_encode(&Cow::from(payload))) + }) + } + &Encoding::TEXT_PLAIN | &Encoding::ZENOH_STRING => serde_json::Value::String( + payload + .deserialize::() + .unwrap_or_else(|e| { + tracing::warn!("Encoding is String but data is not String, converting to base64, Error: {e:?}"); + base64_encode(&Cow::from(payload)) + }), + ), + // otherwise convert to JSON string + _ => serde_json::Value::String(base64_encode(&Cow::from(payload))), + } } - ) + } +} + +fn sample_to_json(sample: &Sample) -> JSONSample { + JSONSample { + key: sample.key_expr().as_str().to_string(), + value: payload_to_json(sample.payload(), sample.encoding()), + encoding: sample.encoding().to_string(), + timestamp: sample.timestamp().map(|ts| ts.to_string()), + } } -fn result_to_json(sample: Result) -> String { +fn result_to_json(sample: Result<&Sample, &ReplyError>) -> JSONSample { match sample { Ok(sample) => sample_to_json(sample), - Err(err) => { - let encoding = err.encoding.to_string(); - format!( - r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, - value_to_json(err), - encoding, - ) - } + Err(err) => JSONSample { + key: "ERROR".into(), + value: payload_to_json(err.payload(), 
err.encoding()), + encoding: err.encoding().to_string(), + timestamp: None, + }, } } async fn to_json(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) - .collect::>() - .await - .join(",\n"); - format!("[\n{values}\n]\n") + .filter_map(move |reply| async move { Some(result_to_json(reply.result())) }) + .collect::>() + .await; + + serde_json::to_string(&values).unwrap_or("[]".into()) } async fn to_json_response(results: flume::Receiver) -> Response { - response( - StatusCode::Ok, - Mime::from_str("application/json").unwrap(), - &to_json(results).await, - ) + response(StatusCode::Ok, "application/json", &to_json(results).await) } -fn sample_to_html(sample: Sample) -> String { +fn sample_to_html(sample: &Sample) -> String { format!( "
<dt>{}</dt>\n<dd>{}</dd>
\n", - sample.key_expr.as_str(), - String::from_utf8_lossy(&sample.payload.contiguous()) + sample.key_expr().as_str(), + sample + .payload() + .deserialize::>() + .unwrap_or_default() ) } -fn result_to_html(sample: Result) -> String { +fn result_to_html(sample: Result<&Sample, &ReplyError>) -> String { match sample { Ok(sample) => sample_to_html(sample), Err(err) => { format!( "
<dt>ERROR</dt>\n<dd>{}</dd>
\n", - String::from_utf8_lossy(&err.payload.contiguous()) + err.payload().deserialize::>().unwrap_or_default() ) } } @@ -141,7 +192,7 @@ fn result_to_html(sample: Result) -> String { async fn to_html(results: flume::Receiver) -> String { let values = results .stream() - .filter_map(move |reply| async move { Some(result_to_html(reply.sample)) }) + .filter_map(move |reply| async move { Some(result_to_html(reply.result())) }) .collect::>() .await .join("\n"); @@ -154,16 +205,22 @@ async fn to_html_response(results: flume::Receiver) -> Response { async fn to_raw_response(results: flume::Receiver) -> Response { match results.recv_async().await { - Ok(reply) => match reply.sample { + Ok(reply) => match reply.result() { Ok(sample) => response( StatusCode::Ok, - sample.value.encoding.to_string().as_ref(), - String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), + Cow::from(sample.encoding()).as_ref(), + &sample + .payload() + .deserialize::>() + .unwrap_or_default(), ), Err(value) => response( StatusCode::Ok, - value.encoding.to_string().as_ref(), - String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), + Cow::from(value.encoding()).as_ref(), + &value + .payload() + .deserialize::>() + .unwrap_or_default(), ), }, Err(_) => response(StatusCode::Ok, "", ""), @@ -178,12 +235,17 @@ fn method_to_kind(method: Method) -> SampleKind { } } -fn response(status: StatusCode, content_type: impl TryInto, body: &str) -> Response { +fn response<'a, S: Into<&'a str> + std::fmt::Debug>( + status: StatusCode, + content_type: S, + body: &str, +) -> Response { + tracing::trace!("Outgoing Response: {status} - {content_type:?} - body: {body}"); let mut builder = Response::builder(status) .header("content-length", body.len().to_string()) .header("Access-Control-Allow-Origin", "*") .body(body); - if let Ok(mime) = content_type.try_into() { + if let Ok(mime) = Mime::from_str(content_type.into()) { builder = builder.content_type(mime); } builder.build() @@ -198,16 +260,19 @@ impl ZenohPlugin for RestPlugin {} impl Plugin for RestPlugin { type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = zenoh::internal::plugins::RunningPlugin; const DEFAULT_NAME: &'static str = "rest"; const PLUGIN_VERSION: &'static str = plugin_version!(); const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!(); - fn start(name: &str, runtime: &Self::StartArgs) -> ZResult { + fn start( + name: &str, + runtime: &Self::StartArgs, + ) -> ZResult { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
- zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); tracing::debug!("REST plugin {}", LONG_VERSION.as_str()); let runtime_conf = runtime.config().lock(); @@ -217,8 +282,14 @@ impl Plugin for RestPlugin { let conf: Config = serde_json::from_value(plugin_conf.clone()) .map_err(|e| zerror!("Plugin `{}` configuration error: {}", name, e))?; - let task = async_std::task::spawn(run(runtime.clone(), conf.clone())); - let task = async_std::task::block_on(task.timeout(std::time::Duration::from_millis(1))); + WORKER_THREAD_NUM.store(conf.work_thread_num, Ordering::SeqCst); + MAX_BLOCK_THREAD_NUM.store(conf.max_block_thread_num, Ordering::SeqCst); + + let task = run(runtime.clone(), conf.clone()); + let task = blockon_runtime(async { + timeout(Duration::from_millis(1), TOKIO_RUNTIME.spawn(task)).await + }); + if let Ok(Err(e)) = task { bail!("REST server failed within 1ms: {e}") } @@ -233,17 +304,14 @@ impl PluginControl for RunningPlugin {} impl RunningPluginTrait for RunningPlugin { fn adminspace_getter<'a>( &'a self, - selector: &'a Selector<'a>, + key_expr: &'a KeyExpr<'a>, plugin_status_key: &str, - ) -> ZResult> { + ) -> ZResult> { let mut responses = Vec::new(); let mut key = String::from(plugin_status_key); with_extended_string(&mut key, &["/version"], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(&selector.key_expr) - { - responses.push(zenoh::plugins::Response::new( + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { + responses.push(zenoh::internal::plugins::Response::new( key.clone(), GIT_VERSION.into(), )) @@ -252,9 +320,9 @@ impl RunningPluginTrait for RunningPlugin { with_extended_string(&mut key, &["/port"], |port_key| { if keyexpr::new(port_key.as_str()) .unwrap() - .intersects(&selector.key_expr) + .intersects(key_expr) { - responses.push(zenoh::plugins::Response::new( + responses.push(zenoh::internal::plugins::Response::new( port_key.clone(), (&self.0).into(), )) @@ -306,45 +374,32 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result {} Ok(Err(e)) => { - tracing::debug!( - "SSE error ({})! Unsubscribe and terminate (task {})", - e, - async_std::task::current().id() - ); - if let Err(e) = sub.undeclare().res().await { + tracing::debug!("SSE error ({})! Unsubscribe and terminate", e); + if let Err(e) = sub.undeclare().await { tracing::error!("Error undeclaring subscriber: {}", e); } break; } Err(_) => { - tracing::debug!( - "SSE timeout! Unsubscribe and terminate (task {})", - async_std::task::current().id() - ); - if let Err(e) = sub.undeclare().res().await { + tracing::debug!("SSE timeout! Unsubscribe and terminate",); + if let Err(e) = sub.undeclare().await { tracing::error!("Error undeclaring subscriber: {}", e); } break; @@ -369,31 +424,27 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result { if raw { Ok(to_raw_response(receiver).await) @@ -426,21 +477,19 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result session.put(&key_expr, bytes).encoding(encoding).await, + SampleKind::Delete => session.delete(&key_expr).await, + }; + match res { Ok(_) => Ok(Response::new(StatusCode::Ok)), Err(e) => Ok(response( StatusCode::InternalServerError, @@ -461,10 +510,10 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. 
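In `start()` above, the server future is spawned on the global tokio runtime and then polled with a 1 ms timeout, so that an immediate startup failure (for example, a REST port that cannot be bound) is reported synchronously to zenohd instead of being lost in a detached task. Condensed, the pattern used by this PR is:

    let task = run(runtime.clone(), conf.clone());
    let task = blockon_runtime(async {
        timeout(Duration::from_millis(1), TOKIO_RUNTIME.spawn(task)).await
    });
    if let Ok(Err(e)) = task {
        bail!("REST server failed within 1ms: {e}")
    }
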
- zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let zid = runtime.zid().to_string(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); let mut app = Server::with_state((Arc::new(session), zid)); app.with( @@ -500,10 +549,10 @@ pub async fn run(runtime: Runtime, conf: Config) -> ZResult<()> { fn path_to_key_expr<'a>(path: &'a str, zid: &str) -> ZResult> { let path = path.strip_prefix('/').unwrap_or(path); - if path == "@/router/local" { - KeyExpr::try_from(format!("@/router/{zid}")) - } else if let Some(suffix) = path.strip_prefix("@/router/local/") { - KeyExpr::try_from(format!("@/router/{zid}/{suffix}")) + if path == "@/local" { + KeyExpr::try_from(format!("@/{zid}")) + } else if let Some(suffix) = path.strip_prefix("@/local/") { + KeyExpr::try_from(format!("@/{zid}/{suffix}")) } else { KeyExpr::try_from(path) } diff --git a/plugins/zenoh-plugin-storage-manager/Cargo.toml b/plugins/zenoh-plugin-storage-manager/Cargo.toml index 058722965f..27458af929 100644 --- a/plugins/zenoh-plugin-storage-manager/Cargo.toml +++ b/plugins/zenoh-plugin-storage-manager/Cargo.toml @@ -24,7 +24,7 @@ categories = { workspace = true } description = "The zenoh storages plugin." [features] -default = ["dynamic_plugin", "zenoh/default", "zenoh/unstable", "zenoh/plugins"] +default = ["dynamic_plugin"] dynamic_plugin = [] [lib] @@ -32,27 +32,29 @@ name = "zenoh_plugin_storage_manager" crate-type = ["cdylib", "rlib"] [dependencies] -async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } crc = { workspace = true } const_format = { workspace = true } derive-new = { workspace = true } -zenoh-util = {workspace = true } flume = { workspace = true } futures = { workspace = true } git-version = { workspace = true } +lazy_static = { workspace = true } libloading = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde = { workspace = true, features = ["default"] } serde_json = { workspace = true } +tokio = { workspace = true } urlencoding = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } -zenoh-collections = { workspace = true } -zenoh-core = { workspace = true } -zenoh-keyexpr = { workspace = true } +zenoh = { workspace = true, features = [ + "default", + "plugins", + "internal", + "unstable", +] } zenoh-plugin-trait = { workspace = true } -zenoh-result = { workspace = true } zenoh_backend_traits = { workspace = true } +zenoh-util = { workspace = true } [build-dependencies] rustc_version = { workspace = true } @@ -71,4 +73,4 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1)" diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index e39929ecce..b789b563d2 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -11,28 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::storages_mgt::*; -use flume::Sender; use std::sync::Arc; -use zenoh::prelude::r#async::*; -use zenoh::Session; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::{Capability, VolumeInstance}; -use zenoh_result::ZResult; + +use flume::Sender; +use zenoh::{session::Session, Result as ZResult}; +use 
zenoh_backend_traits::{config::StorageConfig, Capability, VolumeInstance}; + +use super::storages_mgt::*; pub struct StoreIntercept { pub storage: Box, pub capability: Capability, - pub in_interceptor: Option Sample + Send + Sync>>, - pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, - in_interceptor: Option Sample + Send + Sync>>, - out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { tracing::trace!("Create storage '{}'", &admin_key); @@ -41,8 +36,6 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, - in_interceptor, - out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index b17e4dcb98..ac778f3633 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -19,39 +19,55 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) #![recursion_limit = "512"] -use async_std::task; +use std::{ + collections::HashMap, + convert::TryFrom, + str::FromStr, + sync::{Arc, Mutex}, +}; + use flume::Sender; use memory_backend::MemoryBackend; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::Arc; -use std::sync::Mutex; use storages_mgt::StorageMessage; -use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; -use zenoh::prelude::sync::*; -use zenoh::runtime::Runtime; -use zenoh::Session; -use zenoh_backend_traits::config::ConfigDiff; -use zenoh_backend_traits::config::PluginConfig; -use zenoh_backend_traits::config::StorageConfig; -use zenoh_backend_traits::config::VolumeConfig; -use zenoh_backend_traits::VolumeInstance; -use zenoh_core::zlock; -use zenoh_plugin_trait::plugin_long_version; -use zenoh_plugin_trait::plugin_version; -use zenoh_plugin_trait::Plugin; -use zenoh_plugin_trait::PluginControl; -use zenoh_plugin_trait::PluginReport; -use zenoh_plugin_trait::PluginStatusRec; -use zenoh_result::ZResult; -use zenoh_util::LibLoader; +use zenoh::{ + internal::{ + bail, + plugins::{Response, RunningPlugin, RunningPluginTrait, ZenohPlugin}, + runtime::Runtime, + zlock, LibLoader, + }, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, + prelude::Wait, + session::Session, + Result as ZResult, +}; +use zenoh_backend_traits::{ + config::{ConfigDiff, PluginConfig, StorageConfig, VolumeConfig}, + VolumeInstance, +}; +use zenoh_plugin_trait::{ + plugin_long_version, plugin_version, Plugin, PluginControl, PluginReport, PluginStatusRec, +}; mod backends_mgt; use backends_mgt::*; + mod memory_backend; mod replica; mod storages_mgt; +const WORKER_THREAD_NUM: usize = 2; +const MAX_BLOCK_THREAD_NUM: usize = 50; +lazy_static::lazy_static! 
{ + // The global runtime is used in the zenohd case, where we can't get the current runtime + static ref TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(WORKER_THREAD_NUM) + .max_blocking_threads(MAX_BLOCK_THREAD_NUM) + .enable_all() + .build() + .expect("Unable to create runtime"); +} + #[cfg(feature = "dynamic_plugin")] zenoh_plugin_trait::declare_plugin!(StoragesPlugin); @@ -63,10 +79,10 @@ impl Plugin for StoragesPlugin { const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!(); type StartArgs = Runtime; - type Instance = zenoh::plugins::RunningPlugin; + type Instance = RunningPlugin; fn start(name: &str, runtime: &Self::StartArgs) -> ZResult<Self::Instance> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); tracing::debug!("StorageManager plugin {}", Self::PLUGIN_VERSION); let config = { PluginConfig::try_from((name, runtime.config().lock().plugin(name).unwrap())) }?; @@ -90,8 +106,9 @@ struct StorageRuntimeInner { impl StorageRuntimeInner { fn status_key(&self) -> String { format!( - "@/router/{}/status/plugins/{}", + "@/{}/{}/status/plugins/{}", &self.runtime.zid(), + &self.runtime.whatami().to_str(), &self.name ) } @@ -99,7 +116,7 @@ impl StorageRuntimeInner { // Try to initiate login. // Required in case of dynamic lib, otherwise no logs. // But cannot be done twice in case of static link. - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let PluginConfig { name, backend_search_dirs, @@ -107,15 +124,29 @@ storages, .. } = config; - let lib_loader = backend_search_dirs - .map(|search_dirs| LibLoader::new(&search_dirs, false)) - .unwrap_or_default(); + let lib_loader = LibLoader::new(backend_search_dirs); let plugins_manager = PluginsManager::dynamic(lib_loader.clone(), BACKEND_LIB_PREFIX) .declare_static_plugin::(MEMORY_BACKEND_NAME, true); - let session = Arc::new(zenoh::init(runtime.clone()).res_sync()?); + let session = Arc::new(zenoh::session::init(runtime.clone()).wait()?); + + // NOTE: All storages **must** have a timestamp associated with a Sample. Considering that it is possible to make + // a publication without associating a timestamp, the node managing the storage (be it a + // Zenoh client / peer / router) has to add it. + // + // If the `timestamping` configuration setting is disabled then there is no HLC associated with the + // Session. That eventually means that no timestamp can be generated, which goes against the previous + // requirement. + // + // Hence, in that scenario, we refuse to start the storage manager and any storage. + if session.hlc().is_none() { + tracing::error!( + "Cannot start storage manager (and thus any storage) without the 'timestamping' setting enabled in the Zenoh configuration" + ); + bail!("Cannot start storage manager, 'timestamping' is disabled in the configuration"); + } // After this moment, the result should only be Ok. Failure to load one volume or storage should not affect the others.
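Given the `timestamping` requirement introduced above, a zenohd instance that runs the storage manager needs timestamping enabled in its configuration. A minimal JSON5 fragment, following the layout of zenoh's default configuration file (all other fields elided; exact defaults may differ per mode):

    {
      timestamping: {
        // add a timestamp to received publications that do not carry one
        enabled: { router: true, peer: true, client: true },
      },
    }
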
@@ -175,11 +206,13 @@ impl StorageRuntimeInner { let name = name.as_ref(); tracing::info!("Killing volume '{}'", name); if let Some(storages) = self.storages.remove(name) { - async_std::task::block_on(futures::future::join_all( - storages - .into_values() - .map(|s| async move { s.send(StorageMessage::Stop) }), - )); + tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(futures::future::join_all( + storages + .into_values() + .map(|s| async move { s.send(StorageMessage::Stop) }), + )) + }); } self.plugins_manager .started_plugin_mut(name) @@ -208,7 +241,9 @@ impl StorageRuntimeInner { self.plugins_manager .declare_dynamic_plugin_by_name(volume_id, backend_name, true)? }; - let loaded = declared.load()?; + let loaded = declared + .load()? + .expect("Volumes should not be loaded if the storage-manager plugin is not loaded"); loaded.start(config)?; Ok(()) @@ -245,16 +280,14 @@ impl StorageRuntimeInner { volume_id, backend.name() ); - let in_interceptor = backend.instance().incoming_data_interceptor(); - let out_interceptor = backend.instance().outgoing_data_interceptor(); - let stopper = async_std::task::block_on(create_and_start_storage( - admin_key, - storage.clone(), - backend.instance(), - in_interceptor, - out_interceptor, - self.session.clone(), - ))?; + let stopper = tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(create_and_start_storage( + admin_key, + storage.clone(), + backend.instance(), + self.session.clone(), + )) + })?; self.storages .entry(volume_id) .or_default() @@ -305,18 +338,15 @@ impl RunningPluginTrait for StorageRuntime { fn adminspace_getter<'a>( &'a self, - selector: &'a Selector<'a>, + key_expr: &'a KeyExpr<'a>, plugin_status_key: &str, - ) -> ZResult<Vec<zenoh::plugins::Response>> { + ) -> ZResult<Vec<Response>> { let mut responses = Vec::new(); let mut key = String::from(plugin_status_key); // TODO: to be removed when "__version__" is implemented in admin space with_extended_string(&mut key, &["/version"], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(&selector.key_expr) - { - responses.push(zenoh::plugins::Response::new( + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { + responses.push(Response::new( key.clone(), StoragesPlugin::PLUGIN_VERSION.into(), )) @@ -327,21 +357,12 @@ impl RunningPluginTrait for StorageRuntime { for plugin in guard.plugins_manager.started_plugins_iter() { with_extended_string(key, &[plugin.id()], |key| { with_extended_string(key, &["/__path__"], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(&selector.key_expr) - { - responses.push(zenoh::plugins::Response::new( - key.clone(), - plugin.path().into(), - )) + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { + responses.push(Response::new(key.clone(), plugin.path().into())) } }); - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(&selector.key_expr) - { - responses.push(zenoh::plugins::Response::new( + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { + responses.push(Response::new( key.clone(), plugin.instance().get_admin_status(), )) @@ -353,16 +374,15 @@ impl RunningPluginTrait for StorageRuntime { for storages in guard.storages.values() { for (storage, handle) in storages { with_extended_string(key, &[storage], |key| { - if keyexpr::new(key.as_str()) - .unwrap() - .intersects(&selector.key_expr) - { - if let Ok(value) = task::block_on(async { - let (tx, rx) = async_std::channel::bounded(1); - let _ = handle.send(StorageMessage::GetStatus(tx)); - rx.recv().await + if keyexpr::new(key.as_str()).unwrap().intersects(key_expr) { + if
let Some(value) = tokio::task::block_in_place(|| { + TOKIO_RUNTIME.block_on(async { + let (tx, mut rx) = tokio::sync::mpsc::channel(1); + let _ = handle.send(StorageMessage::GetStatus(tx)); + rx.recv().await + }) }) { - responses.push(zenoh::plugins::Response::new(key.clone(), value)) + responses.push(Response::new(key.clone(), value)) } } }) @@ -389,3 +409,65 @@ fn with_extended_string R>( prefix.truncate(prefix_len); result } + +/// Returns the key expression stripped of the provided prefix. +/// +/// If no prefix is provided this function returns the key expression untouched. +/// +/// If `None` is returned, it indicates that the key expression is equal to the prefix. +/// +/// This function will internally call [strip_prefix], see its documentation for possible outcomes. +/// +/// # Errors +/// +/// This function will return an error if: +/// - The provided prefix contains a wildcard. +/// NOTE: The configuration of a Storage is checked and will reject any prefix that contains a +/// wildcard. In theory, this error should never occur. +/// - The key expression is not prefixed by the provided prefix. +/// - The resulting stripped key is not a valid key expression (this should, in theory, never +/// happen). +/// +/// [strip_prefix]: zenoh::key_expr::keyexpr::strip_prefix() +pub fn strip_prefix( + maybe_prefix: Option<&OwnedKeyExpr>, + key_expr: &KeyExpr<'_>, +) -> ZResult> { + match maybe_prefix { + None => Ok(Some(key_expr.clone().into())), + Some(prefix) => { + if prefix.is_wild() { + bail!( + "Prefix < {} > contains a wild character (\"**\" or \"*\")", + prefix + ); + } + + match key_expr.strip_prefix(prefix).as_slice() { + [stripped_key_expr] => { + if stripped_key_expr.is_empty() { + return Ok(None); + } + + OwnedKeyExpr::from_str(stripped_key_expr).map(Some) + } + _ => bail!("Failed to strip prefix < {} > from: {}", prefix, key_expr), + } + } + } +} + +/// Returns the key with an additional prefix, if one was provided. +/// +/// If no prefix is provided, this function returns `maybe_stripped_key`. +/// +/// If a prefix is provided, this function returns the concatenation of both. 
+pub fn prefix( + maybe_prefix: Option<&OwnedKeyExpr>, + maybe_stripped_key: &OwnedKeyExpr, +) -> OwnedKeyExpr { + match maybe_prefix { + Some(prefix) => prefix / maybe_stripped_key, + None => maybe_stripped_key.clone(), + } +} diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 8b37094ffd..b056cf7faf 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::RwLock; +use std::{collections::HashMap, sync::Arc}; + use async_trait::async_trait; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; -use zenoh_backend_traits::config::{StorageConfig, VolumeConfig}; -use zenoh_backend_traits::*; +use tokio::sync::RwLock; +use zenoh::{internal::Value, key_expr::OwnedKeyExpr, time::Timestamp, Result as ZResult}; +use zenoh_backend_traits::{ + config::{StorageConfig, VolumeConfig}, + *, +}; use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin}; -use zenoh_result::ZResult; use crate::MEMORY_BACKEND_NAME; @@ -61,26 +61,6 @@ impl Volume for MemoryBackend { tracing::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } - - fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } - - fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { - // By default: no interception point - None - // To test interceptors, uncomment this line: - // Some(Arc::new(|sample| { - // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); - // sample - // })) - } } impl Drop for MemoryBackend { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 76a086d352..737ce79144 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -11,16 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::digest::*; -use super::Snapshotter; -use async_std::sync::Arc; -use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::str; -use std::str::FromStr; -use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; -use zenoh::Session; +use std::{ + borrow::Cow, + cmp::Ordering, + collections::{BTreeSet, HashMap, HashSet}, + str, + str::FromStr, + sync::Arc, +}; + +use zenoh::{ + internal::Value, key_expr::OwnedKeyExpr, prelude::*, query::Parameters, sample::Sample, + time::Timestamp, Session, +}; + +use super::{digest::*, Snapshotter}; pub struct AlignQueryable { session: Arc, @@ -70,7 +75,6 @@ impl AlignQueryable { .session .declare_queryable(&self.digest_key) .complete(true) // This queryable is meant to have all the history - .res() .await .unwrap(); @@ -83,7 +87,7 @@ impl AlignQueryable { } }; tracing::trace!("[ALIGN QUERYABLE] Received Query '{}'", query.selector()); - let diff_required = self.parse_selector(query.selector()); + let diff_required = self.parse_parameters(query.parameters()); tracing::trace!( "[ALIGN QUERYABLE] Parsed selector diff_required:{:?}", 
diff_required @@ -94,29 +98,39 @@ impl AlignQueryable { for value in values { match value { AlignData::Interval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .await + .unwrap(); } AlignData::Subinterval(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .await + .unwrap(); } AlignData::Content(i, c) => { - let sample = Sample::new( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ) + .await + .unwrap(); } AlignData::Data(k, (v, ts)) => { - let sample = Sample::new(k, v).with_timestamp(ts); - query.reply(Ok(sample)).res().await.unwrap(); + query + .reply(k, v.payload().clone()) + .encoding(v.encoding().clone()) + .timestamp(ts) + .await + .unwrap(); } } } @@ -164,8 +178,8 @@ impl AlignQueryable { if entry.is_some() { let entry = entry.unwrap(); result.push(AlignData::Data( - OwnedKeyExpr::from(entry.key_expr), - (entry.value, each.timestamp), + OwnedKeyExpr::from(entry.key_expr().clone()), + (Value::from(entry), each.timestamp), )); } } @@ -174,15 +188,14 @@ impl AlignQueryable { } } - fn parse_selector(&self, selector: Selector) -> Option { - let properties = selector.parameters_stringmap().unwrap(); // note: this is a hashmap - tracing::trace!("[ALIGN QUERYABLE] Properties are: {:?}", properties); - if properties.contains_key(super::ERA) { + fn parse_parameters(&self, parameters: &Parameters) -> Option { + tracing::trace!("[ALIGN QUERYABLE] Parameters are: {:?}", parameters); + if parameters.contains_key(super::ERA) { Some(AlignComponent::Era( - EraType::from_str(properties.get(super::ERA).unwrap()).unwrap(), + EraType::from_str(parameters.get(super::ERA).unwrap()).unwrap(), )) - } else if properties.contains_key(super::INTERVALS) { - let mut intervals = properties.get(super::INTERVALS).unwrap().to_string(); + } else if parameters.contains_key(super::INTERVALS) { + let mut intervals = parameters.get(super::INTERVALS).unwrap().to_string(); intervals.remove(0); intervals.pop(); Some(AlignComponent::Intervals( @@ -191,8 +204,8 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.contains_key(super::SUBINTERVALS) { - let mut subintervals = properties.get(super::SUBINTERVALS).unwrap().to_string(); + } else if parameters.contains_key(super::SUBINTERVALS) { + let mut subintervals = parameters.get(super::SUBINTERVALS).unwrap().to_string(); subintervals.remove(0); subintervals.pop(); Some(AlignComponent::Subintervals( @@ -201,8 +214,8 @@ impl AlignQueryable { .map(|x| x.parse::().unwrap()) .collect::>(), )) - } else if properties.contains_key(super::CONTENTS) { - let contents = serde_json::from_str(properties.get(super::CONTENTS).unwrap()).unwrap(); + } else if parameters.contains_key(super::CONTENTS) { + let contents = serde_json::from_str(parameters.get(super::CONTENTS).unwrap()).unwrap(); Some(AlignComponent::Contents(contents)) } else { None @@ -214,17 +227,20 @@ impl AlignQueryable { impl AlignQueryable { async fn get_entry(&self, logentry: &LogEntry) -> Option { // get 
corresponding key from log - let replies = self.session.get(&logentry.key).res().await.unwrap(); + let replies = self.session.get(&logentry.key).await.unwrap(); if let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { tracing::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}' @ {:?})", - sample.key_expr.as_str(), - sample.value, - sample.timestamp + sample.key_expr().as_str(), + sample + .payload() + .deserialize::>() + .unwrap_or(Cow::Borrowed("")), + sample.timestamp(), ); - if let Some(timestamp) = sample.timestamp { + if let Some(timestamp) = sample.timestamp() { match timestamp.cmp(&logentry.timestamp) { Ordering::Greater => { tracing::error!( diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 58abc7e05a..952a72f499 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -12,16 +12,26 @@ // ZettaScale Zenoh Team, // -use super::{Digest, EraType, LogEntry, Snapshotter}; -use super::{CONTENTS, ERA, INTERVALS, SUBINTERVALS}; -use async_std::sync::{Arc, RwLock}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + str, + sync::Arc, +}; + use flume::{Receiver, Sender}; -use std::collections::{HashMap, HashSet}; -use std::str; -use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; -use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; -use zenoh::Session; +use tokio::sync::RwLock; +use zenoh::{ + internal::Value, + key_expr::{KeyExpr, OwnedKeyExpr}, + prelude::*, + query::Selector, + sample::{Sample, SampleBuilder}, + time::Timestamp, + Session, +}; + +use super::{Digest, EraType, LogEntry, Snapshotter, CONTENTS, ERA, INTERVALS, SUBINTERVALS}; pub struct Aligner { session: Arc, @@ -104,7 +114,10 @@ impl Aligner { tracing::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let sample = Sample::new(key, value).with_timestamp(ts); + let sample = SampleBuilder::put(key, value.payload().clone()) + .encoding(value.encoding().clone()) + .timestamp(ts) + .into(); tracing::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { tracing::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -125,18 +138,18 @@ impl Aligner { from: &str, ) -> (HashMap, bool) { let mut result = HashMap::new(); - let properties = format!( + let parameters = format!( "timestamp={}&{}={}", timestamp, CONTENTS, serde_json::to_string(missing_content).unwrap() ); - let (replies, no_err) = self.perform_query(from, properties.clone()).await; + let (replies, no_err) = self.perform_query(from, parameters.clone()).await; for sample in replies { result.insert( - sample.key_expr.into(), - (sample.timestamp.unwrap(), sample.value), + sample.key_expr().clone().into(), + (*sample.timestamp().unwrap(), Value::from(sample)), ); } (result, no_err) @@ -199,12 +212,12 @@ impl Aligner { other_rep: &str, ) -> (HashSet, bool) { let (other_intervals, no_err) = if era.eq(&EraType::Cold) { - let properties = format!("timestamp={}&{}=cold", other.timestamp, ERA); - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + let parameters = format!("timestamp={}&{}=cold", other.timestamp, ERA); + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_intervals: HashMap = HashMap::new(); - // expecting sample.value to be a 
vec of intervals with their checksum + // expecting sample.payload to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -212,7 +225,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } (other_intervals, no_err) } else { @@ -240,17 +253,17 @@ impl Aligner { for each_int in diff_intervals { diff_string.push(each_int.to_string()); } - let properties = format!( + let parameters = format!( "timestamp={}&{}=[{}]", other.timestamp, INTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of subintervals with their checksum - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + // expecting sample.payload to be a vec of subintervals with their checksum + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -258,7 +271,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } (other_subintervals, no_err) }; @@ -281,17 +294,17 @@ impl Aligner { for each_sub in diff_subintervals { diff_string.push(each_sub.to_string()); } - let properties = format!( + let parameters = format!( "timestamp={}&{}=[{}]", other.timestamp, SUBINTERVALS, diff_string.join(",") ); - // expecting sample.value to be a vec of log entries with their checksum - let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; + // expecting sample.payload to be a vec of log entries with their checksum + let (reply_content, mut no_err) = self.perform_query(other_rep, parameters).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&each.value.to_string()) { + match serde_json::from_reader(each.payload().reader()) { Ok((i, c)) => { other_content.insert(i, c); } @@ -299,7 +312,7 @@ impl Aligner { tracing::error!("[ALIGNER] Error decoding reply: {}", e); no_err = false; } - }; + } } // get subintervals diff let result = this.get_full_content_diff(other_content); @@ -309,12 +322,12 @@ impl Aligner { } } - async fn perform_query(&self, from: &str, properties: String) -> (Vec, bool) { + async fn perform_query(&self, from: &str, parameters: String) -> (Vec, bool) { let mut no_err = true; - let selector = KeyExpr::from(&self.digest_key) - .join(&from) - .unwrap() - .with_parameters(&properties); + let selector = Selector::owned( + KeyExpr::from(&self.digest_key).join(&from).unwrap(), + parameters, + ); tracing::trace!("[ALIGNER] Sending Query '{}'...", selector); let mut return_val = Vec::new(); match self @@ -322,23 +335,25 @@ impl Aligner { .get(&selector) .consolidation(zenoh::query::ConsolidationMode::None) .accept_replies(zenoh::query::ReplyKeyExpr::Any) - .res() .await { Ok(replies) => { while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { tracing::trace!( "[ALIGNER] Received ('{}': '{}')", - sample.key_expr.as_str(), - sample.value + sample.key_expr().as_str(), + sample + .payload() + .deserialize::>() + .unwrap_or(Cow::Borrowed("")) ); return_val.push(sample); } Err(err) => { 
tracing::error!( - "[ALIGNER] Received error for query on selector {} :{}", + "[ALIGNER] Received error for query on selector {} :{:?}", selector, err ); @@ -348,7 +363,11 @@ impl Aligner { } } Err(err) => { - tracing::error!("[ALIGNER] Query failed on selector `{}`: {}", selector, err); + tracing::error!( + "[ALIGNER] Query failed on selector `{}`: {:?}", + selector, + err + ); no_err = false; } }; diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs index 55ae097892..07ba7e9ea3 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/digest.rs @@ -12,16 +12,18 @@ // ZettaScale Zenoh Team, // +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryFrom, + str::FromStr, + string::ParseError, + time::Duration, +}; + use crc::{Crc, CRC_64_ECMA_182}; use derive_new::new; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::convert::TryFrom; -use std::str::FromStr; -use std::string::ParseError; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp}; #[derive(Eq, PartialEq, Clone, Debug, Deserialize, Serialize)] pub struct DigestConfig { @@ -831,384 +833,3 @@ impl Digest { } } } - -#[test] -fn test_create_digest_empty_initial() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_hot() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634800, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Hot, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn 
test_create_digest_with_initial_warm() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634810, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Warm, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_create_digest_with_initial_cold() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - vec![LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }], - 1671634910, - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_add_content() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::update_digest( - Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - HashSet::new(), - ); - let expected = Digest { - 
timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 6001159706341373391, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 4598971083408074426, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 8436018757196527319, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10827088509365589085, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_digest_remove_content() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::update_digest( - Digest { - timestamp: Timestamp::from_str("2022-12-21T13:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 3304302629246049840, - eras: HashMap::from([( - EraType::Cold, - Interval { - checksum: 8238986480495191270, - content: BTreeSet::from([1671634800]), - }, - )]), - intervals: HashMap::from([( - 1671634800, - Interval { - checksum: 12344398372324783476, - content: BTreeSet::from([16716348000]), - }, - )]), - subintervals: HashMap::from([( - 16716348000, - SubInterval { - checksum: 10007212639402189432, - content: BTreeSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - }, - )]), - }, - 1671634910, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("demo/example/a").unwrap(), - }]), - ); - let expected = Digest { - timestamp: Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - config: DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - checksum: 0, - eras: HashMap::new(), - intervals: HashMap::new(), - subintervals: HashMap::new(), - }; - assert_eq!(created, expected); -} - -#[test] -fn test_update_remove_digest() { - async_std::task::block_on(async { - zenoh_core::zasync_executor_init!(); - }); - let created = Digest::create_digest( - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - DigestConfig { - delta: Duration::from_millis(1000), - sub_intervals: 10, - hot: 6, - warm: 30, - }, - Vec::new(), - 1671612730, - ); - let added = Digest::update_digest( - created.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_ne!(created, added); - - let removed = Digest::update_digest( - added.clone(), - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::new(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - ); - assert_eq!(created, removed); - - let 
added_again = Digest::update_digest( - removed, - 1671612730, - Timestamp::from_str("2022-12-21T15:00:00.000000000Z/1").unwrap(), - HashSet::from([LogEntry { - timestamp: Timestamp::from_str("2022-12-21T12:00:00.000000000Z/1").unwrap(), - key: OwnedKeyExpr::from_str("a/b/c").unwrap(), - }]), - HashSet::new(), - ); - assert_eq!(added, added_again); -} diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 51bd613e22..4766914e21 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -14,23 +14,21 @@ // This module extends Storage with alignment protocol that aligns storages subscribing to the same key_expr -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use async_std::sync::RwLock; +use std::{ + collections::{HashMap, HashSet}, + str, + sync::Arc, + time::{Duration, SystemTime}, +}; + use flume::{Receiver, Sender}; use futures::{pin_mut, select, FutureExt}; -use std::collections::{HashMap, HashSet}; -use std::str; -use std::str::FromStr; -use std::time::{Duration, SystemTime}; -use urlencoding::encode; -use zenoh::prelude::r#async::*; -use zenoh::time::Timestamp; -use zenoh::Session; +use tokio::{sync::RwLock, time::interval}; +use zenoh::{key_expr::keyexpr, prelude::*}; use zenoh_backend_traits::config::{ReplicaConfig, StorageConfig}; +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; + pub mod align_queryable; pub mod aligner; pub mod digest; @@ -42,16 +40,19 @@ pub use aligner::Aligner; pub use digest::{Digest, DigestConfig, EraType, LogEntry}; pub use snapshotter::Snapshotter; pub use storage::{ReplicationService, StorageService}; +use zenoh::{key_expr::OwnedKeyExpr, sample::Locality, time::Timestamp, Session}; const ERA: &str = "era"; const INTERVALS: &str = "intervals"; const SUBINTERVALS: &str = "subintervals"; const CONTENTS: &str = "contents"; pub const EPOCH_START: SystemTime = SystemTime::UNIX_EPOCH; - -pub const ALIGN_PREFIX: &str = "@-digest"; pub const SUBINTERVAL_CHUNKS: usize = 10; +lazy_static::lazy_static!( + static ref KE_PREFIX_DIGEST: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("@-digest") }; +); + // A replica consists of a storage service and services required for anti-entropy // To perform anti-entropy, we need a `Digest` that contains the state of the datastore // `Snapshotter` computes the `Digest` and maintains all related information @@ -93,10 +94,7 @@ impl Replica { } } else { result.push(( - StorageService::get_prefixed( - &storage_config.strip_prefix, - &entry.0.unwrap().into(), - ), + crate::prefix(storage_config.strip_prefix.as_ref(), &entry.0.unwrap()), entry.1, )); } @@ -109,14 +107,15 @@ impl Replica { } }; + // Zid of session for generating timestamps + let replica = Replica { name: name.to_string(), - session, + session: session.clone(), key_expr: storage_config.key_expr.clone(), replica_config: storage_config.replica_config.clone().unwrap(), digests_published: RwLock::new(HashSet::new()), }; - // Create channels for communication between components // channel to queue digests to be aligned let (tx_digest, rx_digest) = flume::unbounded(); @@ -127,11 +126,12 @@ impl Replica { let config = replica.replica_config.clone(); // snapshotter - let snapshotter = Arc::new(Snapshotter::new(rx_log, &startup_entries, &config).await); + let snapshotter = + 
Arc::new(Snapshotter::new(session, rx_log, &startup_entries, &config).await); // digest sub let digest_sub = replica.start_digest_sub(tx_digest).fuse(); // queryable for alignment - let digest_key = Replica::get_digest_key(&replica.key_expr, ALIGN_PREFIX); + let digest_key = Replica::get_digest_key(&replica.key_expr); let align_q = AlignQueryable::start_align_queryable( replica.session.clone(), digest_key.clone(),
@@ -195,9 +195,7 @@ impl Replica { pub async fn start_digest_sub(&self, tx: Sender<(String, Digest)>) { let mut received = HashMap::<String, Timestamp>::new(); - let digest_key = Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX) - .join("**") - .unwrap(); + let digest_key = Replica::get_digest_key(&self.key_expr).join("**").unwrap(); tracing::debug!( "[DIGEST_SUB] Declaring Subscriber named {} on '{}'",
@@ -208,7 +206,6 @@ impl Replica { .session .declare_subscriber(&digest_key) .allowed_origin(Locality::Remote) - .res() .await .unwrap(); loop {
@@ -219,22 +216,25 @@ impl Replica { continue; } }; - let from = &sample.key_expr.as_str() - [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; - tracing::trace!( - "[DIGEST_SUB] From {} Received {} ('{}': '{}')", - from, - sample.kind, - sample.key_expr.as_str(), - sample.value - ); - let digest: Digest = match serde_json::from_str(&format!("{}", sample.value)) { + let from = + &sample.key_expr().as_str()[Replica::get_digest_key(&self.key_expr).len() + 1..]; + + let digest: Digest = match serde_json::from_reader(sample.payload().reader()) { Ok(digest) => digest, Err(e) => { tracing::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); continue; } }; + + tracing::trace!( + "[DIGEST_SUB] From {} Received {} ('{}': '{:?}')", + from, + sample.kind(), + sample.key_expr().as_str(), + digest, + ); + let ts = digest.timestamp; let to_be_processed = self .processing_needed(
@@ -253,7 +253,7 @@ impl Replica { tracing::error!("[DIGEST_SUB] Error sending digest to aligner: {}", e) } } - }; + } received.insert(from.to_string(), ts); } }
@@ -261,23 +261,18 @@ impl Replica { // Create a publisher to periodically publish digests from the snapshotter // Publish on <digest_key>/<replica_name> pub async fn start_digest_pub(&self, snapshotter: Arc<Snapshotter>) { - let digest_key = Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX) + let digest_key = Replica::get_digest_key(&self.key_expr) .join(&self.name) .unwrap(); tracing::debug!("[DIGEST_PUB] Declaring Publisher on '{}'...", digest_key); - let publisher = self - .session - .declare_publisher(digest_key) - .res() - .await - .unwrap(); + let publisher = self.session.declare_publisher(digest_key).await.unwrap(); // Ensure digest gets published every interval, accounting for // time it takes to publish.
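// A minimal sketch of the tick-based loop that follows, assuming a one-second
// publication interval; `tokio::time::interval` (which replaces the async-std
// interval stream here) completes its first `tick()` immediately and then
// fires once per period:
//
//     let mut interval = tokio::time::interval(Duration::from_secs(1));
//     loop {
//         interval.tick().await; // the first call returns immediately
//         // compress and publish the digest here
//     }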
let mut interval = interval(self.replica_config.publication_interval); loop { - let _ = interval.next().await; + let _ = interval.tick().await; let digest = snapshotter.get_digest().await; let digest = digest.compress();
@@ -288,7 +283,7 @@ impl Replica { drop(digest); tracing::trace!("[DIGEST_PUB] Putting Digest: {} ...", digest_json); - match publisher.put(digest_json).res().await { + match publisher.put(digest_json).await { Ok(()) => {} Err(e) => tracing::error!("[DIGEST_PUB] Digest publication failed: {}", e), }
@@ -332,12 +327,8 @@ impl Replica { true } - fn get_digest_key(key_expr: &OwnedKeyExpr, align_prefix: &str) -> OwnedKeyExpr { - let key_expr = encode(key_expr).to_string(); - OwnedKeyExpr::from_str(align_prefix) - .unwrap() - .join(&key_expr) - .unwrap() + fn get_digest_key(key_expr: &keyexpr) -> OwnedKeyExpr { + *KE_PREFIX_DIGEST / key_expr } pub fn get_hot_interval_number(publication_interval: Duration, delta: Duration) -> usize {
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs
index e66a6e88ca..3f00648597 100644
--- a/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs
+++ b/plugins/zenoh-plugin-storage-manager/src/replica/snapshotter.rs
@@ -11,21 +11,27 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Digest, DigestConfig, LogEntry}; -use async_std::stream::{interval, StreamExt}; -use async_std::sync::Arc; -use async_std::sync::RwLock; -use async_std::task::sleep; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, + sync::Arc, + time::Duration, +}; + use flume::Receiver; use futures::join; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::time::Duration; -use zenoh::key_expr::OwnedKeyExpr; -use zenoh::time::Timestamp; +use tokio::{ + sync::RwLock, + time::{interval, sleep}, +}; +use zenoh::{key_expr::OwnedKeyExpr, time::Timestamp, Session}; use zenoh_backend_traits::config::ReplicaConfig; +use super::{Digest, DigestConfig, LogEntry}; + pub struct Snapshotter { + // session ref for timestamp generation + session: Arc<Session>, // channel to get updates from the storage storage_update: Receiver<(OwnedKeyExpr, Timestamp)>, // configuration parameters of the replica
@@ -51,6 +57,7 @@ pub struct ReplicationInfo { impl Snapshotter { // Initialize the snapshot parameters, logs and digest pub async fn new( + session: Arc<Session>, rx_sample: Receiver<(OwnedKeyExpr, Timestamp)>, initial_entries: &Vec<(OwnedKeyExpr, Timestamp)>, replica_config: &ReplicaConfig,
@@ -59,10 +66,12 @@ impl Snapshotter { // from initial entries, populate the log - stable and volatile // compute digest let (last_snapshot_time, last_interval) = Snapshotter::compute_snapshot_params( + session.clone(), replica_config.propagation_delay, replica_config.delta, ); let snapshotter = Snapshotter { + session, storage_update: rx_sample, replica_config: replica_config.clone(), content: ReplicationInfo {
@@ -117,11 +126,12 @@ impl Snapshotter { let mut interval = interval(self.replica_config.delta); loop { - let _ = interval.next().await; + let _ = interval.tick().await; let mut last_snapshot_time = self.content.last_snapshot_time.write().await; let mut last_interval = self.content.last_interval.write().await; let (time, interval) = Snapshotter::compute_snapshot_params( + self.session.clone(), self.replica_config.propagation_delay, self.replica_config.delta, );
@@ -133,12 +143,15 @@ impl Snapshotter { } } + // TODO // Compute latest snapshot time and latest interval with
respect to the current time pub fn compute_snapshot_params( + session: Arc<Session>, propagation_delay: Duration, delta: Duration, ) -> (Timestamp, u64) { - let now = zenoh::time::new_reception_timestamp(); + let now = session.new_timestamp(); + let latest_interval = (now .get_time() .to_system_time()
@@ -195,7 +208,7 @@ impl Snapshotter { // Create digest from the stable log at startup async fn initialize_digest(&self) { - let now = zenoh::time::new_reception_timestamp(); + let now = self.session.new_timestamp(); let replica_data = &self.content; let log_locked = replica_data.stable_log.read().await; let latest_interval = replica_data.last_interval.read().await;
diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs
index 63352fab0a..d2147b137c 100644
--- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs
+++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs
@@ -11,29 +11,40 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::backends_mgt::StoreIntercept; -use crate::storages_mgt::StorageMessage; -use async_std::sync::Arc; -use async_std::sync::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + str::{self, FromStr}, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; + use async_trait::async_trait; use flume::{Receiver, Sender}; use futures::select; -use std::collections::{HashMap, HashSet}; -use std::str::{self, FromStr}; -use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::ZBuf; -use zenoh::prelude::r#async::*; -use zenoh::query::ConsolidationMode; -use zenoh::time::{Timestamp, NTP64}; -use zenoh::{Result as ZResult, Session}; -use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; -use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; -use zenoh_keyexpr::key_expr::OwnedKeyExpr; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::NonWild, support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; -use zenoh_result::bail; -use zenoh_util::{zenoh_home, Timed, TimedEvent, Timer}; +use tokio::sync::{Mutex, RwLock}; +use zenoh::{ + bytes::EncodingBuilderTrait, + internal::{ + buffers::{SplitBuffer, ZBuf}, + zenoh_home, Timed, TimedEvent, Timer, Value, + }, + key_expr::{ + keyexpr_tree::{ + IKeyExprTree, IKeyExprTreeMut, KeBoxTree, KeyedSetProvider, NonWild, UnknownWildness, + }, + KeyExpr, OwnedKeyExpr, + }, + query::{ConsolidationMode, QueryTarget}, + sample::{Sample, SampleBuilder, SampleKind, TimestampBuilderTrait}, + session::{Session, SessionDeclarations}, + time::{Timestamp, NTP64}, +}; +use zenoh_backend_traits::{ + config::{GarbageCollectionConfig, StorageConfig}, + Capability, History, Persistence, StorageInsertionResult, StoredData, +}; + +use crate::{backends_mgt::StoreIntercept, storages_mgt::StorageMessage}; pub const WILDCARD_UPDATES_FILENAME: &str = "wildcard_updates"; pub const TOMBSTONE_FILENAME: &str = "tombstones";
@@ -60,8 +71,6 @@ pub struct StorageService { capability: Capability, tombstones: Arc<RwLock<KeBoxTree<Timestamp, NonWild, KeyedSetProvider>>>, wildcard_updates: Arc<RwLock<KeBoxTree<Update, UnknownWildness, KeyedSetProvider>>>, - in_interceptor: Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>, - out_interceptor: Option<Arc<dyn Fn(Sample) -> Sample + Send + Sync>>, replication: Option<ReplicationService>, }
@@ -85,8 +94,6 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), - in_interceptor:
store_intercept.in_interceptor, - out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -141,7 +148,7 @@ impl StorageService { t.add_async(gc).await; // subscribe on key_expr - let storage_sub = match self.session.declare_subscriber(&self.key_expr).res().await { + let storage_sub = match self.session.declare_subscriber(&self.key_expr).await { Ok(storage_sub) => storage_sub, Err(e) => { tracing::error!("Error starting storage '{}': {}", self.name, e); @@ -154,7 +161,6 @@ impl StorageService { .session .declare_queryable(&self.key_expr) .complete(self.complete) - .res() .await { Ok(storage_queryable) => storage_queryable, @@ -179,8 +185,8 @@ impl StorageService { }; // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored - if sample.get_timestamp().is_none() { - tracing::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); + if sample.timestamp().is_none() { + tracing::error!("Sample {:?} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -223,14 +229,15 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let mut sample = match sample { + let sample = match sample { Ok(sample) => sample, Err(e) => { tracing::error!("Error in sample: {}", e); continue; } }; - sample.ensure_timestamp(); + let timestamp = sample.timestamp().cloned().unwrap_or(self.session.new_timestamp()); + let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); self.process_sample(sample).await; }, // on query on key_expr @@ -262,66 +269,81 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - tracing::trace!("[STORAGE] Processing sample: {}", sample); - // Call incoming data interceptor (if any) - let sample = if let Some(ref interceptor) = self.in_interceptor { - interceptor(sample) - } else { - sample + tracing::trace!("[STORAGE] Processing sample: {:?}", sample); + + // A Sample, in theory, will not arrive to a Storage without a Timestamp. This check (which, again, should + // never enter the `None` branch) ensures that the Storage Manager does not panic even if it ever happens. 
+ let sample_timestamp = match sample.timestamp() { + Some(timestamp) => timestamp, + None => { + tracing::error!("Discarding Sample that has no Timestamp: {:?}", sample); + return; + } }; // if wildcard, update wildcard_updates - if sample.key_expr.is_wild() { + if sample.key_expr().is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr.is_wild() { - self.get_matching_keys(&sample.key_expr).await + let matching_keys = if sample.key_expr().is_wild() { + self.get_matching_keys(sample.key_expr()).await } else { - vec![sample.key_expr.clone().into()] + vec![sample.key_expr().clone().into()] }; tracing::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr, + sample.key_expr(), matching_keys ); for k in matching_keys { - if !self - .is_deleted(&k.clone(), sample.get_timestamp().unwrap()) - .await + if !self.is_deleted(&k.clone(), sample_timestamp).await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) + && self.is_latest(&k, sample_timestamp).await)) { tracing::trace!( - "Sample `{}` identified as needed processing for key {}", + "Sample `{:?}` identified as needed processing for key {}", sample, k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store = match self - .ovderriding_wild_update(&k, sample.get_timestamp().unwrap()) - .await + let sample_to_store: Sample = if let Some(update) = + self.ovderriding_wild_update(&k, sample_timestamp).await { - Some(overriding_update) => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), overriding_update.data.value) - .with_timestamp(overriding_update.data.timestamp); - sample_to_store.kind = overriding_update.kind; - sample_to_store + match update.kind { + SampleKind::Put => { + SampleBuilder::put(k.clone(), update.data.value.payload().clone()) + .encoding(update.data.value.encoding().clone()) + .timestamp(update.data.timestamp) + .into() + } + SampleKind::Delete => SampleBuilder::delete(k.clone()) + .timestamp(update.data.timestamp) + .into(), } + } else { + SampleBuilder::from(sample.clone()) + .keyexpr(k.clone()) + .into() + }; + + // A Sample that is to be stored **must** have a Timestamp. In theory, the Sample generated should have + // a Timestamp and, in theory, this check is unneeded. 
+ let sample_to_store_timestamp = match sample_to_store.timestamp() { + Some(timestamp) => *timestamp, None => { - let mut sample_to_store = - Sample::new(KeyExpr::from(k.clone()), sample.value.clone()) - .with_timestamp(sample.timestamp.unwrap()); - sample_to_store.kind = sample.kind; - sample_to_store + tracing::error!("Discarding `Sample` generated through `SampleBuilder` that has no Timestamp: {:?}", sample_to_store); + continue; } }; - let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { + let stripped_key = match crate::strip_prefix( + self.strip_prefix.as_ref(), + sample_to_store.key_expr(), + ) { Ok(stripped) => stripped, Err(e) => { tracing::error!("{}", e); @@ -329,23 +351,26 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - let result = if sample.kind == SampleKind::Put { - storage - .put( - stripped_key, - sample_to_store.value.clone(), - sample_to_store.timestamp.unwrap(), - ) - .await - } else if sample.kind == SampleKind::Delete { - // register a tombstone - self.mark_tombstone(&k, sample_to_store.timestamp.unwrap()) - .await; - storage - .delete(stripped_key, sample_to_store.timestamp.unwrap()) - .await - } else { - Err("sample kind not implemented".into()) + let result = match sample.kind() { + SampleKind::Put => { + storage + .put( + stripped_key, + Value::new( + sample_to_store.payload().clone(), + sample_to_store.encoding().clone(), + ), + sample_to_store_timestamp, + ) + .await + } + SampleKind::Delete => { + // register a tombstone + self.mark_tombstone(&k, sample_to_store_timestamp).await; + storage + .delete(stripped_key, sample_to_store_timestamp) + .await + } }; drop(storage); if self.replication.is_some() @@ -357,7 +382,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.get_timestamp().unwrap())); + .send((k.clone(), sample_to_store_timestamp)); match sending { Ok(_) => (), Err(e) => { @@ -390,15 +415,16 @@ impl StorageService { async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.clone().key_expr; + let key = sample.key_expr().clone(); let mut wildcards = self.wildcard_updates.write().await; + let timestamp = *sample.timestamp().unwrap(); wildcards.insert( &key, Update { - kind: sample.kind, + kind: sample.kind(), data: StoredData { - value: sample.value, - timestamp: sample.timestamp.unwrap(), + value: Value::from(sample), + timestamp, }, }, ); @@ -438,13 +464,14 @@ impl StorageService { if weight.is_some() && weight.unwrap().data.timestamp > *ts { // if the key matches a wild card update, check whether it was saved in storage // remember that wild card updates change only existing keys - let stripped_key = match self.strip_prefix(&key_expr.into()) { - Ok(stripped) => stripped, - Err(e) => { - tracing::error!("{}", e); - break; - } - }; + let stripped_key = + match crate::strip_prefix(self.strip_prefix.as_ref(), &key_expr.into()) { + Ok(stripped) => stripped, + Err(e) => { + tracing::error!("{}", e); + break; + } + }; let mut storage = self.storage.lock().await; match storage.get(stripped_key, "").await { Ok(stored_data) => { @@ -473,7 +500,7 @@ impl StorageService { async fn is_latest(&self, key_expr: &OwnedKeyExpr, timestamp: &Timestamp) -> bool { // @TODO: if cache exists, read from there let mut storage = self.storage.lock().await; - let stripped_key = match self.strip_prefix(&key_expr.into()) { + let stripped_key = match crate::strip_prefix(self.strip_prefix.as_ref(), 
&key_expr.into()) { Ok(stripped) => stripped, Err(e) => { tracing::error!("{}", e);
@@ -490,7 +517,7 @@ impl StorageService { true } - async fn reply_query(&self, query: Result<zenoh::queryable::Query, flume::RecvError>) { + async fn reply_query(&self, query: Result<zenoh::query::Query, flume::RecvError>) { let q = match query { Ok(q) => q, Err(e) => {
@@ -504,26 +531,24 @@ impl StorageService { let matching_keys = self.get_matching_keys(q.key_expr()).await; let mut storage = self.storage.lock().await; for key in matching_keys { - let stripped_key = match self.strip_prefix(&key.clone().into()) { - Ok(k) => k, - Err(e) => { - tracing::error!("{}", e); - // @TODO: return error when it is supported - return; - } - }; - match storage.get(stripped_key, q.parameters()).await { + let stripped_key = + match crate::strip_prefix(self.strip_prefix.as_ref(), &key.clone().into()) { + Ok(k) => k, + Err(e) => { + tracing::error!("{}", e); + // @TODO: return error when it is supported + return; + } + }; + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(key.clone(), entry.value) - .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q + .reply(key.clone(), entry.value.payload().clone()) + .encoding(entry.value.encoding().clone()) + .timestamp(entry.timestamp) + .await + { tracing::warn!( "Storage '{}' raised an error replying a query: {}", self.name,
@@ -539,7 +564,7 @@ impl StorageService { } drop(storage); } else { - let stripped_key = match self.strip_prefix(q.key_expr()) { + let stripped_key = match crate::strip_prefix(self.strip_prefix.as_ref(), q.key_expr()) { Ok(k) => k, Err(e) => { tracing::error!("{}", e);
@@ -548,18 +573,15 @@ impl StorageService { } }; let mut storage = self.storage.lock().await; - match storage.get(stripped_key, q.parameters()).await { + match storage.get(stripped_key, q.parameters().as_str()).await { Ok(stored_data) => { for entry in stored_data { - let sample = Sample::new(q.key_expr().clone(), entry.value) - .with_timestamp(entry.timestamp); - // apply outgoing interceptor on results - let sample = if let Some(ref interceptor) = self.out_interceptor { - interceptor(sample) - } else { - sample - }; - if let Err(e) = q.reply(Ok(sample)).res().await { + if let Err(e) = q + .reply(q.key_expr().clone(), entry.value.payload().clone()) + .encoding(entry.value.encoding().clone()) + .timestamp(entry.timestamp) + .await + { tracing::warn!( "Storage '{}' raised an error replying a query: {}", self.name,
@@ -584,7 +606,7 @@ impl StorageService { for (k, _ts) in entries { // @TODO: optimize adding back the prefix (possible inspiration from https://github.com/eclipse-zenoh/zenoh/blob/0.5.0-beta.9/backends/traits/src/utils.rs#L79) let full_key = match k { - Some(key) => StorageService::get_prefixed(&self.strip_prefix, &key.into()), + Some(key) => crate::prefix(self.strip_prefix.as_ref(), &key), None => self.strip_prefix.clone().unwrap(), }; if key_expr.intersects(&full_key.clone()) {
@@ -601,51 +623,15 @@ impl StorageService { result } - fn strip_prefix(&self, key_expr: &KeyExpr<'_>) -> ZResult<Option<OwnedKeyExpr>> { - let key = match &self.strip_prefix { - Some(prefix) => { - if key_expr.as_str().eq(prefix.as_str()) { - "" - } else { - match key_expr.strip_prefix(prefix).as_slice() { - [ke] => ke.as_str(), - _ => bail!( - "Keyexpr doesn't start with prefix '{}': '{}'", - prefix, - key_expr -
), - } - } - } - None => key_expr.as_str(), - }; - if key.is_empty() { - Ok(None) - } else { - Ok(Some(OwnedKeyExpr::new(key.to_string()).unwrap())) - } - } - - pub fn get_prefixed( - strip_prefix: &Option, - key_expr: &KeyExpr<'_>, - ) -> OwnedKeyExpr { - match strip_prefix { - Some(prefix) => prefix.join(key_expr.as_keyexpr()).unwrap(), - None => OwnedKeyExpr::from(key_expr.as_keyexpr()), - } - } - async fn initialize_if_empty(&mut self) { if self.replication.is_some() && self.replication.as_ref().unwrap().empty_start { // align with other storages, querying them on key_expr, // with `_time=[..]` to get historical data (in case of time-series) let replies = match self .session - .get(KeyExpr::from(&self.key_expr).with_parameters("_time=[..]")) + .get((&self.key_expr, "_time=[..]")) .target(QueryTarget::All) .consolidation(ConsolidationMode::None) - .res() .await { Ok(replies) => replies, @@ -655,12 +641,12 @@ impl StorageService { } }; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.into_result() { Ok(sample) => { self.process_sample(sample).await; } Err(e) => tracing::warn!( - "Storage '{}' received an error to align query: {}", + "Storage '{}' received an error to align query: {:?}", self.name, e ), @@ -671,25 +657,31 @@ impl StorageService { } fn serialize_update(update: &Update) -> String { + let Update { + kind, + data: StoredData { value, timestamp }, + } = update; + let zbuf: ZBuf = value.payload().into(); + let result = ( - update.kind.to_string(), - update.data.timestamp.to_string(), - update.data.value.encoding.to_string(), - update.data.value.payload.slices().collect::>(), + kind.to_string(), + timestamp.to_string(), + value.encoding().to_string(), + zbuf.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() } fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() let mut payload = ZBuf::default(); for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(Encoding::from(result.2)); + let value = Value::new(payload, result.2); let data = StoredData { value, - timestamp: Timestamp::from_str(&result.1).unwrap(), + timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() }; let kind = if result.0.eq(&(SampleKind::Put).to_string()) { SampleKind::Put diff --git a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs index ff3bec3e02..fcc8425545 100644 --- a/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/storages_mgt.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use async_std::sync::Arc; -use zenoh::Session; +use std::sync::Arc; + +use zenoh::{session::Session, Result as ZResult}; use zenoh_backend_traits::config::StorageConfig; -use zenoh_result::ZResult; pub use super::replica::{Replica, StorageService}; pub enum StorageMessage { Stop, - GetStatus(async_std::channel::Sender), + GetStatus(tokio::sync::mpsc::Sender), } pub(crate) async fn start_storage( @@ -29,7 +29,7 @@ pub(crate) async fn start_storage( admin_key: String, zenoh: Arc, ) -> ZResult> { - // Ex: @/router/390CEC11A1E34977A1C609A35BC015E6/status/plugins/storage_manager/storages/demo1 -> 390CEC11A1E34977A1C609A35BC015E6/demo1 (/ needed????) 
+ // Ex: @/390CEC11A1E34977A1C609A35BC015E6/router/status/plugins/storage_manager/storages/demo1 -> 390CEC11A1E34977A1C609A35BC015E6/demo1 (/ needed????) let parts: Vec<&str> = admin_key.split('/').collect(); let uuid = parts[2]; let storage_name = parts[7]; @@ -39,7 +39,7 @@ pub(crate) async fn start_storage( let (tx, rx) = flume::bounded(1); - async_std::task::spawn(async move { + tokio::task::spawn(async move { // If a configuration for replica is present, we initialize a replica, else only a storage service // A replica contains a storage service and all metadata required for anti-entropy if config.replica_config.is_some() { diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index cb1c42c201..483b87e223 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -16,40 +16,33 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; - -use async_std::task; -use zenoh::prelude::r#async::*; -use zenoh::query::Reply; -use zenoh::{prelude::Config, time::Timestamp}; -use zenoh_core::zasync_executor_init; +use std::{borrow::Cow, str::FromStr, thread::sleep}; + +use tokio::runtime::Runtime; +use zenoh::{ + internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, + Config, Session, +}; use zenoh_plugin_trait::Plugin; -async fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... 
- session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec<Sample> { - let replies: Vec<Reply> = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); +async fn get_data(session: &Session, key_expr: &str) -> Vec<Sample> { + let replies: Vec<Reply> = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } }
@@ -58,9 +51,10 @@ async fn test_updates_in_order() { - task::block_on(async { + async { zasync_executor_init!(); - }); + } + .await; let mut config = Config::default(); config .insert_json5( "plugins", r#"{ storage_manager: { storages: { demo: { volume: "memory", key_expr: "operation/test/**", }, }, }, }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); - let runtime = zenoh::runtime::RuntimeBuilder::new(config) + let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() .await .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); put_data( &session, "operation/test/a", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await;
@@ -103,14 +108,13 @@ // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "1"); + assert_eq!(data[0].payload().deserialize::<Cow<str>>().unwrap(), "1"); put_data( &session, "operation/test/b", "2", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await;
@@ -119,13 +123,12 @@ // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(data[0].payload().deserialize::<Cow<str>>().unwrap(), "2"); delete_data( &session, "operation/test/a", - Timestamp::from_str("2022-01-17T10:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123824268606559/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await;
@@ -138,13 +141,14 @@ // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(format!("{}", data[0].value), "2"); - assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); + assert_eq!(data[0].payload().deserialize::<Cow<str>>().unwrap(), "2"); + assert_eq!(data[0].key_expr().as_str(), "operation/test/b"); drop(storage); } #[test] fn updates_test() { - task::block_on(async { test_updates_in_order().await }); + let rt = Runtime::new().unwrap(); + rt.block_on(async { test_updates_in_order().await }); }
diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs
b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs
index 75fb6d3a87..6a6e36b2fd 100644
--- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs
+++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs
@@ -16,41 +16,34 @@ // 1. normal case, just some wild card puts and deletes on existing keys and ensure it works // 2. check for dealing with out of order updates -use std::str::FromStr; -use std::thread::sleep; +use std::{borrow::Cow, str::FromStr, thread::sleep}; // use std::collections::HashMap; -use async_std::task; -use zenoh::prelude::r#async::*; -use zenoh::query::Reply; -use zenoh::{prelude::Config, time::Timestamp}; -use zenoh_core::zasync_executor_init; +use tokio::runtime::Runtime; +use zenoh::{ + internal::zasync_executor_init, prelude::*, query::Reply, sample::Sample, time::Timestamp, + Config, Session, +}; use zenoh_plugin_trait::Plugin; -async fn put_data(session: &zenoh::Session, key_expr: &str, value: &str, _timestamp: Timestamp) { +async fn put_data(session: &Session, key_expr: &str, value: &str, _timestamp: Timestamp) { println!("Putting Data ('{key_expr}': '{value}')..."); // @TODO: how to add timestamp metadata with put, not manipulating sample... - session.put(key_expr, value).res().await.unwrap(); + session.put(key_expr, value).await.unwrap(); } -async fn delete_data(session: &zenoh::Session, key_expr: &str, _timestamp: Timestamp) { +async fn delete_data(session: &Session, key_expr: &str, _timestamp: Timestamp) { println!("Deleting Data '{key_expr}'..."); // @TODO: how to add timestamp metadata with delete, not manipulating sample... - session.delete(key_expr).res().await.unwrap(); + session.delete(key_expr).await.unwrap(); } -async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec<Sample> { - let replies: Vec<Reply> = session - .get(key_expr) - .res() - .await - .unwrap() - .into_iter() - .collect(); +async fn get_data(session: &Session, key_expr: &str) -> Vec<Sample> { + let replies: Vec<Reply> = session.get(key_expr).await.unwrap().into_iter().collect(); println!("Getting replies on '{key_expr}': '{replies:?}'..."); let mut samples = Vec::new(); for reply in replies { - if let Ok(sample) = reply.sample { + if let Ok(sample) = reply.into_result() { samples.push(sample); } }
@@ -59,9 +52,7 @@ async fn get_data(session: &zenoh::Session, key_expr: &str) -> Vec<Sample> { } async fn test_wild_card_in_order() { - task::block_on(async { - zasync_executor_init!(); - }); + zasync_executor_init!(); let mut config = Config::default(); config .insert_json5( "plugins", r#"{ storage_manager: { storages: { demo: { volume: "memory", key_expr: "wild/test/**", }, }, }, }"#, ) .unwrap(); + config + .insert_json5( + "timestamping", + r#"{ + enabled: { + router: true, + peer: true, + client: true + } + }"#, + ) + .unwrap(); - let runtime = zenoh::runtime::RuntimeBuilder::new(config) + let runtime = zenoh::internal::runtime::RuntimeBuilder::new(config) .build() .await .unwrap(); let storage = zenoh_plugin_storage_manager::StoragesPlugin::start("storage-manager", &runtime).unwrap(); - let session = zenoh::init(runtime).res().await.unwrap(); + let session = zenoh::session::init(runtime).await.unwrap(); sleep(std::time::Duration::from_secs(1)); // put *, ts: 1 put_data( &session, "wild/test/*", "1", - Timestamp::from_str("2022-01-17T10:42:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123566570568799/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await;
@@ -109,8 +111,7 @@ &session, "wild/test/a", "2", -
Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -119,15 +120,14 @@ async fn test_wild_card_in_order() { // expected single entry let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); - assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); - assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(data[0].key_expr().as_str(), "wild/test/a"); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "2"); put_data( &session, "wild/test/b", "3", - Timestamp::from_str("2022-01-17T10:42:11.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123570865536095/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -136,17 +136,28 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert!(["2", "3"].contains(&format!("{}", data[0].value).as_str())); - assert!(["2", "3"].contains(&format!("{}", data[1].value).as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert!(["2", "3"].contains( + &data[0] + .payload() + .deserialize::>() + .unwrap() + .as_ref() + )); + assert!(["2", "3"].contains( + &data[1] + .payload() + .deserialize::>() + .unwrap() + .as_ref() + )); put_data( &session, "wild/test/*", "4", - Timestamp::from_str("2022-01-17T10:43:12.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054123832858541151/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -155,16 +166,15 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); - assert_eq!(format!("{}", data[0].value).as_str(), "4"); - assert_eq!(format!("{}", data[1].value).as_str(), "4"); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); + assert_eq!(data[0].payload().deserialize::>().unwrap(), "4"); + assert_eq!(data[1].payload().deserialize::>().unwrap(), "4"); delete_data( &session, "wild/test/*", - Timestamp::from_str("2022-01-17T13:43:10.418555997Z/BC779A06D7E049BD88C3FF3DB0C17FCC") - .unwrap(), + Timestamp::from_str("7054170209915403359/BC779A06D7E049BD88C3FF3DB0C17FCC").unwrap(), ) .await; @@ -177,12 +187,8 @@ async fn test_wild_card_in_order() { drop(storage); } -// fn test_wild_card_out_of_order() { -// assert_eq!(true, true); -// } - #[test] fn wildcard_test() { - task::block_on(async { test_wild_card_in_order().await }); - // task::block_on(async { test_wild_card_out_of_order() }); + let rt = Runtime::new().unwrap(); + rt.block_on(async { test_wild_card_in_order().await }); } diff --git a/plugins/zenoh-plugin-trait/Cargo.toml b/plugins/zenoh-plugin-trait/Cargo.toml index f78967fe3d..8a355f6e47 100644 --- a/plugins/zenoh-plugin-trait/Cargo.toml +++ b/plugins/zenoh-plugin-trait/Cargo.toml @@ -28,11 +28,10 @@ name = "zenoh_plugin_trait" [dependencies] 
libloading = { workspace = true } -tracing = {workspace = true} +tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } zenoh-macros = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } -zenoh-keyexpr = { workspace = true } -const_format = { workspace = true } \ No newline at end of file +zenoh-keyexpr = { workspace = true, features = ["internal", "unstable"] } \ No newline at end of file
diff --git a/plugins/zenoh-plugin-trait/src/lib.rs b/plugins/zenoh-plugin-trait/src/lib.rs
index b9dbb455ab..36c5097795 100644
--- a/plugins/zenoh-plugin-trait/src/lib.rs
+++ b/plugins/zenoh-plugin-trait/src/lib.rs
@@ -25,13 +25,13 @@ //! //! The actual work of the plugin is performed by the instance, which is created by the [`start`](Plugin::start) function. //! -//! Plugins are loaded, started and stopped by [`PluginsManager`](crate::manager::PluginsManager). Stopping plugin is just dropping it's instance. +//! Plugins are loaded, started and stopped by [`PluginsManager`]. Stopping a plugin is just dropping its instance. //! //! Plugins can be static and dynamic. //! -//! Static plugin is just a type which implements [`Plugin`] trait. It can be added to [`PluginsManager`](crate::manager::PluginsManager) by [`PluginsManager::add_static_plugin`](crate::manager::PluginsManager::add_static_plugin) method. +//! A static plugin is just a type which implements the [`Plugin`] trait. It can be added to [`PluginsManager`] by the [`PluginsManager::declare_static_plugin`](crate::manager::PluginsManager::declare_static_plugin) method. //! -//! Dynamic plugin is a shared library which exports set of C-repr (unmangled) functions which allows to check plugin compatibility and create plugin instance. These functiuons are defined automatically by [`declare_plugin`](crate::declare_plugin) macro. +//! A dynamic plugin is a shared library which exports a set of C-repr (unmangled) functions that allow checking plugin compatibility and creating a plugin instance. These functions are defined automatically by the [`declare_plugin`] macro. //!
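//! For example, a minimal sketch of a static plugin; `MyPlugin`, `MyStartArgs` and
//! `MyRunningInstance` below are hypothetical placeholder types, not part of this crate:
//!
//!     struct MyPlugin;
//!
//!     impl Plugin for MyPlugin {
//!         type StartArgs = MyStartArgs;      // whatever the host passes to `start`
//!         type Instance = MyRunningInstance; // handle kept alive while the plugin runs
//!         const DEFAULT_NAME: &'static str = "my_plugin";
//!         const PLUGIN_VERSION: &'static str = plugin_version!();
//!         const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!();
//!
//!         fn start(name: &str, args: &Self::StartArgs) -> ZResult<Self::Instance> {
//!             // set up the plugin and hand its control structure back to the manager
//!             todo!()
//!         }
//!     }
//!
//! Such a type can then be registered with the manager through
//! [`PluginsManager::declare_static_plugin`](crate::manager::PluginsManager::declare_static_plugin).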
mod compatibility; mod manager;
diff --git a/plugins/zenoh-plugin-trait/src/manager.rs b/plugins/zenoh-plugin-trait/src/manager.rs
index 2aa754906a..4776aa31a3 100644
--- a/plugins/zenoh-plugin-trait/src/manager.rs
+++ b/plugins/zenoh-plugin-trait/src/manager.rs
@@ -13,7 +13,6 @@ mod dynamic_plugin; mod static_plugin; -use crate::*; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; use zenoh_util::LibLoader;
@@ -22,10 +21,11 @@ use self::{ dynamic_plugin::{DynamicPlugin, DynamicPluginSource}, static_plugin::StaticPlugin, }; +use crate::*; pub trait DeclaredPlugin<StartArgs, Instance>: PluginStatus { fn as_status(&self) -> &dyn PluginStatus; - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin<StartArgs, Instance>>; + fn load(&mut self) -> ZResult<Option<&mut dyn LoadedPlugin<StartArgs, Instance>>>; fn loaded(&self) -> Option<&dyn LoadedPlugin<StartArgs, Instance>>; fn loaded_mut(&mut self) -> Option<&mut dyn LoadedPlugin<StartArgs, Instance>>; }
@@ -88,7 +88,7 @@ impl<StartArgs: PluginStartArgs, Instance: PluginInstance> DeclaredPlugin<StartArgs, Instance> fn as_status(&self) -> &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin<StartArgs, Instance>> { + fn load(&mut self) -> ZResult<Option<&mut dyn LoadedPlugin<StartArgs, Instance>>> { self.0.load() } fn loaded(&self) -> Option<&dyn LoadedPlugin<StartArgs, Instance>> {
@@ -100,7 +100,7 @@ impl<StartArgs: PluginStartArgs, Instance: PluginInstance> DeclaredPlugin<StartArgs, Instance> pub struct PluginsManager<StartArgs: PluginStartArgs, Instance: PluginInstance> { default_lib_prefix: String, loader: Option<LibLoader>,
diff --git a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs
index 7e881e8ebd..50bed07a4f 100644
--- a/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs
+++ b/plugins/zenoh-plugin-trait/src/manager/dynamic_plugin.rs
@@ -10,13 +10,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::path::{Path, PathBuf}; use libloading::Library; -use zenoh_result::{bail, ZResult}; +use zenoh_result::{bail, zerror, ZResult}; use zenoh_util::LibLoader; +use crate::*; + /// This enum contains information where to load the plugin from. pub enum DynamicPluginSource { /// Load plugin with the name in String + `.so | .dll | .dylib`
@@ -27,7 +28,7 @@ pub enum DynamicPluginSource { } impl DynamicPluginSource { - fn load(&self) -> ZResult<(Library, PathBuf)> { + fn load(&self) -> ZResult<Option<(Library, PathBuf)>> { match self { DynamicPluginSource::ByName((libloader, name)) => unsafe { libloader.search_and_load(name) }, DynamicPluginSource::ByPaths(paths) => { for path in paths { match unsafe { LibLoader::load_file(path) } { - Ok((l, p)) => return Ok((l, p)), + Ok((l, p)) => return Ok(Some((l, p))), Err(e) => tracing::debug!("Attempt to load {} failed: {}", path, e), } } - bail!("Plugin not found in {:?}", &paths) + Err(zerror!("Plugin not found in {:?}", &paths).into()) } } }
@@ -178,16 +179,22 @@ impl<StartArgs: PluginStartArgs, Instance: PluginInstance> DeclaredPlugin<StartArgs, Instance> fn as_status(&self) -> &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin<StartArgs, Instance>> { + fn load(&mut self) -> ZResult<Option<&mut dyn LoadedPlugin<StartArgs, Instance>>> { if self.starter.is_none() { - let (lib, path) = self.source.load().add_error(&mut self.report)?; + let Some((lib, path)) = self.source.load().add_error(&mut self.report)?
else { + tracing::warn!( + "Plugin `{}` will not be loaded as plugin loading is disabled", + self.name + ); + return Ok(None); + }; let starter = DynamicPluginStarter::new(lib, path).add_error(&mut self.report)?; tracing::debug!("Plugin {} loaded from {}", self.name, starter.path()); self.starter = Some(starter); } else { tracing::warn!("Plugin `{}` already loaded", self.name); } - Ok(self) + Ok(Some(self)) } fn loaded(&self) -> Option<&dyn LoadedPlugin<StartArgs, Instance>> { if self.starter.is_some() {
diff --git a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs
index a77be2f815..2354a8f926 100644
--- a/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs
+++ b/plugins/zenoh-plugin-trait/src/manager/static_plugin.rs
@@ -11,10 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::*; use std::marker::PhantomData; + use zenoh_result::ZResult; +use crate::*; + pub struct StaticPlugin<StartArgs, Instance, P> where P: Plugin<StartArgs = StartArgs, Instance = Instance>,
@@ -82,8 +84,8 @@ where fn as_status(&self) -> &dyn PluginStatus { self } - fn load(&mut self) -> ZResult<&mut dyn LoadedPlugin<StartArgs, Instance>> { - Ok(self) + fn load(&mut self) -> ZResult<Option<&mut dyn LoadedPlugin<StartArgs, Instance>>> { + Ok(Some(self)) } fn loaded(&self) -> Option<&dyn LoadedPlugin<StartArgs, Instance>> { Some(self)
diff --git a/plugins/zenoh-plugin-trait/src/plugin.rs b/plugins/zenoh-plugin-trait/src/plugin.rs
index 43e69696a8..b0651d9842 100644
--- a/plugins/zenoh-plugin-trait/src/plugin.rs
+++ b/plugins/zenoh-plugin-trait/src/plugin.rs
@@ -11,12 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::StructVersion; -use serde::{Deserialize, Serialize}; use std::{borrow::Cow, ops::BitOrAssign}; + +use serde::{Deserialize, Serialize}; use zenoh_keyexpr::keyexpr; use zenoh_result::ZResult; +use crate::StructVersion; + /// The plugin can be in one of these states: /// - Declared: the plugin is declared in the configuration file, but not loaded yet or failed to load /// - Loaded: the plugin is loaded, but not started yet or failed to start
@@ -173,9 +175,9 @@ pub trait Plugin: Sized + 'static { type Instance: PluginInstance; /// Plugins' default name when statically linked. const DEFAULT_NAME: &'static str; - /// Plugin's version. Used only for information purposes. It's recommended to use [plugin_version!] macro to generate this string. + /// Plugin's version. Used only for information purposes. It's recommended to use the [plugin_version!](crate::plugin_version!) macro to generate this string. const PLUGIN_VERSION: &'static str; - /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use [plugin_long_version!] macro to generate this string. + /// Plugin's long version (with git commit hash). Used only for information purposes. It's recommended to use the [plugin_long_version!](crate::plugin_long_version!) macro to generate this string. const PLUGIN_LONG_VERSION: &'static str; /// Starts your plugin.
Use `Ok` to return your plugin's control structure fn start(name: &str, args: &Self::StartArgs) -> ZResult; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 743f7cd993..4dd8e5c567 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.72.0" +channel = "1.75.0" \ No newline at end of file diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 3b842244d3..4f2613cb70 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -29,22 +29,30 @@ maintenance = { status = "actively-developed" } [features] unstable = [] default = [] +shared-memory = ["zenoh/shared-memory"] [dependencies] -tokio = { workspace = true, features = ["rt", "sync", "time", "macros", "io-std"] } +tokio = { workspace = true, features = [ + "rt", + "sync", + "time", + "macros", + "io-std", +] } bincode = { workspace = true } -zenoh-util = {workspace = true } +zenoh-util = { workspace = true } flume = { workspace = true } futures = { workspace = true } -tracing = {workspace = true} +phf = { workspace = true } +tracing = { workspace = true } serde = { workspace = true, features = ["default"] } -zenoh = { workspace = true, features = ["unstable"], default-features = false } -zenoh-core = { workspace = true } +serde_cbor = { workspace = true } +serde_json = { workspace = true } +zenoh = { workspace = true, features = ["unstable", "internal"], default-features = false } zenoh-macros = { workspace = true } -zenoh-result = { workspace = true } -zenoh-sync = { workspace = true } -zenoh-runtime = { workspace = true } -zenoh-task = { workspace = true } + +[dev-dependencies] +zenoh = { workspace = true, features = ["unstable"], default-features = true } [package.metadata.docs.rs] features = ["unstable"] diff --git a/zenoh-ext/examples/examples/z_member.rs b/zenoh-ext/examples/examples/z_member.rs index 5eef7882fc..90129ca21e 100644 --- a/zenoh-ext/examples/examples/z_member.rs +++ b/zenoh-ext/examples/examples/z_member.rs @@ -11,17 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use futures::StreamExt; -use std::sync::Arc; -use std::time::Duration; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; +use zenoh::Config; use zenoh_ext::group::*; #[tokio::main] async fn main() { - zenoh_util::try_init_log_from_env(); - let z = Arc::new(zenoh::open(Config::default()).res().await.unwrap()); + zenoh::try_init_log_from_env(); + let z = Arc::new(zenoh::open(Config::default()).await.unwrap()); let member = Member::new(z.zid().to_string()) .unwrap() .lease(Duration::from_secs(3)); diff --git a/zenoh-ext/examples/examples/z_pub_cache.rs b/zenoh-ext/examples/examples/z_pub_cache.rs index 982829f845..ae4a73112b 100644 --- a/zenoh-ext/examples/examples/z_pub_cache.rs +++ b/zenoh-ext/examples/examples/z_pub_cache.rs @@ -11,22 +11,25 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::{arg, Parser}; use std::time::Duration; -use zenoh::config::{Config, ModeDependentValue}; -use zenoh::prelude::r#async::*; + +use clap::{arg, Parser}; +use zenoh::{ + config::{Config, ModeDependentValue}, + key_expr::KeyExpr, +}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, value, history, prefix, complete) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!("Declaring 
PublicationCache on {}", &key_expr); let mut publication_cache_builder = session @@ -36,14 +39,14 @@ async fn main() { if let Some(prefix) = prefix { publication_cache_builder = publication_cache_builder.queryable_prefix(prefix); } - let _publication_cache = publication_cache_builder.res().await.unwrap(); + let _publication_cache = publication_cache_builder.await.unwrap(); println!("Press CTRL-C to quit..."); for idx in 0..u32::MAX { tokio::time::sleep(Duration::from_secs(1)).await; let buf = format!("[{idx:4}] {value}"); println!("Put Data ('{}': '{}')", &key_expr, buf); - session.put(&key_expr, buf).res().await.unwrap(); + session.put(&key_expr, buf).await.unwrap(); } } diff --git a/zenoh-ext/examples/examples/z_query_sub.rs b/zenoh-ext/examples/examples/z_query_sub.rs index ae919c2c10..c819a2a831 100644 --- a/zenoh-ext/examples/examples/z_query_sub.rs +++ b/zenoh-ext/examples/examples/z_query_sub.rs @@ -11,23 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // -use clap::arg; -use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh::query::ReplyKeyExpr; +use clap::{arg, Parser}; +use zenoh::{config::Config, prelude::*, query::ReplyKeyExpr}; use zenoh_ext::*; use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { // Initiate logging - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, key_expr, query) = parse_args(); println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); + let session = zenoh::open(config).await.unwrap(); println!( "Declaring QueryingSubscriber on {} with an initial query on {}", @@ -40,25 +37,27 @@ async fn main() { .querying() .query_selector(&selector) .query_accept_replies(ReplyKeyExpr::Any) - .res() .await .unwrap() } else { session .declare_subscriber(key_expr) .querying() - .res() .await .unwrap() }; println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { + let payload = sample + .payload() + .deserialize::<String>() + .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind, - sample.key_expr.as_str(), - sample.value + sample.kind(), + sample.key_expr().as_str(), + payload ); } } diff --git a/zenoh-ext/examples/examples/z_view_size.rs b/zenoh-ext/examples/examples/z_view_size.rs index 66e79cd301..a38120cfb4 100644 --- a/zenoh-ext/examples/examples/z_view_size.rs +++ b/zenoh-ext/examples/examples/z_view_size.rs @@ -11,21 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{sync::Arc, time::Duration}; + use clap::{arg, Parser}; -use std::sync::Arc; -use std::time::Duration; use zenoh::config::Config; -use zenoh::prelude::r#async::*; use zenoh_ext::group::*; use zenoh_ext_examples::CommonArgs; #[tokio::main] async fn main() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (config, group_name, id, size, timeout) = parse_args(); - let z = Arc::new(zenoh::open(config).res().await.unwrap()); + let z = Arc::new(zenoh::open(config).await.unwrap()); let member_id = id.unwrap_or_else(|| z.zid().to_string()); let member = Member::new(member_id.as_str()) .unwrap() diff --git a/zenoh-ext/examples/src/lib.rs b/zenoh-ext/examples/src/lib.rs index 4c203d5cff..881d60c138 100644 --- a/zenoh-ext/examples/src/lib.rs +++ b/zenoh-ext/examples/src/lib.rs @@ -43,17 +43,25 @@ impl From<&CommonArgs> for Config { None => Config::default(), }; match value.mode { - Some(Wai::Peer) => config.set_mode(Some(zenoh::scouting::WhatAmI::Peer)), -
Some(Wai::Client) => config.set_mode(Some(zenoh::scouting::WhatAmI::Client)), - Some(Wai::Router) => config.set_mode(Some(zenoh::scouting::WhatAmI::Router)), + Some(Wai::Peer) => config.set_mode(Some(zenoh::config::WhatAmI::Peer)), + Some(Wai::Client) => config.set_mode(Some(zenoh::config::WhatAmI::Client)), + Some(Wai::Router) => config.set_mode(Some(zenoh::config::WhatAmI::Router)), None => Ok(None), } .unwrap(); if !value.connect.is_empty() { - config.connect.endpoints = value.connect.iter().map(|v| v.parse().unwrap()).collect(); + config + .connect + .endpoints + .set(value.connect.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } if !value.listen.is_empty() { - config.listen.endpoints = value.listen.iter().map(|v| v.parse().unwrap()).collect(); + config + .listen + .endpoints + .set(value.listen.iter().map(|v| v.parse().unwrap()).collect()) + .unwrap(); } config } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 5d19964d19..f8de27d662 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -13,26 +13,27 @@ // //! To manage groups and group memberships +use std::{ + collections::HashMap, + convert::TryInto, + ops::Add, + sync::Arc, + time::{Duration, Instant}, +}; use flume::{Receiver, Sender}; -use futures::prelude::*; -use futures::select; +use futures::{prelude::*, select}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::convert::TryInto; -use std::ops::Add; -use std::sync::Arc; -use std::time::{Duration, Instant}; use tokio::sync::Mutex; -use zenoh::prelude::r#async::*; -use zenoh::publication::Publisher; -use zenoh::query::ConsolidationMode; -use zenoh::Error as ZError; -use zenoh::Result as ZResult; -use zenoh::Session; -use zenoh_result::bail; -use zenoh_sync::Condition; -use zenoh_task::TaskController; +use zenoh::{ + bytes::ZBytesReader, + internal::{bail, Condition, TaskController}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, + prelude::*, + pubsub::Publisher, + qos::Priority, + Session, +}; const GROUP_PREFIX: &str = "zenoh/ext/net/group"; const EVENT_POSTFIX: &str = "evt"; @@ -184,7 +185,7 @@ async fn keep_alive_task(state: Arc) { loop { tokio::time::sleep(period).await; tracing::trace!("Sending Keep Alive for: {}", &state.local_member.mid); - let _ = state.group_publisher.put(buf.clone()).res().await; + let _ = state.group_publisher.put(buf.clone()).await; } } @@ -226,26 +227,21 @@ async fn query_handler(z: Arc, state: Arc) { .unwrap(); tracing::debug!("Started query handler for: {}", &qres); let buf = bincode::serialize(&state.local_member).unwrap(); - let queryable = z.declare_queryable(&qres).res().await.unwrap(); + let queryable = z.declare_queryable(&qres).await.unwrap(); while let Ok(query) = queryable.recv_async().await { tracing::trace!("Serving query for: {}", &qres); - query - .reply(Ok(Sample::new(qres.clone(), buf.clone()))) - .res() - .await - .unwrap(); + query.reply(qres.clone(), buf.clone()).await.unwrap(); } } async fn net_event_handler(z: Arc, state: Arc) { let sub = z .declare_subscriber(state.group_publisher.key_expr()) - .res() .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize::(&(s.value.payload.contiguous())) { + match bincode::deserialize_from::(s.payload().reader()) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { tracing::debug!("Member join: {:?}", &je.member); @@ -297,15 +293,15 @@ async fn net_event_handler(z: Arc, state: Arc) { ); let qres = format!("{}/{}/{}", GROUP_PREFIX, &state.gid, kae.mid); // @TODO: we could also send 
this member info - let qc = ConsolidationMode::None; + let qc = zenoh::query::ConsolidationMode::None; tracing::trace!("Issuing Query for {}", &qres); - let receiver = z.get(&qres).consolidation(qc).res().await.unwrap(); + let receiver = z.get(&qres).consolidation(qc).await.unwrap(); while let Ok(reply) = receiver.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { - match bincode::deserialize::( - &sample.payload.contiguous(), + match bincode::deserialize_from::( + sample.payload().reader(), ) { Ok(m) => { let mut expiry = Instant::now(); @@ -335,7 +331,7 @@ async fn net_event_handler(z: Arc, state: Arc) { } } Err(e) => { - tracing::warn!("Error received: {}", e); + tracing::warn!("Error received: {:?}", e); } } } @@ -369,7 +365,6 @@ impl Group { let publisher = z .declare_publisher(event_expr) .priority(with.priority) - .res() .await .unwrap(); let state = Arc::new(GroupState { @@ -386,7 +381,7 @@ impl Group { tracing::debug!("Sending Join Message for local member: {:?}", &with); let join_evt = GroupNetEvent::Join(JoinEvent { member: with }); let buf = bincode::serialize(&join_evt).unwrap(); - let _ = state.group_publisher.put(buf).res().await; + let _ = state.group_publisher.put(buf).await; let task_controller = TaskController::default(); // If the liveliness is manual it is the user who has to assert it. diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7440d80a53..659afa006d 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -21,8 +21,8 @@ pub use querying_subscriber::{ FetchingSubscriber, FetchingSubscriberBuilder, QueryingSubscriberBuilder, }; pub use session_ext::SessionExt; -pub use subscriber_ext::SubscriberBuilderExt; -pub use subscriber_ext::SubscriberForward; +pub use subscriber_ext::{SubscriberBuilderExt, SubscriberForward}; +use zenoh::{internal::zerror, query::Reply, sample::Sample, Result as ZResult}; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { @@ -51,3 +51,13 @@ impl From for KeySpace { KeySpace::Liveliness } } + +pub trait ExtractSample { + fn extract(self) -> ZResult; +} + +impl ExtractSample for Reply { + fn extract(self) -> ZResult { + self.into_result().map_err(|e| zerror!("{:?}", e).into()) + } +} diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 821e621482..9c1536c2a1 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -11,18 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{HashMap, VecDeque}; -use std::convert::TryInto; -use std::future::Ready; -use std::time::Duration; -use zenoh::prelude::r#async::*; -use zenoh::queryable::{Query, Queryable}; -use zenoh::subscriber::FlumeSubscriber; -use zenoh::SessionRef; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_result::{bail, ZResult}; -use zenoh_task::TerminatableTask; -use zenoh_util::core::ResolveFuture; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + time::Duration, +}; + +use zenoh::{ + internal::{bail, runtime::ZRuntime, ResolveFuture, TerminatableTask}, + key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}, + prelude::Wait, + pubsub::FlumeSubscriber, + query::{Query, Queryable, ZenohParameters}, + sample::{Locality, Sample}, + session::{SessionDeclarations, SessionRef}, + Error, Resolvable, Resolve, Result as ZResult, +}; /// The builder of PublicationCache, allowing to configure it. 
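// A minimal sketch (assuming bincode 1.x and serde with the derive feature) of
// the pattern the group.rs changes above move to: deserializing directly from a
// reader with `bincode::deserialize_from`, instead of first copying the payload
// into a contiguous buffer as the old `&payload.contiguous()` code did. Any
// `std::io::Read` works; zenoh's `payload().reader()` plays the same role over
// a received sample payload.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Member { mid: String, lease_secs: u64 }

fn main() {
    let m = Member { mid: "peer-1".into(), lease_secs: 3 };
    let bytes = bincode::serialize(&m).unwrap();
    // `&[u8]` implements `Read`, so it can stand in for the payload reader.
    let decoded: Member = bincode::deserialize_from(&bytes[..]).unwrap();
    assert_eq!(m, decoded);
}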
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -56,7 +61,7 @@ impl<'a, 'b, 'c> PublicationCacheBuilder<'a, 'b, 'c> { pub fn queryable_prefix(mut self, queryable_prefix: TryIntoKeyExpr) -> Self where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.queryable_prefix = Some(queryable_prefix.try_into().map_err(Into::into)); self @@ -94,17 +99,18 @@ impl<'a> Resolvable for PublicationCacheBuilder<'a, '_, '_> { type To = ZResult>; } -impl SyncResolve for PublicationCacheBuilder<'_, '_, '_> { - fn res_sync(self) -> ::To { +impl Wait for PublicationCacheBuilder<'_, '_, '_> { + fn wait(self) -> ::To { PublicationCache::new(self) } } -impl<'a> AsyncResolve for PublicationCacheBuilder<'a, '_, '_> { - type Future = Ready; +impl<'a> IntoFuture for PublicationCacheBuilder<'a, '_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -137,8 +143,8 @@ impl<'a> PublicationCache<'a> { if conf.session.hlc().is_none() { bail!( "Failed requirement for PublicationCache on {}: \ - the Session is not configured with 'add_timestamp=true'", - key_expr + the 'timestamping' setting must be enabled in the Zenoh configuration", + key_expr, ) } @@ -147,7 +153,7 @@ impl<'a> PublicationCache<'a> { .session .declare_subscriber(&key_expr) .allowed_origin(Locality::SessionLocal) - .res_sync()?; + .wait()?; // declare the queryable which returns the cached publications let mut queryable = conf.session.declare_queryable(&queryable_key_expr); @@ -157,11 +163,11 @@ impl<'a> PublicationCache<'a> { if let Some(complete) = conf.complete { queryable = queryable.complete(complete); } - let queryable = queryable.res_sync()?; + let queryable = queryable.wait()?; // take local ownership of stuff to be moved into task - let sub_recv = local_sub.receiver.clone(); - let quer_recv = queryable.receiver.clone(); + let sub_recv = local_sub.handler().clone(); + let quer_recv = queryable.handler().clone(); let pub_key_expr = key_expr.into_owned(); let resources_limit = conf.resources_limit; let history = conf.history; @@ -170,7 +176,7 @@ impl<'a> PublicationCache<'a> { let token = TerminatableTask::create_cancellation_token(); let token2 = token.clone(); let task = TerminatableTask::spawn( - zenoh_runtime::ZRuntime::Application, + ZRuntime::Application, async move { let mut cache: HashMap> = HashMap::with_capacity(resources_limit.unwrap_or(32)); @@ -181,9 +187,9 @@ impl<'a> PublicationCache<'a> { sample = sub_recv.recv_async() => { if let Ok(sample) = sample { let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(&sample.key_expr).unwrap().into() + prefix.join(&sample.key_expr()).unwrap().into() } else { - sample.key_expr.clone() + sample.key_expr().clone() }; if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { @@ -205,29 +211,29 @@ impl<'a> PublicationCache<'a> { // on query, reply with cached content query = quer_recv.recv_async() => { if let Ok(query) = query { - if !query.selector().key_expr.as_str().contains('*') { - if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { + if !query.key_expr().as_str().contains('*') { + if let Some(queue) = cache.get(query.key_expr().as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), 
sample.timestamp) { + if let (Some(Ok(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } } } else { for (key_expr, queue) in cache.iter() { - if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + if query.key_expr().intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if let (Some(Ok(time_range)), Some(timestamp)) = (query.parameters().time_range(), sample.timestamp()) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } } - if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + if let Err(e) = query.reply_sample(sample.clone()).await { tracing::warn!("Error replying to query: {}", e); } } @@ -250,17 +256,17 @@ impl<'a> PublicationCache<'a> { }) } - /// Close this PublicationCache + /// Undeclare this [`PublicationCache`]`. #[inline] - pub fn close(self) -> impl Resolve> + 'a { + pub fn undeclare(self) -> impl Resolve> + 'a { ResolveFuture::new(async move { let PublicationCache { _queryable, local_sub, mut task, } = self; - _queryable.undeclare().res_async().await?; - local_sub.undeclare().res_async().await?; + _queryable.undeclare().await?; + local_sub.undeclare().await?; task.terminate(Duration::from_secs(10)); Ok(()) }) diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index 3c738b7da4..224abfde87 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -11,20 +11,29 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::collections::{btree_map, BTreeMap, VecDeque}; -use std::convert::TryInto; -use std::future::Ready; -use std::mem::swap; -use std::sync::{Arc, Mutex}; -use std::time::Duration; -use zenoh::handlers::{locked, DefaultHandler}; -use zenoh::prelude::r#async::*; -use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::Timestamp; -use zenoh::Result as ZResult; -use zenoh::SessionRef; -use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; +use std::{ + collections::{btree_map, BTreeMap, VecDeque}, + convert::TryInto, + future::{IntoFuture, Ready}, + mem::swap, + sync::{Arc, Mutex}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use zenoh::{ + handlers::{locked, DefaultHandler, IntoHandler}, + internal::zlock, + key_expr::KeyExpr, + prelude::Wait, + pubsub::{Reliability, Subscriber}, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr, Selector}, + sample::{Locality, Sample, SampleBuilder, TimestampBuilderTrait}, + session::{SessionDeclarations, SessionRef}, + time::Timestamp, + Error, Resolvable, Resolve, Result as ZResult, +}; + +use crate::ExtractSample; /// The builder of [`FetchingSubscriber`], allowing to configure it. 
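// Sketch of the resolution pattern this diff migrates builders to (simplified;
// the real `Wait` and `Resolvable` traits live in zenoh-core): instead of the
// custom `AsyncResolve::res_async()` / `SyncResolve::res_sync()` pair, a builder
// implements the std `IntoFuture` trait so callers simply `.await` it, while a
// `wait()` method covers the synchronous path. `PutBuilder` here is a
// hypothetical stand-in, not a zenoh type.
use std::future::{IntoFuture, Ready};

struct PutBuilder { key: String, value: String }

impl PutBuilder {
    // Synchronous resolution, analogous to the `Wait::wait` calls in the diff.
    fn wait(self) -> Result<(), String> {
        println!("put {} = {}", self.key, self.value);
        Ok(())
    }
}

impl IntoFuture for PutBuilder {
    type Output = Result<(), String>;
    type IntoFuture = Ready<Self::Output>;
    fn into_future(self) -> Self::IntoFuture {
        // The operation completes synchronously under the hood, so an
        // already-ready future suffices -- the same
        // `std::future::ready(self.wait())` shape used throughout the diff.
        std::future::ready(self.wait())
    }
}

fn main() {
    // Sync path; the async path inside an async fn would just be `builder.await`.
    PutBuilder { key: "k".into(), value: "v".into() }.wait().unwrap();
}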
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -103,7 +112,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -170,7 +179,7 @@ impl<'a, 'b, Handler> QueryingSubscriberBuilder<'a, 'b, crate::UserSpace, Handle pub fn query_selector(mut self, query_selector: IntoSelector) -> Self where IntoSelector: TryInto>, - >>::Error: Into, + >>::Error: Into, { self.query_selector = Some(query_selector.try_into().map_err(Into::into)); self @@ -212,19 +221,19 @@ impl<'a, 'b, KeySpace, Handler> QueryingSubscriberBuilder<'a, 'b, KeySpace, Hand impl<'a, KeySpace, Handler> Resolvable for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } -impl SyncResolve for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> +impl Wait for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session.clone(); let key_expr = self.key_expr?; let key_space = self.key_space.clone().into(); @@ -252,31 +261,32 @@ where .consolidation(query_consolidation) .accept_replies(query_accept_replies) .timeout(query_timeout) - .res_sync(), + .wait(), crate::KeySpace::Liveliness => session .liveliness() .get(key_expr) .callback(cb) .timeout(query_timeout) - .res_sync(), + .wait(), }, handler: self.handler, phantom: std::marker::PhantomData, } - .res_sync() + .wait() } } -impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> +impl<'a, KeySpace, Handler> IntoFuture for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -302,8 +312,8 @@ impl MergeQueue { } fn push(&mut self, sample: Sample) { - if let Some(ts) = sample.timestamp { - self.timstamped.entry(ts).or_insert(sample); + if let Some(ts) = sample.timestamp() { + self.timstamped.entry(*ts).or_insert(sample); } else { self.untimestamped.push_back(sample); } @@ -350,8 +360,7 @@ pub struct FetchingSubscriberBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, @@ -372,8 +381,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fn with_static_keys( self, @@ -399,8 +407,7 @@ impl< TryIntoSample, > 
FetchingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Add callback to [`FetchingSubscriber`]. #[inline] @@ -463,7 +470,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: IntoHandler<'static, Sample>, { let FetchingSubscriberBuilder { session, @@ -496,8 +503,7 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, crate::UserSpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { /// Change the subscription reliability. #[inline] @@ -538,12 +544,11 @@ impl< TryIntoSample, > Resolvable for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: IntoCallbackReceiverPair<'static, Sample>, - Handler::Receiver: Send, - TryIntoSample: TryInto, - >::Error: Into, + Handler: IntoHandler<'static, Sample>, + Handler::Handler: Send, + TryIntoSample: ExtractSample, { - type To = ZResult>; + type To = ZResult>; } impl< @@ -551,15 +556,14 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> + > Wait for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, + TryIntoSample: ExtractSample + Send + Sync, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) } } @@ -570,18 +574,18 @@ impl< Handler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, - > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> + > IntoFuture for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, + TryIntoSample: ExtractSample + Send + Sync, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -597,20 +601,18 @@ where /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// use zenoh_ext::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { -/// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) -/// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -618,51 +620,52 @@ where /// } /// # } /// ``` -pub struct FetchingSubscriber<'a, Receiver> { +pub struct FetchingSubscriber<'a, Handler> { subscriber: Subscriber<'a, ()>, callback: Arc, state: Arc>, - receiver: Receiver, + handler: 
Handler, } -impl std::ops::Deref for FetchingSubscriber<'_, Receiver> { - type Target = Receiver; +impl std::ops::Deref for FetchingSubscriber<'_, Handler> { + type Target = Handler; fn deref(&self) -> &Self::Target { - &self.receiver + &self.handler } } -impl std::ops::DerefMut for FetchingSubscriber<'_, Receiver> { +impl std::ops::DerefMut for FetchingSubscriber<'_, Handler> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver + &mut self.handler } } -impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { +impl<'a, Handler> FetchingSubscriber<'a, Handler> { fn new< KeySpace, - Handler, + InputHandler, Fetch: FnOnce(Box) -> ZResult<()> + Send + Sync, TryIntoSample, >( - conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, Handler, Fetch, TryIntoSample>, + conf: FetchingSubscriberBuilder<'a, 'a, KeySpace, InputHandler, Fetch, TryIntoSample>, ) -> ZResult where KeySpace: Into, - Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + InputHandler: IntoHandler<'static, Sample, Handler = Handler> + Send, + TryIntoSample: ExtractSample + Send + Sync, { + let session_id = conf.session.zid(); + let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), })); - let (callback, receiver) = conf.handler.into_cb_receiver_pair(); + let (callback, receiver) = conf.handler.into_handler(); let sub_callback = { let state = state.clone(); let callback = callback.clone(); - move |mut s| { + move |s| { let state = &mut zlock!(state); if state.pending_fetches == 0 { callback(s); @@ -670,10 +673,17 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { tracing::trace!( "Sample received while fetch in progress: push it to merge_queue" ); + // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - s.ensure_timestamp(); - state.merge_queue.push(s); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let timestamp = s + .timestamp() + .cloned() + .unwrap_or(Timestamp::new(now, session_id.into())); + state + .merge_queue + .push(SampleBuilder::from(s).timestamp(timestamp).into()); } } }; @@ -690,20 +700,20 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { .callback(sub_callback) .reliability(conf.reliability) .allowed_origin(conf.origin) - .res_sync()?, + .wait()?, crate::KeySpace::Liveliness => conf .session .liveliness() .declare_subscriber(&key_expr) .callback(sub_callback) - .res_sync()?, + .wait()?, }; let fetch_subscriber = FetchingSubscriber { subscriber, callback, state, - receiver, + handler: receiver, }; // run fetch @@ -712,9 +722,9 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { Ok(fetch_subscriber) } - /// Close this FetchingSubscriber + /// Undeclare this [`FetchingSubscriber`]`. 
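// Self-contained sketch of the MergeQueue idea used by the FetchingSubscriber
// above: samples with a timestamp are deduplicated and ordered by it (the
// `timstamped` BTreeMap in the diff), while untimestamped samples keep arrival
// order (the `untimestamped` VecDeque). The drain order shown here --
// untimestamped first, then timestamped -- is an assumption for this sketch;
// the diff does not show the drain implementation.
use std::collections::{BTreeMap, VecDeque};

struct MergeQueue {
    untimestamped: VecDeque<String>,
    timestamped: BTreeMap<u64, String>, // key: timestamp
}

impl MergeQueue {
    fn new() -> Self {
        Self { untimestamped: VecDeque::new(), timestamped: BTreeMap::new() }
    }
    fn push(&mut self, ts: Option<u64>, sample: String) {
        match ts {
            // First writer wins for a given timestamp, like `entry(...).or_insert`.
            Some(ts) => { self.timestamped.entry(ts).or_insert(sample); }
            None => self.untimestamped.push_back(sample),
        }
    }
    fn drain(self) -> impl Iterator<Item = String> {
        self.untimestamped.into_iter().chain(self.timestamped.into_values())
    }
}

fn main() {
    let mut q = MergeQueue::new();
    q.push(Some(2), "b".into());
    q.push(None, "live".into());
    q.push(Some(1), "a".into());
    q.push(Some(1), "dup".into()); // ignored: timestamp 1 already present
    let order: Vec<_> = q.drain().collect();
    assert_eq!(order, ["live", "a", "b"]);
}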
#[inline] - pub fn close(self) -> impl Resolve> + 'a { + pub fn undeclare(self) -> impl Resolve> + 'a { self.subscriber.undeclare() } @@ -733,33 +743,29 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// /// // perform an additional fetch /// subscriber /// .fetch( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -773,8 +779,7 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { fetch: Fetch, ) -> impl Resolve> where - TryIntoSample: TryInto + Send + Sync, - >::Error: Into, + TryIntoSample: ExtractSample + Send + Sync, { FetchBuilder { fetch, @@ -816,32 +821,28 @@ impl Drop for RepliesHandler { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// # use zenoh::prelude::r#async::*; +/// # use zenoh::prelude::*; /// # use zenoh_ext::*; /// # -/// # let session = zenoh::open(config::peer()).res().await.unwrap(); +/// # let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// # let mut fetching_subscriber = session /// # .declare_subscriber("key/expr") /// # .fetching( |cb| { -/// # use zenoh::prelude::sync::SyncResolve; /// # session /// # .get("key/expr") /// # .callback(cb) -/// # .res_sync() +/// # .wait() /// # }) -/// # .res() /// # .await /// # .unwrap(); /// # /// fetching_subscriber /// .fetch( |cb| { -/// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) -/// .res_sync() +/// .wait() /// }) -/// .res() /// .await /// .unwrap(); /// # } @@ -851,8 +852,7 @@ pub struct FetchBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { fetch: Fetch, phantom: std::marker::PhantomData, @@ -863,34 +863,32 @@ pub struct FetchBuilder< impl) -> ZResult<()>, TryIntoSample> Resolvable for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { type To = ZResult<()>; } -impl) -> ZResult<()>, TryIntoSample> - SyncResolve for FetchBuilder +impl) -> ZResult<()>, TryIntoSample> Wait + for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let handler = register_handler(self.state, self.callback); run_fetch(self.fetch, handler) } } impl) -> ZResult<()>, TryIntoSample> - AsyncResolve for FetchBuilder + IntoFuture for FetchBuilder where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -911,16 +909,15 @@ fn run_fetch< handler: RepliesHandler, ) -> ZResult<()> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { 
tracing::debug!("Fetch data for FetchingSubscriber"); - (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { + (fetch)(Box::new(move |s: TryIntoSample| match s.extract() { Ok(s) => { let mut state = zlock!(handler.state); tracing::trace!("Fetched sample received: push it to merge_queue"); state.merge_queue.push(s); } - Err(e) => tracing::debug!("Received error fetching data: {}", e.into()), + Err(e) => tracing::debug!("Received error fetching data: {}", e), })) } diff --git a/zenoh-ext/src/session_ext.rs b/zenoh-ext/src/session_ext.rs index 73fbd7dfc4..606f00743b 100644 --- a/zenoh-ext/src/session_ext.rs +++ b/zenoh-ext/src/session_ext.rs @@ -11,11 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{convert::TryInto, sync::Arc}; + +use zenoh::{ + key_expr::KeyExpr, + session::{Session, SessionRef}, + Error, +}; + use super::PublicationCacheBuilder; -use std::convert::TryInto; -use std::sync::Arc; -use zenoh::prelude::KeyExpr; -use zenoh::{Session, SessionRef}; /// Some extensions to the [`zenoh::Session`](zenoh::Session) pub trait SessionExt<'s, 'a> { @@ -25,7 +29,7 @@ pub trait SessionExt<'s, 'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into; + >>::Error: Into; } impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { @@ -35,7 +39,7 @@ impl<'s, 'a> SessionExt<'s, 'a> for SessionRef<'a> { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { PublicationCacheBuilder::new(self.clone(), pub_key_expr.try_into().map_err(Into::into)) } @@ -48,7 +52,7 @@ impl<'a> SessionExt<'a, 'a> for Session { ) -> PublicationCacheBuilder<'a, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Borrow(self).declare_publication_cache(pub_key_expr) } @@ -59,14 +63,14 @@ impl<'s> SessionExt<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh::config::ModeDependentValue::Unique; /// use zenoh_ext::SessionExt; /// - /// let mut config = config::default(); + /// let mut config = zenoh::config::default(); /// config.timestamping.set_enabled(Some(Unique(true))); - /// let session = zenoh::open(config).res().await.unwrap().into_arc(); - /// let publication_cache = session.declare_publication_cache("key/expression").res().await.unwrap(); + /// let session = zenoh::open(config).await.unwrap().into_arc(); + /// let publication_cache = session.declare_publication_cache("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// publication_cache.key_expr(); /// }).await; @@ -78,7 +82,7 @@ impl<'s> SessionExt<'s, 'static> for Arc { ) -> PublicationCacheBuilder<'static, 'b, 'c> where TryIntoKeyExpr: TryInto>, - >>::Error: Into, + >>::Error: Into, { SessionRef::Shared(self.clone()).declare_publication_cache(pub_key_expr) } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 5a9c05972f..a7356f86dc 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -11,20 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::time::Duration; + use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::{convert::TryInto, time::Duration}; -use zenoh::query::ReplyKeyExpr; -use zenoh::sample::Locality; -use zenoh::Result as ZResult; use zenoh::{ liveliness::LivelinessSubscriberBuilder, - prelude::Sample, - query::{QueryConsolidation, QueryTarget}, - subscriber::{PushMode, 
Reliability, Subscriber, SubscriberBuilder}, + pubsub::{Reliability, Subscriber, SubscriberBuilder}, + query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}, + sample::{Locality, Sample}, + Result as ZResult, }; -use crate::{querying_subscriber::QueryingSubscriberBuilder, FetchingSubscriberBuilder}; +use crate::{ + querying_subscriber::QueryingSubscriberBuilder, ExtractSample, FetchingSubscriberBuilder, +}; /// Allows writing `subscriber.forward(receiver)` instead of `subscriber.stream().map(Ok).forward(publisher)` pub trait SubscriberForward<'a, S> { @@ -37,11 +38,11 @@ where { type Output = Forward, fn(Sample) -> Result>, S>; fn forward(&'a mut self, sink: S) -> Self::Output { - futures::StreamExt::forward(futures::StreamExt::map(self.receiver.stream(), Ok), sink) + futures::StreamExt::forward(futures::StreamExt::map(self.stream(), Ok), sink) } } -/// Some extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::subscriber::SubscriberBuilder) +/// Some extensions to the [`zenoh::subscriber::SubscriberBuilder`](zenoh::pubsub::SubscriberBuilder) pub trait SubscriberBuilderExt<'a, 'b, Handler> { type KeySpace; @@ -59,20 +60,18 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -88,8 +87,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into; + TryIntoSample: ExtractSample; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will perform a query (`session.get()`) as it's /// initial fetch. @@ -106,14 +104,13 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -124,9 +121,7 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler>; } -impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> - for SubscriberBuilder<'a, 'b, PushMode, Handler> -{ +impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilder<'a, 'b, Handler> { type KeySpace = crate::UserSpace; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). 
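// Sketch of the `fetching` closure contract used in the doc examples above: the
// subscriber hands the fetch a boxed callback, the fetch feeds every historical
// sample into it, and returns Ok(()) once the fetch has been launched. The types
// are simplified stand-ins for the real
// `FnOnce(Box<dyn Fn(TryIntoSample) + Send + Sync>) -> ZResult<()>` bound.
type Sample = String;

fn run_fetch<F>(fetch: F) -> Result<(), String>
where
    F: FnOnce(Box<dyn Fn(Sample) + Send + Sync>) -> Result<(), String>,
{
    // The subscriber-side callback that merges fetched samples.
    let cb = Box::new(|s: Sample| println!("fetched: {s}"));
    fetch(cb)
}

fn main() {
    run_fetch(|cb| {
        // A stand-in for `session.get("key/expr").callback(cb).wait()`.
        for s in ["historic-1", "historic-2"] {
            cb(s.to_string());
        }
        Ok(())
    })
    .unwrap();
}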
@@ -143,20 +138,18 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -172,8 +165,7 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, @@ -202,14 +194,13 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -257,22 +248,20 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") /// .fetching( |cb| { - /// use zenoh::prelude::sync::SyncResolve; /// session /// .liveliness() /// .get("key/expr") /// .callback(cb) - /// .res_sync() + /// .wait() /// }) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -288,14 +277,13 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: TryInto, - >::Error: Into, + TryIntoSample: ExtractSample, { FetchingSubscriberBuilder { session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), fetch, handler: self.handler, @@ -319,15 +307,14 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// use zenoh_ext::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session /// .liveliness() /// .declare_subscriber("key/expr") /// .querying() - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { @@ -340,11 +327,11 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - 
reliability: Reliability::default(), + reliability: Reliability::DEFAULT, origin: Locality::default(), query_selector: None, - query_target: QueryTarget::default(), - query_consolidation: QueryConsolidation::default(), + query_target: QueryTarget::DEFAULT, + query_consolidation: QueryConsolidation::DEFAULT, query_accept_replies: ReplyKeyExpr::MatchingQuery, query_timeout: Duration::from_secs(10), handler: self.handler, diff --git a/zenoh-ext/tests/liveliness.rs b/zenoh-ext/tests/liveliness.rs new file mode 100644 index 0000000000..637d07ba57 --- /dev/null +++ b/zenoh-ext/tests/liveliness.rs @@ -0,0 +1,394 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use zenoh::{ + config::{self, EndPoint, WhatAmI}, + sample::SampleKind, +}; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_querying_subscriber_clique() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "udp/localhost:47447"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let peer1 = { + let mut c = config::default(); + c.listen + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let peer2 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(peer1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .querying()) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + token1.undeclare().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + 
sub.undeclare().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_querying_subscriber_brokered() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47448"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let router = { + let mut c = config::default(); + c.listen + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let client3 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (3) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(client1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .querying()) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + token1.undeclare().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.undeclare().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); + client3.close().await.unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_fetching_subscriber_clique() { + use std::time::Duration; + + use 
zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const PEER1_ENDPOINT: &str = "udp/localhost:47449"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let peer1 = { + let mut c = config::default(); + c.listen + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (1) ZID: {}", s.zid()); + s + }; + + let peer2 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![PEER1_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Peer)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Peer (2) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(peer1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .fetching(|cb| peer1 + .liveliness() + .get(LIVELINESS_KEYEXPR_ALL) + .callback(cb) + .wait())) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let token2 = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + token1.undeclare().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.undeclare().await.unwrap(); + + peer1.close().await.unwrap(); + peer2.close().await.unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn test_liveliness_fetching_subscriber_brokered() { + use std::time::Duration; + + use zenoh::{internal::ztimeout, prelude::*}; + use zenoh_ext::SubscriberBuilderExt; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const ROUTER_ENDPOINT: &str = "tcp/localhost:47450"; + + const LIVELINESS_KEYEXPR_1: &str = "test/liveliness/querying-subscriber/brokered/1"; + const LIVELINESS_KEYEXPR_2: &str = "test/liveliness/querying-subscriber/brokered/2"; + const LIVELINESS_KEYEXPR_ALL: &str = "test/liveliness/querying-subscriber/brokered/*"; + + zenoh_util::try_init_log_from_env(); + + let router = { + let mut c = config::default(); + c.listen + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Router)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Router ZID: {}", s.zid()); + s + }; + + let client1 = { + let mut c = config::default(); + c.connect + .endpoints 
+ .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (1) ZID: {}", s.zid()); + s + }; + + let client2 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (2) ZID: {}", s.zid()); + s + }; + + let client3 = { + let mut c = config::default(); + c.connect + .endpoints + .set(vec![ROUTER_ENDPOINT.parse::().unwrap()]) + .unwrap(); + c.scouting.multicast.set_enabled(Some(false)).unwrap(); + let _ = c.set_mode(Some(WhatAmI::Client)); + let s = ztimeout!(zenoh::open(c)).unwrap(); + tracing::info!("Client (3) ZID: {}", s.zid()); + s + }; + + let token1 = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR_1)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sub = ztimeout!(client1 + .liveliness() + .declare_subscriber(LIVELINESS_KEYEXPR_ALL) + .fetching(|cb| client1 + .liveliness() + .get(LIVELINESS_KEYEXPR_ALL) + .callback(cb) + .wait())) + .unwrap(); + tokio::time::sleep(SLEEP).await; + + let token2 = ztimeout!(client3.liveliness().declare_token(LIVELINESS_KEYEXPR_2)).unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Put); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_2); + + token1.undeclare().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + let sample = ztimeout!(sub.recv_async()).unwrap(); + assert_eq!(sample.kind(), SampleKind::Delete); + assert_eq!(sample.key_expr().as_str(), LIVELINESS_KEYEXPR_1); + + token2.undeclare().await.unwrap(); + sub.undeclare().await.unwrap(); + + router.close().await.unwrap(); + client1.close().await.unwrap(); + client2.close().await.unwrap(); + client3.close().await.unwrap(); +} diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 78b90ad9f1..7bdb393a6c 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -31,12 +31,25 @@ maintenance = { status = "actively-developed" } [features] auth_pubkey = ["zenoh-transport/auth_pubkey"] auth_usrpwd = ["zenoh-transport/auth_usrpwd"] -complete_n = ["zenoh-codec/complete_n"] +default = [ + "auth_pubkey", + "auth_usrpwd", + "transport_multilink", + "transport_compression", + "transport_quic", + "transport_tcp", + "transport_tls", + "transport_udp", + "transport_unixsock-stream", + "transport_ws", +] +internal = ["zenoh-keyexpr/internal", "zenoh-config/internal"] plugins = [] shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", "zenoh-transport/shared-memory", + "zenoh-buffers/shared-memory", ] stats = ["zenoh-transport/stats", "zenoh-protocol/stats"] transport_multilink = ["zenoh-transport/transport_multilink"] @@ -50,19 +63,7 @@ transport_udp = ["zenoh-transport/transport_udp"] transport_unixsock-stream = ["zenoh-transport/transport_unixsock-stream"] transport_ws = ["zenoh-transport/transport_ws"] transport_vsock = ["zenoh-transport/transport_vsock"] -unstable = [] -default = [ - "auth_pubkey", - "auth_usrpwd", - "transport_multilink", - "transport_compression", - "transport_quic", - "transport_tcp", - 
"transport_tls", - "transport_udp", - "transport_unixsock-stream", - "transport_ws", -] +unstable = ["zenoh-keyexpr/unstable"] [dependencies] tokio = { workspace = true, features = ["rt", "macros", "time"] } @@ -70,24 +71,30 @@ tokio-util = { workspace = true } ahash = { workspace = true } async-trait = { workspace = true } base64 = { workspace = true } -const_format = { workspace = true } +bytes = { workspace = true } event-listener = { workspace = true } flume = { workspace = true } form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } +itertools = { workspace = true } lazy_static = { workspace = true } tracing = { workspace = true } ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } +phf = { workspace = true } rand = { workspace = true, features = ["default"] } regex = { workspace = true } serde = { workspace = true, features = ["default"] } +serde_cbor = { workspace = true } serde_json = { workspace = true } +serde-pickle = { workspace = true } +serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } +unwrap-infallible = { workspace = true } uuid = { workspace = true, features = ["default"] } vec_map = { workspace = true } zenoh-buffers = { workspace = true, features = ["std"] } @@ -108,6 +115,10 @@ zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } zenoh-task = { workspace = true } +once_cell = { workspace = true } + +[dev-dependencies] +tokio = { workspace = true } [build-dependencies] rustc_version = { workspace = true } @@ -127,7 +138,7 @@ maintainer = "zenoh-dev@eclipse.org" copyright = "2024 ZettaScale Technology" section = "net" license-file = ["../LICENSE", "0"] -depends = "zenohd (=0.11.0-dev-1), zenoh-plugin-rest (=0.11.0-dev-1), zenoh-plugin-storage-manager (=0.11.0-dev-1)" +depends = "zenohd (=1.0.0~dev-1), zenoh-plugin-rest (=1.0.0~dev-1), zenoh-plugin-storage-manager (=1.0.0~dev-1)" maintainer-scripts = ".deb" assets = [["../README.md", "README.md", "644"]] diff --git a/zenoh/src/admin.rs b/zenoh/src/api/admin.rs similarity index 71% rename from zenoh/src/admin.rs rename to zenoh/src/api/admin.rs index 7fd972c9a6..e794c87db5 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/api/admin.rs @@ -11,43 +11,40 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - keyexpr, - prelude::sync::{KeyExpr, Locality, SampleKind}, - queryable::Query, - sample::DataInfo, - Sample, Session, ZResult, -}; use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, sync::Arc, }; -use zenoh_core::SyncResolve; -use zenoh_protocol::{ - core::{Encoding, KnownEncoding, WireExpr}, - network::NetworkMessage, -}; + +use zenoh_core::{Result as ZResult, Wait}; +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; -macro_rules! 
ke_for_sure {
-    ($val:expr) => {
-        unsafe { keyexpr::from_str_unchecked($val) }
-    };
-}
+use super::{
+    bytes::ZBytes,
+    encoding::Encoding,
+    key_expr::KeyExpr,
+    queryable::Query,
+    sample::{DataInfo, Locality, SampleKind},
+    session::Session,
+    subscriber::SubscriberKind,
+};

 lazy_static::lazy_static!(
-    static ref KE_STARSTAR: &'static keyexpr = ke_for_sure!("**");
-    static ref KE_PREFIX: &'static keyexpr = ke_for_sure!("@/session");
-    static ref KE_TRANSPORT_UNICAST: &'static keyexpr = ke_for_sure!("transport/unicast");
-    static ref KE_LINK: &'static keyexpr = ke_for_sure!("link");
+    static ref KE_STARSTAR: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("**") };
+    static ref KE_PREFIX: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("@") };
+    static ref KE_SESSION: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("session") };
+    static ref KE_TRANSPORT_UNICAST: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("transport/unicast") };
+    static ref KE_LINK: &'static keyexpr = unsafe { keyexpr::from_str_unchecked("link") };
 );

 pub(crate) fn init(session: &Session) {
     if let Ok(own_zid) = keyexpr::new(&session.zid().to_string()) {
-        let admin_key = KeyExpr::from(*KE_PREFIX / own_zid / *KE_STARSTAR)
+        let admin_key = KeyExpr::from(*KE_PREFIX / own_zid / *KE_SESSION / *KE_STARSTAR)
             .to_wire(session)
             .to_owned();
@@ -67,10 +64,15 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) {
 fn reply_peer(own_zid: &keyexpr, query: &Query, peer: TransportPeer) {
     let zid = peer.zid.to_string();
     if let Ok(zid) = keyexpr::new(&zid) {
-        let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid;
+        let key_expr = *KE_PREFIX / own_zid / *KE_SESSION / *KE_TRANSPORT_UNICAST / zid;
         if query.key_expr().intersects(&key_expr) {
             if let Ok(value) = serde_json::value::to_value(peer.clone()) {
-                let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync();
+                match ZBytes::try_from(value) {
+                    Ok(zbuf) => {
+                        let _ = query.reply(key_expr, zbuf).wait();
+                    }
+                    Err(e) => tracing::debug!("Admin query error: {}", e),
+                }
             }
         }
@@ -78,11 +80,21 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) {
             let mut s = DefaultHasher::new();
             link.hash(&mut s);
             if let Ok(lid) = keyexpr::new(&s.finish().to_string()) {
-                let key_expr =
-                    *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid;
+                let key_expr = *KE_PREFIX
+                    / own_zid
+                    / *KE_SESSION
+                    / *KE_TRANSPORT_UNICAST
+                    / zid
+                    / *KE_LINK
+                    / lid;
                 if query.key_expr().intersects(&key_expr) {
                     if let Ok(value) = serde_json::value::to_value(link) {
-                        let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync();
+                        match ZBytes::try_from(value) {
+                            Ok(zbuf) => {
+                                let _ = query.reply(key_expr, zbuf).wait();
+                            }
+                            Err(e) => tracing::debug!("Admin query error: {}", e),
+                        }
                     }
                 }
             }
@@ -145,18 +157,20 @@ impl TransportMulticastEventHandler for Handler {
     ) -> ZResult<Arc<dyn TransportPeerEventHandler>> {
         if let Ok(own_zid) = keyexpr::new(&self.session.zid().to_string()) {
             if let Ok(zid) = keyexpr::new(&peer.zid.to_string()) {
-                let expr = WireExpr::from(&(*KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid))
-                    .to_owned();
+                let expr = WireExpr::from(
+                    &(*KE_PREFIX / own_zid / *KE_SESSION / *KE_TRANSPORT_UNICAST / zid),
+                )
+                .to_owned();
                 let info = DataInfo {
-                    encoding: Some(Encoding::Exact(KnownEncoding::AppJson)),
+                    encoding: Some(Encoding::APPLICATION_JSON),
                     ..Default::default()
                 };
-                self.session.handle_data(
+                self.session.execute_subscriber_callbacks(
                     true,
                     &expr,
                     Some(info),
                     serde_json::to_vec(&peer).unwrap().into(),
-                    #[cfg(feature = 
"unstable")] + SubscriberKind::Subscriber, None, ); Ok(Arc::new(PeerHandler { @@ -194,10 +208,10 @@ impl TransportPeerEventHandler for PeerHandler { let mut s = DefaultHasher::new(); link.hash(&mut s); let info = DataInfo { - encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), + encoding: Some(Encoding::APPLICATION_JSON), ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self .expr @@ -205,7 +219,7 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), serde_json::to_vec(&link).unwrap().into(), - #[cfg(feature = "unstable")] + SubscriberKind::Subscriber, None, ); } @@ -217,7 +231,7 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self .expr @@ -225,7 +239,7 @@ impl TransportPeerEventHandler for PeerHandler { .with_suffix(&format!("/link/{}", s.finish())), Some(info), vec![0u8; 0].into(), - #[cfg(feature = "unstable")] + SubscriberKind::Subscriber, None, ); } @@ -237,12 +251,12 @@ impl TransportPeerEventHandler for PeerHandler { kind: SampleKind::Delete, ..Default::default() }; - self.session.handle_data( + self.session.execute_subscriber_callbacks( true, &self.expr, Some(info), vec![0u8; 0].into(), - #[cfg(feature = "unstable")] + SubscriberKind::Subscriber, None, ); } diff --git a/zenoh/src/api/builders.rs b/zenoh/src/api/builders.rs new file mode 100644 index 0000000000..5327dabe90 --- /dev/null +++ b/zenoh/src/api/builders.rs @@ -0,0 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +pub(crate) mod publisher; +pub(crate) mod sample; diff --git a/zenoh/src/api/builders/publisher.rs b/zenoh/src/api/builders/publisher.rs new file mode 100644 index 0000000000..666b4378e0 --- /dev/null +++ b/zenoh/src/api/builders/publisher.rs @@ -0,0 +1,423 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
+//
+use std::future::{IntoFuture, Ready};
+
+use zenoh_core::{Resolvable, Result as ZResult, Wait};
+use zenoh_protocol::{core::CongestionControl, network::Mapping};
+
+#[cfg(feature = "unstable")]
+use crate::api::sample::SourceInfo;
+use crate::api::{
+    builders::sample::{
+        EncodingBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait,
+    },
+    bytes::{OptionZBytes, ZBytes},
+    encoding::Encoding,
+    key_expr::KeyExpr,
+    publisher::{Priority, Publisher},
+    sample::{Locality, SampleKind},
+    session::SessionRef,
+};
+
+pub type SessionPutBuilder<'a, 'b> =
+    PublicationBuilder<PublisherBuilder<'a, 'b>, PublicationBuilderPut>;
+
+pub type SessionDeleteBuilder<'a, 'b> =
+    PublicationBuilder<PublisherBuilder<'a, 'b>, PublicationBuilderDelete>;
+
+pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>;
+
+pub type PublisherDeleteBuilder<'a> =
+    PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>;
+
+#[derive(Debug, Clone)]
+pub struct PublicationBuilderPut {
+    pub(crate) payload: ZBytes,
+    pub(crate) encoding: Encoding,
+}
+#[derive(Debug, Clone)]
+pub struct PublicationBuilderDelete;
+
+/// A builder for initializing [`Session::put`](crate::session::Session::put), [`Session::delete`](crate::session::Session::delete),
+/// [`Publisher::put`](crate::pubsub::Publisher::put), and [`Publisher::delete`](crate::pubsub::Publisher::delete) operations.
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::{bytes::Encoding, prelude::*, qos::CongestionControl};
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// session
+///     .put("key/expression", "payload")
+///     .encoding(Encoding::TEXT_PLAIN)
+///     .congestion_control(CongestionControl::Block)
+///     .await
+///     .unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+#[derive(Debug, Clone)]
+pub struct PublicationBuilder<P, T> {
+    pub(crate) publisher: P,
+    pub(crate) kind: T,
+    pub(crate) timestamp: Option<uhlc::Timestamp>,
+    #[cfg(feature = "unstable")]
+    pub(crate) source_info: SourceInfo,
+    pub(crate) attachment: Option<ZBytes>,
+}
+
+impl<T> QoSBuilderTrait for PublicationBuilder<PublisherBuilder<'_, '_>, T> {
+    #[inline]
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self {
+        Self {
+            publisher: self.publisher.congestion_control(congestion_control),
+            ..self
+        }
+    }
+    #[inline]
+    fn priority(self, priority: Priority) -> Self {
+        Self {
+            publisher: self.publisher.priority(priority),
+            ..self
+        }
+    }
+    #[inline]
+    fn express(self, is_express: bool) -> Self {
+        Self {
+            publisher: self.publisher.express(is_express),
+            ..self
+        }
+    }
+}
+
+impl<T> PublicationBuilder<PublisherBuilder<'_, '_>, T> {
+    /// Restrict the matching subscribers that will receive the published data
+    /// to the ones that have the given [`Locality`](crate::prelude::Locality).
+    #[zenoh_macros::unstable]
+    #[inline]
+    pub fn allowed_destination(mut self, destination: Locality) -> Self {
+        self.publisher = self.publisher.allowed_destination(destination);
+        self
+    }
+}
+
+impl EncodingBuilderTrait for PublisherBuilder<'_, '_> {
+    fn encoding<T: Into<Encoding>>(self, encoding: T) -> Self {
+        Self {
+            encoding: encoding.into(),
+            ..self
+        }
+    }
+}
+
+impl
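A sketch stacking the `QoSBuilderTrait` and `EncodingBuilderTrait` methods declared above onto a single put; it extends the doc example with priority and express settings (`Priority::DataHigh` is one of zenoh's priority levels; treat the exact paths as assumptions):

```rust
use zenoh::{
    bytes::Encoding,
    prelude::*,
    qos::{CongestionControl, Priority},
};

async fn qos_put(session: &zenoh::Session) {
    session
        .put("key/expression", "payload")
        .encoding(Encoding::TEXT_PLAIN)
        .congestion_control(CongestionControl::Block)
        .priority(Priority::DataHigh)
        .express(true) // skip batching to lower latency
        .await
        .unwrap();
}
```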
- /// - /// Infos on the source of this Sample. - pub source_info: SourceInfo, - - #[cfg(feature = "unstable")] - ///
- /// 🔬 - /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. - /// To use it, you must enable zenoh's unstable feature flag. - ///
- /// - /// A map of key-value pairs, where each key and value are byte-slices. - pub attachment: Option, -} - -impl Sample { - /// Creates a new Sample. - #[inline] - pub fn new(key_expr: IntoKeyExpr, value: IntoValue) -> Self - where - IntoKeyExpr: Into>, - IntoValue: Into, - { - Sample { - key_expr: key_expr.into(), - value: value.into(), - kind: SampleKind::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - } - } - /// Creates a new Sample. - #[inline] - pub fn try_from( - key_expr: TryIntoKeyExpr, - value: IntoValue, - ) -> Result - where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - IntoValue: Into, - { - Ok(Sample { - key_expr: key_expr.try_into().map_err(Into::into)?, - value: value.into(), - kind: SampleKind::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - }) - } - - /// Creates a new Sample with optional data info. - #[inline] - pub(crate) fn with_info( - key_expr: KeyExpr<'static>, - payload: ZBuf, - data_info: Option, - ) -> Self { - let mut value: Value = payload.into(); - if let Some(data_info) = data_info { - if let Some(encoding) = &data_info.encoding { - value.encoding = encoding.clone(); - } - Sample { - key_expr, - value, - kind: data_info.kind, - timestamp: data_info.timestamp, - qos: data_info.qos, - #[cfg(feature = "unstable")] - source_info: data_info.into(), - #[cfg(feature = "unstable")] - attachment: None, - } - } else { - Sample { - key_expr, - value, - kind: SampleKind::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - } - } - } - - /// Gets the timestamp of this Sample. - #[inline] - pub fn get_timestamp(&self) -> Option<&Timestamp> { - self.timestamp.as_ref() - } - - /// Sets the timestamp of this Sample. - #[inline] - pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { - self.timestamp = Some(timestamp); - self - } - - /// Sets the source info of this Sample. - #[zenoh_macros::unstable] - #[inline] - pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { - self.source_info = source_info; - self - } - - #[inline] - /// Ensure that an associated Timestamp is present in this Sample. - /// If not, a new one is created with the current system time and 0x00 as id. 
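For contrast with the builder-based 1.0 API, a usage sketch of the `Sample` surface being deleted here (pre-1.0 names; sync prelude assumed):

```rust
use zenoh::prelude::sync::*;

fn old_sample_api() {
    // Samples were constructed directly from a key expression and a value.
    let mut sample = Sample::new(KeyExpr::try_from("key/expression").unwrap(), "payload");
    assert!(sample.get_timestamp().is_none());

    // `ensure_timestamp` lazily stamps the sample with the reception time.
    let _ = sample.ensure_timestamp();
    assert!(sample.get_timestamp().is_some());
}
```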
- /// Get the timestamp of this sample (either existing one or newly created) - pub fn ensure_timestamp(&mut self) -> &Timestamp { - if let Some(ref timestamp) = self.timestamp { - timestamp - } else { - let timestamp = new_reception_timestamp(); - self.timestamp = Some(timestamp); - self.timestamp.as_ref().unwrap() - } - } - - #[zenoh_macros::unstable] - pub fn attachment(&self) -> Option<&Attachment> { - self.attachment.as_ref() - } - - #[zenoh_macros::unstable] - pub fn attachment_mut(&mut self) -> &mut Option { - &mut self.attachment - } - - #[allow(clippy::result_large_err)] - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } -} - -impl std::ops::Deref for Sample { - type Target = Value; - - fn deref(&self) -> &Self::Target { - &self.value - } -} - -impl std::ops::DerefMut for Sample { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.value - } -} - -impl std::fmt::Display for Sample { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.kind { - SampleKind::Delete => write!(f, "{}({})", self.kind, self.key_expr), - _ => write!(f, "{}({}: {})", self.kind, self.key_expr, self.value), - } - } -} - -impl TryFrom for Sample { - type Error = Value; - - fn try_from(value: Reply) -> Result { - value.sample - } -} - -/// Structure containing quality of service data -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { - inner: QoSType, -} - -impl QoS { - /// Gets priority of the message. - pub fn priority(&self) -> Priority { - match Priority::try_from(self.inner.get_priority()) { - Ok(p) => p, - Err(e) => { - tracing::trace!( - "Failed to convert priority: {}; replacing with default value", - e.to_string() - ); - Priority::default() - } - } - } - - /// Gets congestion control of the message. - pub fn congestion_control(&self) -> CongestionControl { - self.inner.get_congestion_control() - } - - /// Gets express flag value. If true, the message is not batched during transmission, in order to reduce latency. - pub fn express(&self) -> bool { - self.inner.is_express() - } - - /// Sets priority value. - pub fn with_priority(mut self, priority: Priority) -> Self { - self.inner.set_priority(priority.into()); - self - } - - /// Sets congestion control value. - pub fn with_congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.inner.set_congestion_control(congestion_control); - self - } - - /// Sets express flag value. - pub fn with_express(mut self, is_express: bool) -> Self { - self.inner.set_is_express(is_express); - self - } -} - -impl From for QoS { - fn from(qos: QoSType) -> Self { - QoS { inner: qos } - } -} diff --git a/zenoh/src/selector.rs b/zenoh/src/selector.rs deleted file mode 100644 index 05a7349881..0000000000 --- a/zenoh/src/selector.rs +++ /dev/null @@ -1,553 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! 
[Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries - -use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; -use zenoh_result::ZResult; -pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange}; - -use crate::{prelude::KeyExpr, queryable::Query}; - -use std::{ - borrow::{Borrow, Cow}, - collections::HashMap, - convert::TryFrom, - hash::Hash, - str::FromStr, -}; - -/// A selector is the combination of a [Key Expression](crate::prelude::KeyExpr), which defines the -/// set of keys that are relevant to an operation, and a set of parameters -/// with a few intended uses: -/// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters -/// - filtering by value, -/// - filtering by metadata, such as the timestamp of a value, -/// - specifying arguments to zenoh when using the REST API. -/// -/// When in string form, selectors look a lot like a URI, with similar semantics: -/// - the `key_expr` before the first `?` must be a valid key expression. -/// - the `parameters` after the first `?` should be encoded like the query section of a URL: -/// - parameters are separated by `&`, -/// - the parameter name and value are separated by the first `=`, -/// - in the absence of `=`, the parameter value is considered to be the empty string, -/// - both name and value should use percent-encoding to escape characters, -/// - defining a value for the same parameter name twice is considered undefined behavior, -/// with the encouraged behaviour being to reject operations when a duplicate parameter is detected. -/// -/// Zenoh intends to standardize the usage of a set of parameter names. To avoid conflicting with RPC parameters, -/// the Zenoh team has settled on reserving the set of parameter names that start with non-alphanumeric characters. -/// -/// The full specification for selectors is available [here](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors), -/// it includes standardized parameters. -/// -/// Queryable implementers are encouraged to prefer these standardized parameter names when implementing their -/// associated features, and to prefix their own parameter names to avoid having conflicting parameter names with other -/// queryables. -/// -/// Here are the currently standardized parameters for Zenoh (check the specification page for the exhaustive list): -/// - `_time`: used to express interest in only values dated within a certain time range, values for -/// this parameter must be readable by the [Zenoh Time DSL](zenoh_util::time_range::TimeRange) for the value to be considered valid. -/// - **`[unstable]`** `_anyke`: used in queries to express interest in replies coming from any key expression. By default, only replies -/// whose key expression match query's key expression are accepted. `_anyke` disables the query-reply key expression matching check. -#[non_exhaustive] -#[derive(Clone, PartialEq, Eq)] -pub struct Selector<'a> { - /// The part of this selector identifying which keys should be part of the selection. - pub key_expr: KeyExpr<'a>, - /// the part of this selector identifying which values should be part of the selection. - pub(crate) parameters: Cow<'a, str>, -} - -pub const TIME_RANGE_KEY: &str = "_time"; -impl<'a> Selector<'a> { - /// Gets the parameters as a raw string. - pub fn parameters(&self) -> &str { - &self.parameters - } - /// Extracts the selector parameters into a hashmap, returning an error in case of duplicated parameter names. 
- pub fn parameters_map(&'a self) -> ZResult> - where - K: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_cowmap(&'a self) -> ZResult, Cow<'a, str>>> { - self.decode_into_map() - } - /// Extracts the selector parameters' name-value pairs into a hashmap, returning an error in case of duplicated parameters. - pub fn parameters_stringmap(&'a self) -> ZResult> { - self.decode_into_map() - } - /// Gets a mutable reference to the parameters as a String. - /// - /// Note that calling this function may cause an allocation and copy if the selector's parameters wasn't - /// already owned by `self`. `self` owns its parameters as soon as this function returns. - pub fn parameters_mut(&mut self) -> &mut String { - if let Cow::Borrowed(s) = self.parameters { - self.parameters = Cow::Owned(s.to_owned()) - } - if let Cow::Owned(s) = &mut self.parameters { - s - } else { - unsafe { std::hint::unreachable_unchecked() } // this is safe because we just replaced the borrowed variant - } - } - pub fn set_parameters(&mut self, selector: impl Into>) { - self.parameters = selector.into(); - } - pub fn borrowing_clone(&'a self) -> Self { - Selector { - key_expr: self.key_expr.clone(), - parameters: self.parameters.as_ref().into(), - } - } - pub fn into_owned(self) -> Selector<'static> { - Selector { - key_expr: self.key_expr.into_owned(), - parameters: self.parameters.into_owned().into(), - } - } - - #[deprecated = "If you have ownership of this selector, prefer `Selector::into_owned`"] - pub fn to_owned(&self) -> Selector<'static> { - self.borrowing_clone().into_owned() - } - - /// Returns this selectors components as a tuple. - pub fn split(self) -> (KeyExpr<'a>, Cow<'a, str>) { - (self.key_expr, self.parameters) - } - - /// Sets the `parameters` part of this `Selector`. - #[inline(always)] - pub fn with_parameters(mut self, parameters: &'a str) -> Self { - self.parameters = parameters.into(); - self - } - - pub fn extend<'b, I, K, V>(&'b mut self, parameters: I) - where - I: IntoIterator, - I::Item: std::borrow::Borrow<(K, V)>, - K: AsRef + 'b, - V: AsRef + 'b, - { - let it = parameters.into_iter(); - let selector = self.parameters_mut(); - let mut encoder = form_urlencoded::Serializer::new(selector); - encoder.extend_pairs(it).finish(); - } - - /// Sets the time range targeted by the selector. - pub fn with_time_range(&mut self, time_range: TimeRange) { - self.remove_time_range(); - let selector = self.parameters_mut(); - if !selector.is_empty() { - selector.push('&') - } - use std::fmt::Write; - write!(selector, "{TIME_RANGE_KEY}={time_range}").unwrap(); // This unwrap is safe because `String: Write` should be infallible. 
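The parameter helpers above decode duplicate-safe maps and the standardized `_time` range; a sketch of the API being removed (pre-1.0 module paths assumed):

```rust
use zenoh::selector::{Parameters, Selector, TimeRange};

fn decode_selector() -> zenoh::Result<()> {
    let mut selector = Selector::try_from("key/**?answer=42")?;

    // Duplicated parameter names are rejected, avoiding
    // HTTP-parameter-pollution style ambiguities.
    let map = selector.parameters_stringmap()?;
    assert_eq!(map.get("answer").map(String::as_str), Some("42"));

    // `_time` is the standardized time-range parameter.
    selector.with_time_range("[now(-2s)..now(2s)]".parse::<TimeRange>()?);
    assert!(selector.time_range()?.is_some());
    Ok(())
}
```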
- } - - pub fn remove_time_range(&mut self) { - let selector = self.parameters_mut(); - - let mut splice_start = 0; - let mut splice_end = 0; - for argument in selector.split('&') { - if argument.starts_with(TIME_RANGE_KEY) - && matches!( - argument.as_bytes().get(TIME_RANGE_KEY.len()), - None | Some(b'=') - ) - { - splice_end = splice_start + argument.len(); - break; - } - splice_start += argument.len() + 1 - } - if splice_end > 0 { - selector.drain(splice_start..(splice_end + (splice_end != selector.len()) as usize)); - } - } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn parameter_index(&self, param_name: &str) -> ZResult> { - let starts_with_param = |s: &str| { - if let Some(rest) = s.strip_prefix(param_name) { - matches!(rest.as_bytes().first(), None | Some(b'=')) - } else { - false - } - }; - let mut acc = 0; - let mut res = None; - for chunk in self.parameters().split('&') { - if starts_with_param(chunk) { - if res.is_none() { - res = Some(acc) - } else { - bail!( - "parameter `{}` appeared multiple times in selector `{}`.", - param_name, - self - ) - } - } - acc += chunk.len() as u32 + 1; - } - Ok(res) - } - #[cfg(any(feature = "unstable", test))] - pub(crate) fn accept_any_keyexpr(self, any: bool) -> ZResult> { - use crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - let mut s = self.into_owned(); - let any_selparam = s.parameter_index(_REPLY_KEY_EXPR_ANY_SEL_PARAM)?; - match (any, any_selparam) { - (true, None) => { - let s = s.parameters_mut(); - if !s.is_empty() { - s.push('&') - } - s.push_str(_REPLY_KEY_EXPR_ANY_SEL_PARAM); - } - (false, Some(index)) => { - let s = dbg!(s.parameters_mut()); - let mut start = index as usize; - let pend = start + _REPLY_KEY_EXPR_ANY_SEL_PARAM.len(); - if dbg!(start) != 0 { - start -= 1 - } - match dbg!(&s[pend..]).find('&') { - Some(end) => std::mem::drop(s.drain(start..end + pend)), - None => s.truncate(start), - } - dbg!(s); - } - _ => {} - } - Ok(s) - } -} - -#[test] -fn selector_accessors() { - let time_range = "[now(-2s)..now(2s)]".parse().unwrap(); - for selector in [ - "hello/there?_timetrick", - "hello/there?_timetrick&_time", - "hello/there?_timetrick&_time&_filter", - "hello/there?_timetrick&_time=[..]", - "hello/there?_timetrick&_time=[..]&_filter", - ] { - let mut selector = Selector::try_from(selector).unwrap(); - selector.with_time_range(time_range); - assert_eq!(selector.time_range().unwrap().unwrap(), time_range); - assert!(dbg!(selector.parameters()).contains("_time=[now(-2s)..now(2s)]")); - let map_selector = selector.parameters_cowmap().unwrap(); - assert_eq!( - selector.time_range().unwrap(), - map_selector.time_range().unwrap() - ); - let without_any = selector.to_string(); - let with_any = selector.to_string() + "&" + crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM; - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any); - selector = selector.accept_any_keyexpr(true).unwrap(); - assert_eq!(selector.to_string(), with_any); - selector.parameters_mut().push_str("&other"); - assert_eq!(selector.to_string(), with_any + "&other"); - selector = selector.accept_any_keyexpr(false).unwrap(); - assert_eq!(selector.to_string(), without_any + "&other"); - } -} -pub trait 
Parameter: Sized { - type Name: AsRef + Sized; - type Value: AsRef + Sized; - fn name(&self) -> &Self::Name; - fn value(&self) -> &Self::Value; - fn split(self) -> (Self::Name, Self::Value); - fn extract_name(self) -> Self::Name { - self.split().0 - } - fn extract_value(self) -> Self::Value { - self.split().1 - } -} -impl + Sized, V: AsRef + Sized> Parameter for (N, V) { - type Name = N; - type Value = V; - fn name(&self) -> &N { - &self.0 - } - fn value(&self) -> &V { - &self.1 - } - fn split(self) -> (Self::Name, Self::Value) { - self - } - fn extract_name(self) -> Self::Name { - self.0 - } - fn extract_value(self) -> Self::Value { - self.1 - } -} - -#[allow(type_alias_bounds)] -type ExtractedName<'a, VS: Parameters<'a>> = <::Item as Parameter>::Name; -#[allow(type_alias_bounds)] -type ExtractedValue<'a, VS: Parameters<'a>> = <::Item as Parameter>::Value; -/// A trait to help decode zenoh selector parameters. -/// -/// Most methods will return an Error if duplicates of a same parameter are found, to avoid HTTP Parameter Pollution like vulnerabilities. -pub trait Parameters<'a> { - type Decoder: Iterator + 'a; - /// Returns this selector's parameters as an iterator. - fn decode(&'a self) -> Self::Decoder - where - ::Item: Parameter; - - /// Extracts all parameters into a HashMap, returning an error if duplicate parameters arise. - fn decode_into_map(&'a self) -> ZResult> - where - ::Item: Parameter, - N: AsRef + std::hash::Hash + std::cmp::Eq, - ExtractedName<'a, Self>: Into, - ExtractedValue<'a, Self>: Into, - { - let mut result: HashMap = HashMap::new(); - for (name, value) in self.decode().map(Parameter::split) { - match result.entry(name.into()) { - std::collections::hash_map::Entry::Occupied(e) => { - bail!("Duplicated parameter `{}` detected", e.key().as_ref()) - } - std::collections::hash_map::Entry::Vacant(e) => { - e.insert(value.into()); - } - } - } - Ok(result) - } - - /// Extracts the requested parameters from the selector parameters. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if any of the requested parameters are present more than once. - fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - let mut result = unsafe { - let mut result: std::mem::MaybeUninit<[Option>; N]> = - std::mem::MaybeUninit::uninit(); - for slot in result.assume_init_mut() { - std::ptr::write(slot, None); - } - result.assume_init() - }; - for pair in self.decode() { - if let Some(index) = names.iter().position(|k| *k == pair.name().as_ref()) { - let slot = &mut result[index]; - if slot.is_some() { - bail!("Duplicated parameter `{}` detected.", names[index]) - } - *slot = Some(pair.extract_value()) - } - } - Ok(result) - } - - /// Extracts the requested arguments from the selector parameters as booleans, following the Zenoh convention that if a parameter name is present and has a value different from "false", its value is truthy. - /// - /// The default implementation is done in a single pass through the selector parameters, returning an error if some of the requested parameters are present more than once. - fn get_bools(&'a self, names: [&str; N]) -> ZResult<[bool; N]> - where - ::Item: Parameter, - { - Ok(self.get_parameters(names)?.map(|v| match v { - None => false, - Some(s) => s.as_ref() != "false", - })) - } - - /// Extracts the standardized `_time` argument from the selector parameters. 
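`get_bools` (above) treats a parameter as true when it is present with any value other than `"false"`; a sketch of that convention (the `Parameters` impl for `str` appears just below):

```rust
use zenoh::selector::Parameters;

fn bool_params() -> zenoh::Result<()> {
    let [anyke, verbose] = "foo=bar&_anyke".get_bools(["_anyke", "verbose"])?;
    assert!(anyke); // present with an empty value -> true
    assert!(!verbose); // absent -> false
    Ok(())
}
```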
- /// - /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key. - fn time_range(&'a self) -> ZResult> - where - ::Item: Parameter, - { - Ok(match &self.get_parameters([TIME_RANGE_KEY])?[0] { - Some(s) => Some(s.as_ref().parse()?), - None => None, - }) - } -} -impl<'a> Parameters<'a> for Selector<'a> { - type Decoder = >::Decoder; - fn decode(&'a self) -> Self::Decoder { - self.parameters().decode() - } -} -impl<'a> Parameters<'a> for str { - type Decoder = form_urlencoded::Parse<'a>; - fn decode(&'a self) -> Self::Decoder { - form_urlencoded::parse(self.as_bytes()) - } -} - -impl<'a, K: Borrow + Hash + Eq + 'a, V: Borrow + 'a> Parameters<'a> for HashMap { - type Decoder = std::collections::hash_map::Iter<'a, K, V>; - fn decode(&'a self) -> Self::Decoder { - self.iter() - } - fn get_parameters( - &'a self, - names: [&str; N], - ) -> ZResult<[Option>; N]> - where - ::Item: Parameter, - { - // `Ok(names.map(|key| self.get(key)))` would be very slightly faster, but doesn't compile for some reason :( - Ok(names.map(|key| self.get_key_value(key).map(|kv| kv.extract_value()))) - } -} - -impl std::fmt::Debug for Selector<'_> { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "sel\"{self}\"") - } -} - -impl std::fmt::Display for Selector<'_> { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.key_expr)?; - if !self.parameters.is_empty() { - write!(f, "?{}", self.parameters)?; - } - Ok(()) - } -} - -impl<'a> From<&Selector<'a>> for Selector<'a> { - fn from(s: &Selector<'a>) -> Self { - s.clone() - } -} - -impl TryFrom for Selector<'_> { - type Error = zenoh_result::Error; - fn try_from(mut s: String) -> Result { - match s.find('?') { - Some(qmark_position) => { - let parameters = s[qmark_position + 1..].to_owned(); - s.truncate(qmark_position); - Ok(KeyExpr::try_from(s)?.with_owned_parameters(parameters)) - } - None => Ok(KeyExpr::try_from(s)?.into()), - } - } -} - -impl<'a> TryFrom<&'a str> for Selector<'a> { - type Error = zenoh_result::Error; - fn try_from(s: &'a str) -> Result { - match s.find('?') { - Some(qmark_position) => { - let params = &s[qmark_position + 1..]; - Ok(KeyExpr::try_from(&s[..qmark_position])?.with_parameters(params)) - } - None => Ok(KeyExpr::try_from(s)?.into()), - } - } -} -impl FromStr for Selector<'static> { - type Err = zenoh_result::Error; - fn from_str(s: &str) -> Result { - s.to_owned().try_into() - } -} - -impl<'a> TryFrom<&'a String> for Selector<'a> { - type Error = zenoh_result::Error; - fn try_from(s: &'a String) -> Result { - Self::try_from(s.as_str()) - } -} - -impl<'a> From<&'a Query> for Selector<'a> { - fn from(q: &'a Query) -> Self { - Selector { - key_expr: q.inner.key_expr.clone(), - parameters: (&q.inner.parameters).into(), - } - } -} - -impl<'a> From<&KeyExpr<'a>> for Selector<'a> { - fn from(key_selector: &KeyExpr<'a>) -> Self { - Self { - key_expr: key_selector.clone(), - parameters: "".into(), - } - } -} - -impl<'a> From<&'a keyexpr> for Selector<'a> { - fn from(key_selector: &'a keyexpr) -> Self { - Self { - key_expr: key_selector.into(), - parameters: "".into(), - } - } -} - -impl<'a> From<&'a OwnedKeyExpr> for Selector<'a> { - fn from(key_selector: &'a OwnedKeyExpr) -> Self { - Self { - key_expr: key_selector.into(), - parameters: "".into(), - } - } -} - -impl From for Selector<'static> { - fn from(key_selector: OwnedKeyExpr) -> Self { - Self { - key_expr: key_selector.into(), - 
parameters: "".into(), - } - } -} - -impl<'a> From> for Selector<'a> { - fn from(key_selector: KeyExpr<'a>) -> Self { - Self { - key_expr: key_selector, - parameters: "".into(), - } - } -} diff --git a/zenoh/src/subscriber.rs b/zenoh/src/subscriber.rs deleted file mode 100644 index c4ecd6cbd4..0000000000 --- a/zenoh/src/subscriber.rs +++ /dev/null @@ -1,823 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler}; -use crate::prelude::Locality; -use crate::prelude::{Id, IntoCallbackReceiverPair, KeyExpr, Sample}; -use crate::Undeclarable; -use crate::{Result as ZResult, SessionRef}; -use std::fmt; -use std::future::Ready; -use std::ops::{Deref, DerefMut}; -use std::sync::Arc; -use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; -use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; - -/// The subscription mode. -pub use zenoh_protocol::core::SubMode; - -/// The kind of reliability. -pub use zenoh_protocol::core::Reliability; - -pub(crate) struct SubscriberState { - pub(crate) id: Id, - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) scope: Option>, - pub(crate) origin: Locality, - pub(crate) callback: Callback<'static, Sample>, -} - -impl fmt::Debug for SubscriberState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Subscriber") - .field("id", &self.id) - .field("key_expr", &self.key_expr) - .finish() - } -} - -/// A subscriber that provides data through a callback. -/// -/// CallbackSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function -/// and the [`callback`](SubscriberBuilder::callback) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[derive(Debug)] -pub(crate) struct SubscriberInner<'a> { - pub(crate) session: SessionRef<'a>, - pub(crate) state: Arc, - pub(crate) alive: bool, -} - -/// A [`PullMode`] subscriber that provides data through a callback. -/// -/// CallbackPullSubscribers only provide data when explicitly pulled by the -/// application with the [`pull`](CallbackPullSubscriber::pull) function. -/// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`callback`](SubscriberBuilder::callback) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. 
-/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # } -/// ``` -pub(crate) struct PullSubscriberInner<'a> { - inner: SubscriberInner<'a>, -} - -impl<'a> PullSubscriberInner<'a> { - /// Pull available data for a [`CallbackPullSubscriber`]. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # } - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.inner.session.pull(&self.inner.state.key_expr) - } - - /// Close a [`CallbackPullSubscriber`](CallbackPullSubscriber). - /// - /// `CallbackPullSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the `CallbackPullSubscriber` asynchronously. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// # fn data_handler(_sample: Sample) { }; - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(data_handler) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # } - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self.inner, ()) - } -} - -impl<'a> SubscriberInner<'a> { - /// Close a [`CallbackSubscriber`](CallbackSubscriber). - /// - /// `CallbackSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the `CallbackSubscriber` asynchronously. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// # fn data_handler(_sample: Sample) { }; - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(data_handler) - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # } - /// ``` - #[inline] - pub fn undeclare(self) -> SubscriberUndeclaration<'a> { - Undeclarable::undeclare_inner(self, ()) - } -} - -impl<'a> Undeclarable<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> { - fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { - SubscriberUndeclaration { subscriber: self } - } -} - -/// A [`Resolvable`] returned when undeclaring a subscriber. 
-/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .res() -/// .await -/// .unwrap(); -/// subscriber.undeclare().res().await.unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct SubscriberUndeclaration<'a> { - subscriber: SubscriberInner<'a>, -} - -impl Resolvable for SubscriberUndeclaration<'_> { - type To = ZResult<()>; -} - -impl SyncResolve for SubscriberUndeclaration<'_> { - fn res_sync(mut self) -> ::To { - self.subscriber.alive = false; - self.subscriber - .session - .unsubscribe(self.subscriber.state.id) - } -} - -impl AsyncResolve for SubscriberUndeclaration<'_> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl Drop for SubscriberInner<'_> { - fn drop(&mut self) { - if self.alive { - let _ = self.session.unsubscribe(self.state.id); - } - } -} - -/// The mode for pull subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PullMode; - -impl From for SubMode { - fn from(_: PullMode) -> Self { - SubMode::Pull - } -} - -impl From for Mode { - fn from(_: PullMode) -> Self { - Mode::Pull - } -} - -/// The mode for push subscribers. -#[non_exhaustive] -#[derive(Debug, Clone, Copy)] -pub struct PushMode; - -impl From for SubMode { - fn from(_: PushMode) -> Self { - SubMode::Push - } -} - -impl From for Mode { - fn from(_: PushMode) -> Self { - Mode::Push - } -} - -/// A builder for initializing a [`FlumeSubscriber`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .best_effort() -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { - #[cfg(feature = "unstable")] - pub session: SessionRef<'a>, - #[cfg(not(feature = "unstable"))] - pub(crate) session: SessionRef<'a>, - - #[cfg(feature = "unstable")] - pub key_expr: ZResult>, - #[cfg(not(feature = "unstable"))] - pub(crate) key_expr: ZResult>, - - #[cfg(feature = "unstable")] - pub reliability: Reliability, - #[cfg(not(feature = "unstable"))] - pub(crate) reliability: Reliability, - - #[cfg(feature = "unstable")] - pub mode: Mode, - #[cfg(not(feature = "unstable"))] - pub(crate) mode: Mode, - - #[cfg(feature = "unstable")] - pub origin: Locality, - #[cfg(not(feature = "unstable"))] - pub(crate) origin: Locality, - - #[cfg(feature = "unstable")] - pub handler: Handler, - #[cfg(not(feature = "unstable"))] - pub(crate) handler: Handler, -} - -impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { - /// Receive the samples for this subscription with a callback. 
- /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) - /// .res() - /// .await - /// .unwrap(); - /// # } - /// ``` - #[inline] - pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Mode, Callback> - where - Callback: Fn(Sample) + Send + Sync + 'static, - { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode, - origin, - handler: _, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode, - origin, - handler: callback, - } - } - - /// Receive the samples for this subscription with a mutable callback. - /// - /// Using this guarantees that your callback will never be called concurrently. - /// If your callback is also accepted by the [`callback`](SubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut n = 0; - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .callback_mut(move |_sample| { n += 1; }) - /// .res() - /// .await - /// .unwrap(); - /// # } - /// ``` - #[inline] - pub fn callback_mut( - self, - callback: CallbackMut, - ) -> SubscriberBuilder<'a, 'b, Mode, impl Fn(Sample) + Send + Sync + 'static> - where - CallbackMut: FnMut(Sample) + Send + Sync + 'static, - { - self.callback(locked(callback)) - } - - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). - /// - /// # Examples - /// ```no_run - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .with(flume::bounded(32)) - /// .res() - /// .await - /// .unwrap(); - /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); - /// } - /// # } - /// ``` - #[inline] - pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> - where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, - { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode, - origin, - handler: _, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode, - origin, - handler, - } - } -} -impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { - /// Change the subscription reliability. - #[inline] - pub fn reliability(mut self, reliability: Reliability) -> Self { - self.reliability = reliability; - self - } - - /// Change the subscription reliability to `Reliable`. - #[inline] - pub fn reliable(mut self) -> Self { - self.reliability = Reliability::Reliable; - self - } - - /// Change the subscription reliability to `BestEffort`. - #[inline] - pub fn best_effort(mut self) -> Self { - self.reliability = Reliability::BestEffort; - self - } - - /// Restrict the matching publications that will be receive by this [`Subscriber`] - /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
- #[zenoh_macros::unstable] - #[inline] - pub fn allowed_origin(mut self, origin: Locality) -> Self { - self.origin = origin; - self - } - - /// Change the subscription mode to Pull. - #[inline] - pub fn pull_mode(self) -> SubscriberBuilder<'a, 'b, PullMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PullMode, - origin, - handler, - } - } - - /// Change the subscription mode to Push. - #[inline] - pub fn push_mode(self) -> SubscriberBuilder<'a, 'b, PushMode, Handler> { - let SubscriberBuilder { - session, - key_expr, - reliability, - mode: _, - origin, - handler, - } = self; - SubscriberBuilder { - session, - key_expr, - reliability, - mode: PushMode, - origin, - handler, - } - } -} - -// Push mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - type To = ZResult>; -} - -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - fn res_sync(self) -> ::To { - let key_expr = self.key_expr?; - let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); - session - .declare_subscriber_inner( - &key_expr, - &None, - self.origin, - callback, - &SubscriberInfo { - reliability: self.reliability, - mode: self.mode.into(), - }, - ) - .map(|sub_state| Subscriber { - subscriber: SubscriberInner { - session, - state: sub_state, - alive: true, - }, - receiver, - }) - } -} - -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -// Pull mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - type To = ZResult>; -} - -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - fn res_sync(self) -> ::To { - let key_expr = self.key_expr?; - let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); - session - .declare_subscriber_inner( - &key_expr, - &None, - self.origin, - callback, - &SubscriberInfo { - reliability: self.reliability, - mode: self.mode.into(), - }, - ) - .map(|sub_state| PullSubscriber { - subscriber: PullSubscriberInner { - inner: SubscriberInner { - session, - state: sub_state, - alive: true, - }, - }, - receiver, - }) - } -} - -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> -where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
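The deleted builder encodes the subscription mode in a type-state parameter: `pull_mode()`/`push_mode()` swap the `Mode` type, and the `Resolvable` impls above pick `Subscriber` or `PullSubscriber` from it. A usage sketch (pre-1.0 sync API):

```rust
use zenoh::prelude::sync::*;

fn subscriber_modes(session: &Session) {
    // PushMode is the default; resolving yields a `Subscriber`.
    let _push = session.declare_subscriber("key/expression").res().unwrap();

    // PullMode yields a `PullSubscriber`, which only delivers on `pull()`.
    let pull = session
        .declare_subscriber("key/expression")
        .pull_mode()
        .res()
        .unwrap();
    pull.pull().res().unwrap();
}
```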
-/// -/// Subscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function -/// and the [`with`](SubscriberBuilder::with) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. -/// -/// # Examples -/// ```no_run -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .with(flume::bounded(32)) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {}", sample.key_expr, sample.value); -/// } -/// # } -/// ``` -#[non_exhaustive] -#[derive(Debug)] -pub struct Subscriber<'a, Receiver> { - pub(crate) subscriber: SubscriberInner<'a>, - pub receiver: Receiver, -} - -/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). -/// -/// PullSubscribers only provide data when explicitly pulled by the -/// application with the [`pull`](PullSubscriber::pull) function. -/// PullSubscribers can be created from a zenoh [`Session`](crate::Session) -/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, -/// the [`with`](SubscriberBuilder::with) function -/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function -/// of the resulting builder. -/// -/// Subscribers are automatically undeclared when dropped. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let subscriber = session -/// .declare_subscriber("key/expression") -/// .with(flume::bounded(32)) -/// .pull_mode() -/// .res() -/// .await -/// .unwrap(); -/// subscriber.pull(); -/// # } -/// ``` -#[non_exhaustive] -pub struct PullSubscriber<'a, Receiver> { - pub(crate) subscriber: PullSubscriberInner<'a>, - pub receiver: Receiver, -} - -impl<'a, Receiver> Deref for PullSubscriber<'a, Receiver> { - type Target = Receiver; - fn deref(&self) -> &Self::Target { - &self.receiver - } -} - -impl<'a, Receiver> DerefMut for PullSubscriber<'a, Receiver> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver - } -} - -impl<'a, Receiver> PullSubscriber<'a, Receiver> { - /// Pull available data for a [`PullSubscriber`]. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// use zenoh::subscriber::SubMode; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session - /// .declare_subscriber("key/expression") - /// .with(flume::bounded(32)) - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.pull(); - /// # } - /// ``` - #[inline] - pub fn pull(&self) -> impl Resolve> + '_ { - self.subscriber.pull() - } - - /// Close a [`PullSubscriber`]. - /// - /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the Subscriber asynchronously. 
- /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.declare_subscriber("key/expression") - /// .pull_mode() - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # } - /// ``` - #[inline] - pub fn undeclare(self) -> impl Resolve> + 'a { - self.subscriber.undeclare() - } -} - -impl<'a, Receiver> Subscriber<'a, Receiver> { - /// Returns the [`KeyExpr`] this Subscriber subscribes to. - pub fn key_expr(&self) -> &KeyExpr<'static> { - &self.subscriber.state.key_expr - } - - /// Close a [`Subscriber`]. - /// - /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or - /// close the Subscriber asynchronously. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.declare_subscriber("key/expression") - /// .res() - /// .await - /// .unwrap(); - /// subscriber.undeclare().res().await.unwrap(); - /// # } - /// ``` - #[inline] - pub fn undeclare(self) -> SubscriberUndeclaration<'a> { - self.subscriber.undeclare() - } -} - -impl<'a, T> Undeclarable<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> { - fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> { - Undeclarable::undeclare_inner(self.subscriber, ()) - } -} - -impl Deref for Subscriber<'_, Receiver> { - type Target = Receiver; - - fn deref(&self) -> &Self::Target { - &self.receiver - } -} -impl DerefMut for Subscriber<'_, Receiver> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.receiver - } -} - -/// A [`Subscriber`] that provides data through a `flume` channel. -pub type FlumeSubscriber<'a> = Subscriber<'a, flume::Receiver>; diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs deleted file mode 100644 index 849cfd57d5..0000000000 --- a/zenoh/src/value.rs +++ /dev/null @@ -1,705 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Value primitives. - -use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; -use std::borrow::Cow; -use std::convert::TryFrom; -#[cfg(feature = "shared-memory")] -use std::sync::Arc; - -use zenoh_collections::Properties; -use zenoh_result::ZError; - -use crate::buffers::ZBuf; -use crate::prelude::{Encoding, KnownEncoding, Sample, SplitBuffer}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; - -/// A zenoh Value. -#[non_exhaustive] -#[derive(Clone)] -pub struct Value { - /// The payload of this Value. - pub payload: ZBuf, - /// An encoding description indicating how the associated payload is encoded. - pub encoding: Encoding, -} - -impl Value { - /// Creates a new zenoh Value. - pub fn new(payload: ZBuf) -> Self { - Value { - payload, - encoding: KnownEncoding::AppOctetStream.into(), - } - } - - /// Creates an empty Value. 
- pub fn empty() -> Self { - Value { - payload: ZBuf::empty(), - encoding: KnownEncoding::AppOctetStream.into(), - } - } - - /// Sets the encoding of this zenoh Value. - #[inline(always)] - pub fn encoding(mut self, encoding: Encoding) -> Self { - self.encoding = encoding; - self - } -} - -impl std::fmt::Debug for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "Value{{ payload: {:?}, encoding: {} }}", - self.payload, self.encoding - ) - } -} - -impl std::fmt::Display for Value { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let payload = self.payload.contiguous(); - write!( - f, - "{}", - String::from_utf8(payload.clone().into_owned()) - .unwrap_or_else(|_| b64_std_engine.encode(payload)) - ) - } -} - -impl std::error::Error for Value {} - -// Shared memory conversion -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Arc) -> Self { - Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -#[cfg(feature = "shared-memory")] -impl From> for Value { - fn from(smb: Box) -> Self { - let smb: Arc = smb.into(); - Self::from(smb) - } -} - -#[cfg(feature = "shared-memory")] -impl From for Value { - fn from(smb: SharedMemoryBuf) -> Self { - Value { - payload: smb.into(), - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -// Bytes conversion -impl From for Value { - fn from(buf: ZBuf) -> Self { - Value { - payload: buf, - encoding: KnownEncoding::AppOctetStream.into(), - } - } -} - -impl TryFrom<&Value> for ZBuf { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.clone()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - unexpected - )), - } - } -} - -impl TryFrom for ZBuf { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -impl From<&[u8]> for Value { - fn from(buf: &[u8]) -> Self { - Value::from(ZBuf::from(buf.to_vec())) - } -} - -impl<'a> TryFrom<&'a Value> for Cow<'a, [u8]> { - type Error = ZError; - - fn try_from(v: &'a Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous()), - unexpected => Err(zerror!( - "{:?} can not be converted into Cow<'a, [u8]>", - unexpected - )), - } - } -} - -impl From> for Value { - fn from(buf: Vec) -> Self { - Value::from(ZBuf::from(buf)) - } -} - -impl TryFrom<&Value> for Vec { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppOctetStream => Ok(v.payload.contiguous().to_vec()), - unexpected => Err(zerror!( - "{:?} can not be converted into Vec", - unexpected - )), - } - } -} - -impl TryFrom for Vec { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// String conversion -impl From for Value { - fn from(s: String) -> Self { - Value { - payload: ZBuf::from(s.into_bytes()), - encoding: KnownEncoding::TextPlain.into(), - } - } -} - -impl From<&str> for Value { - fn from(s: &str) -> Self { - Value { - payload: ZBuf::from(Vec::::from(s)), - encoding: KnownEncoding::TextPlain.into(), - } - } -} - -impl TryFrom<&Value> for String { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::TextPlain => { - String::from_utf8(v.payload.contiguous().to_vec()).map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!("{:?} can not be converted into String", 
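// The conversion contract of this (deleted) module in one sketch: `From<T>`
// stamps an encoding onto the payload, and `TryFrom<&Value>` refuses to decode
// when the stamped encoding does not match the requested target type:
let v = Value::from("hello");                 // encoding = TextPlain
assert_eq!(String::try_from(&v).unwrap(), "hello");
assert!(Vec::<u8>::try_from(&v).is_err());    // Vec<u8> expects AppOctetStream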
unexpected)), - } - } -} - -impl TryFrom for String { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Sample conversion -impl From for Value { - fn from(s: Sample) -> Self { - s.value - } -} - -// i64 conversion -impl From for Value { - fn from(i: i64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i64", unexpected)), - } - } -} - -impl TryFrom for i64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i32 conversion -impl From for Value { - fn from(i: i32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i32", unexpected)), - } - } -} - -impl TryFrom for i32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i16 conversion -impl From for Value { - fn from(i: i16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i16", unexpected)), - } - } -} - -impl TryFrom for i16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// i8 conversion -impl From for Value { - fn from(i: i8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for i8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into i8", unexpected)), - } - } -} - -impl TryFrom for i8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// isize conversion -impl From for Value { - fn from(i: isize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for isize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
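// All of these integer conversions share one wire format: the number is written
// as its decimal string and tagged `AppInteger`, so `Value::from(42i64)` carries
// the payload bytes b"42". A round-trip sketch against the deleted impls:
let v = Value::from(42i64);
assert_eq!(v.payload.contiguous().as_ref(), b"42".as_slice());
assert_eq!(i64::try_from(&v).unwrap(), 42);
assert!(String::try_from(&v).is_err()); // tagged AppInteger, not TextPlain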
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into isize", unexpected)), - } - } -} - -impl TryFrom for isize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u64 conversion -impl From for Value { - fn from(i: u64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u64", unexpected)), - } - } -} - -impl TryFrom for u64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u32 conversion -impl From for Value { - fn from(i: u32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u32", unexpected)), - } - } -} - -impl TryFrom for u32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u16 conversion -impl From for Value { - fn from(i: u16) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u16 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u16", unexpected)), - } - } -} - -impl TryFrom for u16 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// u8 conversion -impl From for Value { - fn from(i: u8) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for u8 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into u8", unexpected)), - } - } -} - -impl TryFrom for u8 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// usize conversion -impl From for Value { - fn from(i: usize) -> Self { - Value { - payload: ZBuf::from(Vec::::from(i.to_string())), - encoding: KnownEncoding::AppInteger.into(), - } - } -} - -impl TryFrom<&Value> for usize { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? 
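// The ten-plus integer impls in this file are identical modulo the type; if the
// module were staying, a declarative macro could generate them all. A sketch
// (not part of this diff; the macro name is hypothetical):
macro_rules! impl_int_value {
    ($($t:ty),*) => {$(
        impl From<$t> for Value {
            fn from(i: $t) -> Self {
                Value {
                    // same wire format as the hand-written impls: decimal string
                    payload: ZBuf::from(i.to_string().into_bytes()),
                    encoding: KnownEncoding::AppInteger.into(),
                }
            }
        }
    )*};
}
impl_int_value!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);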
- .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into usize", unexpected)), - } - } -} - -impl TryFrom for usize { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f64 conversion -impl From for Value { - fn from(f: f64) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f64 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f64", unexpected)), - } - } -} - -impl TryFrom for f64 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// f32 conversion -impl From for Value { - fn from(f: f32) -> Self { - Value { - payload: ZBuf::from(Vec::::from(f.to_string())), - encoding: KnownEncoding::AppFloat.into(), - } - } -} - -impl TryFrom<&Value> for f32 { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) - .map_err(|e| zerror!("{}", e))? - .parse() - .map_err(|e| zerror!("{}", e)), - unexpected => Err(zerror!("{:?} can not be converted into f32", unexpected)), - } - } -} - -impl TryFrom for f32 { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// JSON conversion -impl From<&serde_json::Value> for Value { - fn from(json: &serde_json::Value) -> Self { - Value { - payload: ZBuf::from(Vec::::from(json.to_string())), - encoding: KnownEncoding::AppJson.into(), - } - } -} - -impl From for Value { - fn from(json: serde_json::Value) -> Self { - Value::from(&json) - } -} - -impl TryFrom<&Value> for serde_json::Value { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match v.encoding.prefix() { - KnownEncoding::AppJson | KnownEncoding::TextJson => { - let r = serde::Deserialize::deserialize(&mut serde_json::Deserializer::from_slice( - &v.payload.contiguous(), - )); - r.map_err(|e| zerror!("{}", e)) - } - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for serde_json::Value { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} - -// Properties conversion -impl From for Value { - fn from(p: Properties) -> Self { - Value { - payload: ZBuf::from(Vec::::from(p.to_string())), - encoding: KnownEncoding::AppProperties.into(), - } - } -} - -impl TryFrom<&Value> for Properties { - type Error = ZError; - - fn try_from(v: &Value) -> Result { - match *v.encoding.prefix() { - KnownEncoding::AppProperties => Ok(Properties::from( - std::str::from_utf8(&v.payload.contiguous()).map_err(|e| zerror!("{}", e))?, - )), - unexpected => Err(zerror!( - "{:?} can not be converted into Properties", - unexpected - )), - } - } -} - -impl TryFrom for Properties { - type Error = ZError; - - fn try_from(v: Value) -> Result { - Self::try_from(&v) - } -} diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 6f8539f659..13104338b7 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -13,10 +13,19 @@ // #![cfg(target_family = "unix")] mod test { - use std::sync::{Arc, Mutex}; - use std::time::Duration; + use std::{ + sync::{Arc, Mutex}, + time::Duration, + }; + use 
tokio::runtime::Handle; - use zenoh::prelude::r#async::*; + use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + prelude::*, + sample::SampleKind, + Config, Session, + }; use zenoh_core::{zlock, ztimeout}; const TIMEOUT: Duration = Duration::from_secs(60); @@ -25,509 +34,756 @@ mod test { const VALUE: &str = "zenoh"; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] - async fn test_acl() { - zenoh_util::try_init_log_from_env(); - test_pub_sub_deny().await; - test_pub_sub_allow().await; - test_pub_sub_deny_then_allow().await; - test_pub_sub_allow_then_deny().await; - test_get_qbl_deny().await; - test_get_qbl_allow().await; - test_get_qbl_allow_then_deny().await; - test_get_qbl_deny_then_allow().await; + async fn test_acl_pub_sub() { + zenoh::try_init_log_from_env(); + test_pub_sub_deny(27447).await; + test_pub_sub_allow(27447).await; + test_pub_sub_deny_then_allow(27447).await; + test_pub_sub_allow_then_deny(27447).await; } - async fn get_basic_router_config() -> Config { + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_acl_get_queryable() { + zenoh::try_init_log_from_env(); + test_get_qbl_deny(27448).await; + test_get_qbl_allow(27448).await; + test_get_qbl_allow_then_deny(27448).await; + test_get_qbl_deny_then_allow(27448).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_acl_queryable_reply() { + zenoh::try_init_log_from_env(); + // Only test cases not covered by `test_acl_get_queryable` + test_reply_deny(27449).await; + test_reply_allow_then_deny(27449).await; + } + + async fn get_basic_router_config(port: u16) -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec![format!("tcp/127.0.0.1:{port}").parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config } async fn close_router_session(s: Session) { println!("Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } - async fn get_client_sessions() -> (Session, Session) { + async fn get_client_sessions(port: u16) -> (Session, Session) { println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let config = config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let config = config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02) } async fn close_sessions(s01: Session, s02: Session) { println!("Closing client sessions"); - ztimeout!(s01.close().res_async()).unwrap(); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); } - async fn test_pub_sub_deny() { + async fn test_pub_sub_deny(port: u16) { println!("test_pub_sub_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "deny", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "deny", + "rules": [], + "subjects": 
[], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let (sub_session, pub_session) = get_client_sessions(port).await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); + let deleted_clone = deleted.clone(); let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + if sample.kind() == SampleKind::Put { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } }) - .res_async() .await .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(!(*zlock!(deleted))); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; } - async fn test_pub_sub_allow() { + async fn test_pub_sub_allow(port: u16) { println!("test_pub_sub_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - - "enabled": false, - "default_permission": "allow", - "rules": - [ - ] - - }"#, + "enabled": true, + "default_permission": "allow", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions(port).await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session + let deleted_clone = deleted.clone(); + let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + if sample.kind() == SampleKind::Put { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } }) - .res_async()) - .unwrap(); + .await + .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - 
assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(*zlock!(deleted)); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; } - async fn test_pub_sub_allow_then_deny() { + async fn test_pub_sub_allow_then_deny(port: u16) { println!("test_pub_sub_allow_then_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "put", - "declare_subscriber" - ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "flows": ["egress", "ingress"], + "messages": [ + "put", + "delete", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions(port).await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session + let deleted_clone = deleted.clone(); + let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + if sample.kind() == SampleKind::Put { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } }) - .res_async()) - .unwrap(); + .await + .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(!(*zlock!(deleted))); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; } - async fn test_pub_sub_deny_then_allow() { + async fn test_pub_sub_deny_then_allow(port: u16) { println!("test_pub_sub_deny_then_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "put", - "declare_subscriber" - ], - 
"key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + "flows": ["egress", "ingress"], + "messages": [ + "put", + "delete", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + let (sub_session, pub_session) = get_client_sessions(port).await; { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); + let deleted = Arc::new(Mutex::new(false)); + let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session + let deleted_clone = deleted.clone(); + let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); + if sample.kind() == SampleKind::Put { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + } else if sample.kind() == SampleKind::Delete { + let mut deleted = zlock!(deleted_clone); + *deleted = true; + } }) - .res_async()) - .unwrap(); + .await + .unwrap(); tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + publisher.put(VALUE).await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + + publisher.delete().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert!(*zlock!(deleted)); + ztimeout!(subscriber.undeclare()).unwrap(); } close_sessions(sub_session, pub_session).await; close_router_session(session).await; } - async fn test_get_qbl_deny() { + async fn test_get_qbl_deny(port: u16) { println!("test_get_qbl_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "deny", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "allow reply", + "permission": "allow", + "messages": ["reply"], + "flows": ["egress", "ingress"], + "key_exprs": ["test/demo"], + } + ], + "subjects": [ + { "id": "all" } + ], + "policies": [ + { + "rules": ["allow reply"], + "subjects": ["all"], + } + ], + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - 
ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; } - async fn test_get_qbl_allow() { + async fn test_get_qbl_allow(port: u16) { println!("test_get_qbl_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", r#"{ - "enabled": true, - "default_permission": "allow", - "rules": - [ - ] - }"#, + "enabled": true, + "default_permission": "allow", + "rules": [], + "subjects": [], + "policies": [], + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; } - async fn test_get_qbl_deny_then_allow() { + async fn test_get_qbl_deny_then_allow(port: u16) { println!("test_get_qbl_deny_then_allow"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable"], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + 
"flows": ["egress", "ingress"], + "messages": [ + "query", + "declare_queryable", + "reply" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; } - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; } - async fn test_get_qbl_allow_then_deny() { + async fn test_get_qbl_allow_then_deny(port: u16) { println!("test_get_qbl_allow_then_deny"); - let mut config_router = get_basic_router_config().await; + let mut config_router = get_basic_router_config(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "flows": ["egress", "ingress"], + "messages": [ + "query", + "declare_queryable" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!("Error : {:?}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; + } + + async fn 
test_reply_deny(port: u16) { + println!("test_reply_deny"); + + let mut config_router = get_basic_router_config(port).await; config_router .insert_json5( "access_control", - r#" - {"enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "allow get/declare qbl", + "permission": "allow", + "messages": ["query", "declare_queryable"], + "key_exprs": ["test/demo"], + } + ], + "subjects": [ + { "id": "all" } + ], + "policies": [ + { + "rules": ["allow get/declare qbl"], + "subjects": ["all"], + } + ], + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (get_session, qbl_session) = get_client_sessions(port).await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() + }); + }); + })) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.result() { + Ok(sample) => { + received_value = sample.payload().deserialize::().unwrap(); + break; + } + Err(e) => println!("Error : {:?}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } - "#, + + async fn test_reply_allow_then_deny(port: u16) { + println!("test_reply_allow_then_deny"); + + let mut config_router = get_basic_router_config(port).await; + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": [ + { + "id": "r1", + "permission": "deny", + "messages": ["reply"], + "flows": ["egress", "ingress"], + "key_exprs": ["test/demo"], + }, + ], + "subjects": [ + { + "id": "s1", + "interfaces": [ + "lo", "lo0" + ], + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, ) .unwrap(); println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router)).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; + let (get_session, qbl_session) = get_client_sessions(port).await; { let mut received_value = String::new(); let qbl = ztimeout!(qbl_session .declare_queryable(KEY_EXPR) .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); tokio::task::block_in_place(move || { Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap() }); }); - }) - .res_async()) + })) .unwrap(); tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap(); while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { + match reply.result() { Ok(sample) => { - received_value = sample.value.to_string(); + received_value = sample.payload().deserialize::().unwrap(); break; 
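// Every access_control config in these tests follows the new three-part schema:
// "rules" say what is allowed or denied (permission, messages, flows, key_exprs),
// "subjects" say whom a rule applies to (interfaces, cert_common_names, ...), and
// "policies" bind rule ids to subject ids. A minimal standalone sketch:
let mut config = zenoh::config::default();
config
    .insert_json5(
        "access_control",
        r#"{
            "enabled": true,
            "default_permission": "deny",
            "rules": [
                {
                    "id": "r1",
                    "permission": "allow",
                    "messages": ["put", "declare_subscriber"],
                    "key_exprs": ["test/demo"],
                },
            ],
            "subjects": [{ "id": "all" }],
            "policies": [{ "rules": ["r1"], "subjects": ["all"] }],
        }"#,
    )
    .unwrap();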
} - Err(e) => println!("Error : {}", e), + Err(e) => println!("Error : {:?}", e), } } tokio::time::sleep(SLEEP).await; assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); } close_sessions(get_session, qbl_session).await; close_router_session(session).await; diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 0010c9d5af..38b1fea136 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -11,26 +11,32 @@ // Contributors: // ZettaScale Zenoh Team, // -#[cfg(feature = "unstable")] -#[test] -fn pubsub() { - use zenoh::prelude::sync::*; +#![cfg(feature = "unstable")] +use zenoh::{bytes::ZBytes, config::Config, prelude::*}; - let zenoh = zenoh::open(Config::default()).res().unwrap(); +#[test] +fn attachment_pubsub() { + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { - println!( - "{}", - std::str::from_utf8(&sample.payload.contiguous()).unwrap() - ); - for (k, v) in &sample.attachment.unwrap() { + println!("{}", sample.payload().deserialize::().unwrap()); + for (k, v) in sample + .attachment() + .unwrap() + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) - .res() + .wait() .unwrap(); - let publisher = zenoh.declare_publisher("test/attachment").res().unwrap(); + + let publisher = zenoh.declare_publisher("test/attachment").wait().unwrap(); for i in 0..10 { let mut backer = [( [0; std::mem::size_of::()], @@ -39,62 +45,60 @@ fn pubsub() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + zenoh .put("test/attachment", "put") - .with_attachment( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - ) - .res() + .attachment(ZBytes::from_iter(backer.iter())) + .wait() .unwrap(); publisher .put("publisher") - .with_attachment( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - ) - .res() + .attachment(ZBytes::from_iter(backer.iter())) + .wait() .unwrap(); } } -#[cfg(feature = "unstable")] -#[test] -fn queries() { - use zenoh::{prelude::sync::*, sample::Attachment}; - let zenoh = zenoh::open(Config::default()).res().unwrap(); +#[test] +fn attachment_queries() { + use zenoh::prelude::*; + let zenoh = zenoh::open(Config::default()).wait().unwrap(); let _sub = zenoh .declare_queryable("test/attachment") .callback(|query| { - println!( - "{}", - std::str::from_utf8( - &query - .value() - .map(|q| q.payload.contiguous()) - .unwrap_or_default() - ) - .unwrap() - ); - let mut attachment = Attachment::new(); - for (k, v) in query.attachment().unwrap() { + let s = query + .payload() + .map(|p| p.deserialize::().unwrap()) + .unwrap_or_default(); + println!("Query value: {}", s); + + let attachment = query.attachment().unwrap(); + println!("Query attachment: {:?}", attachment); + for (k, v) in attachment + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)); - attachment.insert(&k, &k); } + query - .reply(Ok(Sample::new( - query.key_expr().clone(), - query.value().unwrap().clone(), - ) - .with_attachment(attachment))) - .res() + .reply(query.key_expr().clone(), query.payload().unwrap().clone()) + .attachment(ZBytes::from_iter( + attachment + .iter::<( 
+ [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + .map(|(k, _)| (k, k)), + )) + .wait() .unwrap(); }) - .res() + .wait() .unwrap(); for i in 0..10 { let mut backer = [( @@ -104,20 +108,24 @@ fn queries() { for (j, backer) in backer.iter_mut().enumerate() { *backer = ((i * 10 + j).to_le_bytes(), (i * 10 + j).to_be_bytes()) } + let get = zenoh .get("test/attachment") - .with_value("query") - .with_attachment( - backer - .iter() - .map(|b| (b.0.as_slice(), b.1.as_slice())) - .collect(), - ) - .res() + .payload("query") + .attachment(ZBytes::from_iter(backer.iter())) + .wait() .unwrap(); while let Ok(reply) = get.recv() { - let response = reply.sample.as_ref().unwrap(); - for (k, v) in response.attachment().unwrap() { + let response = reply.result().unwrap(); + for (k, v) in response + .attachment() + .unwrap() + .iter::<( + [u8; std::mem::size_of::()], + [u8; std::mem::size_of::()], + )>() + .map(Result::unwrap) + { assert_eq!(k, v) } } diff --git a/zenoh/tests/authentication.rs b/zenoh/tests/authentication.rs new file mode 100644 index 0000000000..fcd448f7d1 --- /dev/null +++ b/zenoh/tests/authentication.rs @@ -0,0 +1,1922 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +mod test { + use std::{ + fs, + path::PathBuf, + sync::{atomic::AtomicBool, Arc, Mutex}, + time::Duration, + }; + + use once_cell::sync::Lazy; + use tokio::runtime::Handle; + use zenoh::{ + config, + config::{EndPoint, WhatAmI}, + prelude::*, + Config, Session, + }; + use zenoh_core::{zlock, ztimeout}; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + static TESTFILES_PATH: Lazy = Lazy::new(std::env::temp_dir); + static TESTFILES_CREATED: Lazy = Lazy::new(|| AtomicBool::new(false)); + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_usrpwd() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_usrpswd(37447).await; + test_pub_sub_allow_then_deny_usrpswd(37447).await; + test_get_qbl_allow_then_deny_usrpswd(37447).await; + test_get_qbl_deny_then_allow_usrpswd(37447).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_tls() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_tls(37448, false).await; + test_pub_sub_allow_then_deny_tls(37449).await; + test_get_qbl_allow_then_deny_tls(37450).await; + test_get_qbl_deny_then_allow_tls(37451).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_quic() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_quic(37452).await; + test_pub_sub_allow_then_deny_quic(37453).await; + test_get_qbl_deny_then_allow_quic(37454).await; + test_get_qbl_allow_then_deny_quic(37455).await; + } + + #[tokio::test(flavor = 
"multi_thread", worker_threads = 4)] + async fn test_authentication_lowlatency() { + // Test link AuthIds accessibility for lowlatency transport + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_pub_sub_deny_then_allow_tls(37456, true).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_authentication_subject_combinations() { + zenoh_util::try_init_log_from_env(); + create_new_files(TESTFILES_PATH.to_path_buf()) + .await + .unwrap(); + test_deny_allow_combination(37457).await; + test_allow_deny_combination(37458).await; + } + + #[allow(clippy::all)] + async fn create_new_files(certs_dir: std::path::PathBuf) -> std::io::Result<()> { + let created = TESTFILES_CREATED.fetch_or(true, std::sync::atomic::Ordering::SeqCst); + if created { + // only create files once per tests + println!("Skipping testfile creation: files already created by another test instance"); + return Ok(()); + } + use std::io::prelude::*; + let ca_pem = b"-----BEGIN CERTIFICATE----- +MIIDiTCCAnGgAwIBAgIUO1x6LAlICgKs5+pYUTo4CughfKEwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTExNDM0MjNaFw0yNTAzMTExNDM0MjNaMFQxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRgwFgYDVQQDDA96 +c190ZXN0X3Jvb3RfY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3 +pFWM+IJNsRCYHt1v/TliecppwVZV+ZHfFw9JKN9ev4K/fWHUiAOwp91MOLxbaYKd +C6dxW28YVGltoGz3kUZJZcJRQVso1jXv24Op4muOsiYXukLc4TU2F6dG1XqkLt5t +svsYAQFf1uK3//QZFVRBosJEn+jjiJ4XCvt49mnPRolp1pNKX0z31mZO6bSly6c9 +OVlJMjWpDCYSOuf6qZZ36fa9eSut2bRJIPY0QCsgnqYBTnIEhksS+3jy6Qt+QpLz +95pFdLbW/MW4XKpaDltyYkO6QrBekF6uWRlvyAHU+NqvXZ4F/3Z5l26qLuBcsLPJ +kyawkO+yNIDxORmQgMczAgMBAAGjUzBRMB0GA1UdDgQWBBThgotd9ws2ryEEaKp2 ++RMOWV8D7jAfBgNVHSMEGDAWgBThgotd9ws2ryEEaKp2+RMOWV8D7jAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA9QoPv78hGmvmqF4GZeqrOBKQB +N/H5wL7f8H6BXU/wpNo2nnWOJn3u37lT+zivAdGEv+x+GeKekcugKBCSluhBLpVb +VNXe4WwMm5FBuO2NRBN2nblTMm1kEO00nVk1/yNo4hI8mj7d4YLU62d7324osNpF +wHqu6B0/c99JeKRvODGswyff1i8rJ1jpcgk/JmHg7UQBHEIkn0cRR0f9W3Mxv6b5 +ZeowRe81neWNkC6IMiMmzA0iHGkhoUMA15qG1ZKOr1XR364LH5BfNNpzAWYwkvJs +0JFrrdw+rm+cRJWs55yiyCCs7pyg1IJkY/o8bifdCOUgIyonzffwREk3+kZR +-----END CERTIFICATE-----"; + + let client_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpkwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxNDhaFw0yNTAzMTkxMTMxNDhaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtj +bGllbnRfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzU2p1a +ly/1bi2TDZ8+Qlvk9/3KyHqrg2BGZUxB3Pj/lufDuYNwOHkss99wp8gzMsT28mD4 +y6X7nCgEN8WeHl+/xfLuGsWIBa1OOr6dz0qewoWFsor01cQ8+nwAKlgnz6IvHfkQ +OJZD/QYSdyn6c1AcIyS60vo4qMjyI4OVb1Dl4WpC4vCmWvDT0WjBZ5GckCnuQ8wS +wZ5MtPuMQf8kYX95ll7eBtDfEXF9Oja0l1/5SmlHuKyqDy4sIKovxtFHTqgb8PUc +yT33pUHOsBXruNBxl1MKq1outdMqcQknT6FAC+aVZ7bTlwhnH8p5Apn57g+dJYTI +9dCr1e2oK5NohhkCAwEAAaNaMFgwFgYDVR0RBA8wDYILY2xpZW50X3NpZGUwHQYD +VR0OBBYEFHDUYYfQacLj1tp49OG9NbPuL0N/MB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQB+nFAe6QyD2AaFdgrFOyEE +MeYb97sy9p5ylhMYyU62AYsIzzpTY74wBG78qYPIw3lAYzNcN0L6T6kBQ4lu6gFm +XB0SqCZ2AkwvV8tTlbLkZeoO6rONeke6c8cJsxYN7NiknDvTMrkTTgiyvbCWfEVX +Htnc4j/KzSBX3UjVcbPM3L/6KwMRw050/6RCiOIPFjTOCfTGoDx5fIyBk3ch/Plw 
+TkH2juHxX0/aCxr8hRE1v9+pXXlGnGoKbsDMLN9Aziu6xzdT/kD7BvyoM8rh7CE5 +ae7/R4sd13cZ2WGDPimqO0z1kItMOIdiYvk4DgOg+J8hZSkKT56erafdDa2LPBE6 +-----END CERTIFICATE-----"; + + let client_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDM1NqdWpcv9W4t +kw2fPkJb5Pf9ysh6q4NgRmVMQdz4/5bnw7mDcDh5LLPfcKfIMzLE9vJg+Mul+5wo +BDfFnh5fv8Xy7hrFiAWtTjq+nc9KnsKFhbKK9NXEPPp8ACpYJ8+iLx35EDiWQ/0G +Encp+nNQHCMkutL6OKjI8iODlW9Q5eFqQuLwplrw09FowWeRnJAp7kPMEsGeTLT7 +jEH/JGF/eZZe3gbQ3xFxfTo2tJdf+UppR7isqg8uLCCqL8bRR06oG/D1HMk996VB +zrAV67jQcZdTCqtaLrXTKnEJJ0+hQAvmlWe205cIZx/KeQKZ+e4PnSWEyPXQq9Xt +qCuTaIYZAgMBAAECggEAAlqVVw7UEzLjtN4eX1S6tD3jvCzFBETdjgENF7TfjlR4 +lln9UyV6Xqkc+Y28vdwZwqHwW90sEPCc5ShUQD7+jBzi8FVcZSX4o7rVCbz8RXgg +1eI5EKf632YQflWNpwTxGcTnGCY/sjleil/yst6sDdD+9eR4OXQme2Wt8wyH8pLm +bf1OensGrFu3kJaPMOfP6jXnqEqkUPqmaCNW7+Ans8E+4J9oksRVPQJEuxwSjdJu +BlG50KKpl0XwZ/u/hkkj8/BlRDa62YMGJkFOwaaGUu2/0UU139XaJiMSPoL6t/BU +1H15dtW9liEtnHIssXMRzc9cg+xPgCs79ABXSZaFUQKBgQD4mH/DcEFwkZQcr08i +GUk0RE5arAqHui4eiujcPZVV6j/L7PHHmabKRPBlsndFP7KUCtvzNRmHq7JWDkpF +S36OE4e94CBYb0CIrO8OO5zl1vGAn5qa9ckefSFz9AMWW+hSuo185hFjt67BMaI0 +8CxfYDH+QY5D4JE5RhSwsOmiUQKBgQDS7qjq+MQKPHHTztyHK8IbAfEGlrBdCAjf +K1bDX2BdfbRJMZ+y8LgK5HxDPlNx2/VauBLsIyU1Zirepd8hOsbCVoK1fOq+T7bY +KdB1oqLK1Rq1sMBc26F24LBaZ3Pw5XgYEcvaOW0JFQ9Oc4VjcIXKjTNhobNOegfK +QDnw8fEtSQKBgQDrCuTh2GVHFZ3AcVCUoOvB60NaH4flRHcOkbARbHihvtWK7gC8 +A97bJ8tTnCWA5/TkXFAR54a36/K1wtUeJ38Evhp9wEdU1ftiPn/YKSzzcwLr5fu7 +v9/kX9MdWv0ASu2iKphUGwMeETG9oDwJaXvKwZ0DFOB59P3Z9RTi6qI7wQKBgQCp +uBZ6WgeDJPeBsaSHrpHUIU/KOV1WvaxFxR1evlNPZmG1sxQIat/rA8VoZbHGn3Ff +uVSgY/cAbGB6HYTXu+9JV0p8tTI8Ru+cJqjwvhe2lJmVL87X6HCWsluzoiIL5tcm +pssbn7E36ZYTTag6RsOgItUA7ZbUwiOafOsiD8o64QKBgE6nOkAfy5mbp7X+q9uD +J5y6IXpY/Oia/RwveLWFbI/aum4Nnhb6L9Y0XlrYjm4cJOchQyDR7FF6f4EuAiYb +wdxBbkxXpwXnfKCtNvMF/wZMvPVaS5HTQga8hXMrtlW6jtTJ4HmkTTB/MILAXVkJ +EHi+N70PcrYg6li415TGfgDz +-----END PRIVATE KEY-----"; + + let server_side_pem = b"-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIUOi9jKILrOzfRNGIkQ48S90NehpgwDQYJKoZIhvcNAQEL +BQAwVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGDAWBgNVBAMMD3pzX3Rlc3Rfcm9vdF9jYTAeFw0yNDAz +MTkxMTMxMDRaFw0yNTAzMTkxMTMxMDRaMFAxCzAJBgNVBAYTAkZSMQswCQYDVQQI +DAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpzLCBJbmMuMRQwEgYDVQQDDAtz +ZXJ2ZXJfc2lkZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKw4eKzt +T1inzuEIPBaPksWyjoD9n6uJx9jAQ2wRB6rXiAsXVLRSuczdGDpb1MwAqoIi6ozw +tzDRwkr58vUNaTCswxadlAmB44JEVYKZoublHjlVj5ygr0R4R5F2T9tIV+jpqZuK +HR4dHe8PiDCiWVzWvYwOLVKXQKSeaE2Z143ukVIJ85qmNykJ066AVhgWnIYSCR9c +s7WPBdTWAW3L4yNlast9hfvxdQNDs5AtUnJKfAX+7DylPAm8V7YjU1k9AtTNPbpy +kb9X97ErsB8891MmZaGZp0J6tnuucDkk0dlowMVvi2aUCsYoKF5DgGxtyVAeLhTP +70GenaLe2uwG8fMCAwEAAaNaMFgwFgYDVR0RBA8wDYILc2VydmVyX3NpZGUwHQYD +VR0OBBYEFBKms1sOw8nM/O5SN1EZIH+LsWaPMB8GA1UdIwQYMBaAFOGCi133Czav +IQRoqnb5Ew5ZXwPuMA0GCSqGSIb3DQEBCwUAA4IBAQA6H/sfm8YUn86+GwxNR9i9 +MCL7WHVRx3gS9ENK87+HtZNL2TVvhPJtupG3Pjgqi33FOHrM4rMUcWSZeCEycVgy +5cjimQLwfDljIBRQE6sem3gKf0obdWl5AlPDLTL/iKj5Su7NycrjZFYqkjZjn+58 +fe8lzHNeP/3RQTgjJ98lQI0bdzGDG1+QoxTgPEc77vgN0P4MHJYx2auz/7jYBqNJ +ko8nugIQsd4kOhmOIBUQ8aXkXFktSQIerEGB8uw5iF2cCdH/sTCvhzhxLb4IWo/O +0cAZ+Vs4FW3KUn/Y44yrVAWl1H6xdFsNXBqbzVEMzlt/RV3rH70RDCc20XhP+w+g +-----END CERTIFICATE-----"; + + let server_side_key = b"-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsOHis7U9Yp87h +CDwWj5LFso6A/Z+ricfYwENsEQeq14gLF1S0UrnM3Rg6W9TMAKqCIuqM8Lcw0cJK ++fL1DWkwrMMWnZQJgeOCRFWCmaLm5R45VY+coK9EeEeRdk/bSFfo6ambih0eHR3v 
+D4gwollc1r2MDi1Sl0CknmhNmdeN7pFSCfOapjcpCdOugFYYFpyGEgkfXLO1jwXU +1gFty+MjZWrLfYX78XUDQ7OQLVJySnwF/uw8pTwJvFe2I1NZPQLUzT26cpG/V/ex +K7AfPPdTJmWhmadCerZ7rnA5JNHZaMDFb4tmlArGKCheQ4BsbclQHi4Uz+9Bnp2i +3trsBvHzAgMBAAECggEAUjpIS/CmkOLWYRVoczEr197QMYBnCyUm2TO7PU7IRWbR +GtKR6+MPuWPbHIoaCSlMQARhztdj8BhG1zuOKDi1/7qNDzA/rWZp9RmhZlDquamt +i5xxjEwgQuXW7fn6WO2qo5dlFtGT43vtfeYBlY7+cdhJ+iQOub9j6vWDQYHxrF7x +yM8xvNzomHThvLFzWXJV/nGjX5pqPraMmwJUW+MGX0YaEr6tClqsc1Kmxhs3iIUo +1JCqh3FpVu2i/mR9fdcQ0ONT/s1UHzy+1Bhmh3j2Fuk4+ZeLMfxTfFxk5U0BeMQY +sES3qmd+pG5iqPW+AmXy299G89jf5+1Q4J2Km5KOUQKBgQDidifoeknpi9hRHLLD +w/7KMMe8yYg3c3dv5p0iUQQ2pXd1lJIFQ+B2/D+hfOXhnN/iCDap89ll2LoQ2Q9L +38kQXH06HCM2q11RP0BEsZCG0CnluS+JVNnjs/ALi+yc4HSpzKPs3zXIC3dLOUbq +ov5Esa5h/RU6+NO+DH72TWTv6wKBgQDCryPKtOcLp1eqdwIBRoXdPZeUdZdnwT8+ +70DnC+YdOjFkqTbaoYE5ePa3ziGOZyTFhJbPgiwEdj9Ez1JSgqLLv5hBc4s6FigK +D7fOnn7Q7+al/kEW7+X5yoSl1bFuPCqGL1xxzxmpDY8Gf3nyZ+QGfWIenbk3nq12 +nTgINyWMGQKBgQDSrxBDxXl8EMGH/MYHQRGKs8UvSuMyi3bjoU4w/eSInno75qPO +yC5NJDJin9sSgar8E54fkSCBExdP01DayvC5CwLqDAFqvBTOIKU/A18tPP6tnRKv +lkQ8Bkxdwai47k07J4qeNa9IU/qA/mGOq2MZL6DHwvd8bMA5gFCh/rDYTwKBgAPm +gGASScK5Ao+evMKLyCjLkBrgVD026O542qMGYQDa5pxuq3Or4qvlGYRLM+7ncBwo +8OCNahZYzCGzyaFvjpVobEN7biGmyfyRngwcrsu+0q8mreUov0HG5etwoZJk0DFK +B58cGBaD+AaYTTgnDrF2l52naUuM+Uq0EahQeocZAoGBAMJEGUFyEdm1JATkNhBv +ruDzj07PCjdvq3lUJix2ZlKlabsi5V+oYxMmrUSU8Nkaxy6O+qETNRNWQeWbPQHL +IZx/qrP32PmWX0IVj3pbfKHQSpOKNGzL9xUJ/FIycZWyT3yGf24KBuJwIx7xSrRx +qNsoty1gY/y3n7SN/iMZo8lO +-----END PRIVATE KEY-----"; + + let credentials_txt = b"client1name:client1passwd +client2name:client2passwd"; + + struct Testfile<'a> { + name: &'a str, + value: &'a [u8], + } + + let test_files = vec![ + Testfile { + name: "ca.pem", + value: ca_pem, + }, + Testfile { + name: "clientsidekey.pem", + value: client_side_key, + }, + Testfile { + name: "clientside.pem", + value: client_side_pem, + }, + Testfile { + name: "serversidekey.pem", + value: server_side_key, + }, + Testfile { + name: "serverside.pem", + value: server_side_pem, + }, + Testfile { + name: "credentials.txt", + value: credentials_txt, + }, + ]; + for test_file in test_files { + let file_path = certs_dir.join(test_file.name); + let mut file = fs::File::create(&file_path)?; + file.write_all(test_file.value)?; + } + + println!("testfiles created successfully."); + Ok(()) + } + + async fn get_basic_router_config_tls(port: u16, lowlatency: bool) -> Config { + let cert_path = TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config + .listen + .endpoints + .set(vec![format!("tls/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + config + } + async fn get_basic_router_config_quic(port: u16) -> Config { + let cert_path = 
TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config + .listen + .endpoints + .set(vec![format!("quic/127.0.0.1:{}", port).parse().unwrap()]) + .unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config + } + + async fn get_basic_router_config_usrpswd(port: u16) -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config + .listen + .endpoints + .set(vec![format!("tcp/127.0.0.1:{port}").parse().unwrap()]) + .unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "routername", + password: "routerpasswd", + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .auth + .usrpwd + .set_dictionary_file(Some(format!( + "{}/credentials.txt", + TESTFILES_PATH.to_string_lossy() + ))) + .unwrap(); + config + } + async fn close_router_session(s: Session) { + println!("Closing router session"); + ztimeout!(s.close()).unwrap(); + } + + async fn get_basic_router_config_quic_usrpswd(port: u16) -> Config { + let cert_path = TESTFILES_PATH.to_string_lossy(); + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config + .listen + .endpoints + .set(vec![ + format!("quic/127.0.0.1:{port}").parse().unwrap(), + format!("tcp/127.0.0.1:{port}").parse().unwrap(), + ]) + .unwrap(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic", "tcp" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + }, + }, + "auth": { + usrpwd: { + user: "routername", + password: "routerpasswd", + }, + }, + }"#, + ) + .unwrap(); + config + .transport + .auth + .usrpwd + .set_dictionary_file(Some(format!( + "{}/credentials.txt", + TESTFILES_PATH.to_string_lossy() + ))) + .unwrap(); + config + .transport + .link + .tls + .set_server_private_key(Some(format!("{}/serversidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_server_certificate(Some(format!("{}/serverside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config + } + + async fn get_client_sessions_tls(port: u16, lowlatency: bool) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + 
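// Note on the TLS setup shared by these helpers: "client_auth": true turns on
// mutual TLS (the client presents clientside.pem, signed by the same test CA),
// and "server_name_verification": false is needed because the tests dial
// 127.0.0.1 while the certificates are issued for the names client_side and
// server_side. With mutual TLS in place, the router can match the client
// certificate's common name as an ACL subject, as in the config further below:
//     "subjects": [ { "id": "s1", "cert_common_names": ["client_side"] } ]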
.set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::client([format!("tls/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "tls" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + config.transport.unicast.set_lowlatency(lowlatency).unwrap(); + config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_quic(port: u16) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_usrpswd(port: u16) -> (Session, Session) { + println!("Opening client sessions"); + let mut config = + config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client1name", + password: "client1passwd", + }, + } + }"#, + ) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + let mut config = + config::client([format!("tcp/127.0.0.1:{port}").parse::().unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "auth": { + usrpwd: { + user: "client2name", + password: "client2passwd", + }, + } + }"#, + ) + .unwrap(); + let s02 = 
ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn get_client_sessions_quic_usrpswd(port: u16) -> (Session, Session) { + let cert_path = TESTFILES_PATH.to_string_lossy(); + println!("Opening client sessions"); + let mut config = config::client([format!("quic/127.0.0.1:{port}") + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + }, + "auth": { + usrpwd: { + user: "client1name", + password: "client1passwd", + }, + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); + + let mut config = config::client([format!("quic/127.0.0.1:{}", port) + .parse::() + .unwrap()]); + config + .insert_json5( + "transport", + r#"{ + "link": { + "protocols": [ + "quic" + ], + "tls": { + "client_auth": true, + "server_name_verification": false + } + }, + "auth": { + usrpwd: { + user: "client2name", + password: "client2passwd", + }, + } + }"#, + ) + .unwrap(); + config + .transport + .link + .tls + .set_client_private_key(Some(format!("{}/clientsidekey.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_client_certificate(Some(format!("{}/clientside.pem", cert_path))) + .unwrap(); + config + .transport + .link + .tls + .set_root_ca_certificate(Some(format!("{}/ca.pem", cert_path))) + .unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); + (s01, s02) + } + + async fn close_sessions(s01: Session, s02: Session) { + println!("Closing client sessions"); + ztimeout!(s01.close()).unwrap(); + ztimeout!(s02.close()).unwrap(); + } + + async fn test_pub_sub_deny_then_allow_tls(port: u16, lowlatency: bool) { + println!("test_pub_sub_deny_then_allow_tls"); + + let mut config_router = get_basic_router_config_tls(port, lowlatency).await; + + config_router + .insert_json5( + "access_control", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": [ + { + "id": "r1", + "permission": "allow", + "flows": ["ingress","egress"], + "messages": [ + "put", + "declare_subscriber" + ], + "key_exprs": [ + "test/demo" + ], + }, + ], + "subjects": [ + { + "id": "s1", + "cert_common_names": [ + "client_side" + ] + } + ], + "policies": [ + { + "rules": ["r1"], + "subjects": ["s1"], + } + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router)).unwrap(); + + let (sub_session, pub_session) = get_client_sessions_tls(port, lowlatency).await; + { + let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.payload().deserialize::().unwrap(); + }) + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare()).unwrap(); + } + close_sessions(sub_session, pub_session).await; 
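+        // The client sessions are closed before the router session so that teardown
+        // does not race reconnection attempts from the clients.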
+        close_router_session(session).await;
+    }
+
+    async fn test_pub_sub_allow_then_deny_tls(port: u16) {
+        println!("test_pub_sub_allow_then_deny_tls");
+        let mut config_router = get_basic_router_config_tls(port, false).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+        let (sub_session, pub_session) = get_client_sessions_tls(port, false).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_ne!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_get_qbl_deny_then_allow_tls(port: u16) {
+        println!("test_get_qbl_deny_then_allow_tls");
+
+        let mut config_router = get_basic_router_config_tls(port, false).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["egress", "ingress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply",
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_tls(port, false).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_eq!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_get_qbl_allow_then_deny_tls(port: u16) {
+        println!("test_get_qbl_allow_then_deny_tls");
+
+        let mut config_router = get_basic_router_config_tls(port, false).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_tls(port, false).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_ne!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_pub_sub_deny_then_allow_quic(port: u16) {
+        println!("test_pub_sub_deny_then_allow_quic");
+
+        let mut config_router = get_basic_router_config_quic(port).await;
+
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["egress", "ingress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (sub_session, pub_session) = get_client_sessions_quic(port).await;
+        {
+            let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber = sub_session
+                .declare_subscriber(KEY_EXPR)
+                .callback(move |sample| {
+                    let mut temp_value = zlock!(temp_recv_value);
+                    *temp_value = sample.payload().deserialize::<String>().unwrap();
+                })
+                .await
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            publisher.put(VALUE).await.unwrap();
+            tokio::time::sleep(SLEEP).await;
+            assert_eq!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        close_router_session(session).await;
+    }
+
+    #[allow(unused)]
+    async fn test_pub_sub_allow_then_deny_quic(port: u16) {
+        println!("test_pub_sub_allow_then_deny_quic");
+
+        let mut config_router = get_basic_router_config_quic(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+        let (sub_session, pub_session) = get_client_sessions_quic(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_ne!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        close_router_session(session).await;
+    }
+
+    #[allow(unused)]
+    async fn test_get_qbl_deny_then_allow_quic(port: u16) {
+        println!("test_get_qbl_deny_then_allow_quic");
+
+        let mut config_router = get_basic_router_config_quic(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["egress", "ingress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_quic(port).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_eq!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    #[allow(unused)]
+    async fn test_get_qbl_allow_then_deny_quic(port: u16) {
+        println!("test_get_qbl_allow_then_deny_quic");
+
+        let mut config_router = get_basic_router_config_quic(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_quic(port).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_ne!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_pub_sub_deny_then_allow_usrpswd(port: u16) {
+        println!("test_pub_sub_deny_then_allow_usrpswd");
+
+        let mut config_router = get_basic_router_config_usrpswd(port).await;
+
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["ingress", "egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let publisher = pub_session.declare_publisher(KEY_EXPR).await.unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber = sub_session
+                .declare_subscriber(KEY_EXPR)
+                .callback(move |sample| {
+                    let mut temp_value = zlock!(temp_recv_value);
+                    *temp_value = sample.payload().deserialize::<String>().unwrap();
+                })
+                .await
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            publisher.put(VALUE).await.unwrap();
+            tokio::time::sleep(SLEEP).await;
+            assert_eq!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_pub_sub_allow_then_deny_usrpswd(port: u16) {
+        println!("test_pub_sub_allow_then_deny_usrpswd");
+
+        let mut config_router = get_basic_router_config_usrpswd(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+        let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_ne!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_get_qbl_deny_then_allow_usrpswd(port: u16) {
+        println!("test_get_qbl_deny_then_allow_usrpswd");
+
+        let mut config_router = get_basic_router_config_usrpswd(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["ingress", "egress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_eq!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_get_qbl_allow_then_deny_usrpswd(port: u16) {
+        println!("test_get_qbl_allow_then_deny_usrpswd");
+
+        let mut config_router = get_basic_router_config_usrpswd(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "query",
+                                "declare_queryable",
+                                "reply"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+        println!("Opening router session");
+
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+
+        let (get_session, qbl_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let mut received_value = String::new();
+
+            let qbl = ztimeout!(qbl_session
+                .declare_queryable(KEY_EXPR)
+                .callback(move |sample| {
+                    tokio::task::block_in_place(move || {
+                        Handle::current().block_on(async move {
+                            ztimeout!(sample.reply(KEY_EXPR, VALUE)).unwrap()
+                        });
+                    });
+                }))
+            .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+            let recv_reply = ztimeout!(get_session.get(KEY_EXPR)).unwrap();
+            while let Ok(reply) = ztimeout!(recv_reply.recv_async()) {
+                match reply.result() {
+                    Ok(sample) => {
+                        received_value = sample.payload().deserialize::<String>().unwrap();
+                        break;
+                    }
+                    Err(e) => println!(
+                        "Error: {}",
+                        e.payload()
+                            .deserialize::<String>()
+                            .unwrap_or_else(|e| format!("{}", e))
+                    ),
+                }
+            }
+            tokio::time::sleep(SLEEP).await;
+            assert_ne!(received_value, VALUE);
+            ztimeout!(qbl.undeclare()).unwrap();
+        }
+        close_sessions(get_session, qbl_session).await;
+        close_router_session(session).await;
+    }
+
+    async fn test_deny_allow_combination(port: u16) {
+        println!("test_deny_allow_combination");
+
+        let mut config_router = get_basic_router_config_quic_usrpswd(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "deny",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "allow",
+                            "flows": ["ingress", "egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ],
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+
+        println!("Opening router session");
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+        let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_ne!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        let (sub_session, pub_session) = get_client_sessions_quic_usrpswd(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_eq!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_router_session(session).await;
+    }
+
+    async fn test_allow_deny_combination(port: u16) {
+        println!("test_allow_deny_combination");
+
+        let mut config_router = get_basic_router_config_quic_usrpswd(port).await;
+        config_router
+            .insert_json5(
+                "access_control",
+                r#"{
+                    "enabled": true,
+                    "default_permission": "allow",
+                    "rules": [
+                        {
+                            "id": "r1",
+                            "permission": "deny",
+                            "flows": ["egress"],
+                            "messages": [
+                                "put",
+                                "declare_subscriber"
+                            ],
+                            "key_exprs": [
+                                "test/demo"
+                            ],
+                        },
+                    ],
+                    "subjects": [
+                        {
+                            "id": "s1",
+                            "cert_common_names": [
+                                "client_side"
+                            ],
+                            "usernames": [
+                                "client1name",
+                                "client2name"
+                            ]
+                        }
+                    ],
+                    "policies": [
+                        {
+                            "rules": ["r1"],
+                            "subjects": ["s1"],
+                        }
+                    ]
+                }"#,
+            )
+            .unwrap();
+
+        println!("Opening router session");
+        let session = ztimeout!(zenoh::open(config_router)).unwrap();
+        let (sub_session, pub_session) = get_client_sessions_usrpswd(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_eq!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_sessions(sub_session, pub_session).await;
+        let (sub_session, pub_session) = get_client_sessions_quic_usrpswd(port).await;
+        {
+            let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR)).unwrap();
+            let received_value = Arc::new(Mutex::new(String::new()));
+            let temp_recv_value = received_value.clone();
+            let subscriber =
+                ztimeout!(sub_session
+                    .declare_subscriber(KEY_EXPR)
+                    .callback(move |sample| {
+                        let mut temp_value = zlock!(temp_recv_value);
+                        *temp_value = sample.payload().deserialize::<String>().unwrap();
+                    }))
+                .unwrap();
+
+            tokio::time::sleep(SLEEP).await;
+
+            ztimeout!(publisher.put(VALUE)).unwrap();
+            tokio::time::sleep(SLEEP).await;
+
+            assert_ne!(*zlock!(received_value), VALUE);
+            ztimeout!(subscriber.undeclare()).unwrap();
+        }
+        close_router_session(session).await;
+    }
+}
diff --git a/zenoh/tests/bytes.rs b/zenoh/tests/bytes.rs
new file mode 100644
index 0000000000..34c9837d04
--- /dev/null
+++ b/zenoh/tests/bytes.rs
@@ -0,0 +1,75 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+#![cfg(all(feature = "shared-memory", feature = "unstable"))]
+use zenoh::{
+    bytes::ZBytes,
+    prelude::*,
+    shm::{
+        zshm, zshmmut, PosixShmProviderBackend, ShmProviderBuilder, ZShm, ZShmMut,
+        POSIX_PROTOCOL_ID,
+    },
+};
+
+#[test]
+fn shm_bytes_single_buf() {
+    // create an SHM backend...
+    let backend = PosixShmProviderBackend::builder()
+        .with_size(4096)
+        .unwrap()
+        .res()
+        .unwrap();
+    // ...and an SHM provider
+    let provider = ShmProviderBuilder::builder()
+        .protocol_id::<POSIX_PROTOCOL_ID>()
+        .backend(backend)
+        .res();
+
+    // Prepare a layout for allocations
+    let layout = provider.alloc(1024).into_layout().unwrap();
+
+    // allocate an SHM buffer (ZShmMut)
+    let owned_shm_buf_mut = layout.alloc().wait().unwrap();
+
+    // convert into immutable owned buffer (ZShmMut -> ZShm)
+    let owned_shm_buf: ZShm = owned_shm_buf_mut.into();
+
+    // convert again into mutable owned buffer (ZShm -> ZShmMut)
+    let owned_shm_buf_mut: ZShmMut = owned_shm_buf.try_into().unwrap();
+
+    // build a ZBytes from an SHM buffer (ZShmMut -> ZBytes)
+    let mut payload: ZBytes = owned_shm_buf_mut.into();
+
+    // branch to illustrate immutable access to SHM data
+    {
+        // deserialize ZBytes as an immutably borrowed zshm (ZBytes -> &zshm)
+        let borrowed_shm_buf: &zshm = payload.deserialize().unwrap();
+
+        // construct owned buffer from borrowed type (&zshm -> ZShm)
+        let owned = borrowed_shm_buf.to_owned();
+
+        // try to construct mutable ZShmMut (ZShm -> ZShmMut)
+        let owned_mut: Result<ZShmMut, _> = owned.try_into();
+        // the attempt fails because ZShm has two existing references ('owned' and inside 'payload')
+        assert!(owned_mut.is_err())
+    }
+
+    // branch to illustrate mutable access to SHM data
+    {
+        // deserialize ZBytes as mutably borrowed zshm (ZBytes -> &mut zshm)
+        let borrowed_shm_buf: &mut zshm = payload.deserialize_mut().unwrap();
+
+        // convert zshm to zshmmut (&mut zshm -> &mut zshmmut)
+        let _borrowed_shm_buf_mut: &mut zshmmut = borrowed_shm_buf.try_into().unwrap();
+    }
+}
diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs
index 234cb50454..78814556f7 100644
--- a/zenoh/tests/connection_retry.rs
+++ b/zenoh/tests/connection_retry.rs
@@ -11,8 +11,12 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use config::ConnectionRetryConf;
-use zenoh::prelude::sync::*;
+use zenoh::{
+    config::{ConnectionRetryConf, EndPoint},
+    prelude::*,
+    Config,
+};
+use zenoh_config::ModeDependent;
 
 #[test]
 fn retry_config_overriding() {
@@ -71,7 +75,14 @@ fn retry_config_overriding() {
         },
     ];
 
-    for (i, endpoint) in config.listen().endpoints().iter().enumerate() {
+    for (i, endpoint) in config
+        .listen()
+        .endpoints()
+        .get(config.mode().unwrap_or_default())
+        .unwrap_or(&vec![])
+        .iter()
+        .enumerate()
+    {
         let retry_config = zenoh_config::get_retry_config(&config, Some(endpoint), true);
         assert_eq!(retry_config, expected[i]);
     }
@@ -165,7 +176,7 @@ fn listen_no_retry() {
         .unwrap();
     config.insert_json5("listen/timeout_ms", "0").unwrap();
 
-    zenoh::open(config).res().unwrap();
+    zenoh::open(config).wait().unwrap();
 }
 
 #[test]
@@ -178,5 +189,5 @@ fn listen_with_retry() {
     config.insert_json5("listen/timeout_ms", "1000").unwrap();
 
-    zenoh::open(config).res().unwrap();
+    zenoh::open(config).wait().unwrap();
 }
diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs
index 6b2790e151..11a6e18b53 100644
--- a/zenoh/tests/events.rs
+++ b/zenoh/tests/events.rs
@@ -12,99 +12,107 @@
 // ZettaScale Zenoh Team,
 //
 use std::time::Duration;
-use zenoh::prelude::r#async::*;
-use zenoh::query::Reply;
+
+use zenoh::{config, query::Reply, sample::SampleKind, Session};
 use zenoh_core::ztimeout;
 
 const TIMEOUT: Duration = Duration::from_secs(10);
 
 async fn open_session(listen: &[&str], connect: &[&str]) -> Session {
     let mut config = config::peer();
-    config.listen.endpoints = listen
-        .iter()
-        .map(|e| e.parse().unwrap())
-        .collect::<Vec<EndPoint>>();
-    config.connect.endpoints = connect
-        .iter()
-        .map(|e| e.parse().unwrap())
-        .collect::<Vec<EndPoint>>();
+    config
+        .listen
+        .endpoints
+        .set(
+            listen
+                .iter()
+                .map(|e| e.parse().unwrap())
+                .collect::<Vec<EndPoint>>(),
+        )
+        .unwrap();
+    config
+        .connect
+        .endpoints
+        .set(
+            connect
+                .iter()
+                .map(|e| e.parse().unwrap())
+                .collect::<Vec<EndPoint>>(),
+        )
+        .unwrap();
     config.scouting.multicast.set_enabled(Some(false)).unwrap();
     println!("[ ][01a] Opening session");
-    ztimeout!(zenoh::open(config).res_async()).unwrap()
+    ztimeout!(zenoh::open(config)).unwrap()
 }
 
 async fn close_session(session: Session) {
     println!("[ ][01d] Closing session");
-    ztimeout!(session.close().res_async()).unwrap();
+    ztimeout!(session.close()).unwrap();
 }
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
 async fn zenoh_events() {
+    use zenoh::prelude::SessionDeclarations;
     let session = open_session(&["tcp/127.0.0.1:18447"], &[]).await;
     let zid = session.zid();
-    let sub1 = session
-        .declare_subscriber(format!("@/session/{zid}/transport/unicast/*"))
-        .res()
-        .await
-        .unwrap();
-    let sub2 = session
-        .declare_subscriber(format!("@/session/{zid}/transport/unicast/*/link/*"))
-        .res()
-        .await
-        .unwrap();
+    let sub1 =
+        ztimeout!(session.declare_subscriber(format!("@/{zid}/session/transport/unicast/*")))
+            .unwrap();
+    let sub2 = ztimeout!(
+        session.declare_subscriber(format!("@/{zid}/session/transport/unicast/*/link/*"))
+    )
+    .unwrap();
     let session2 = open_session(&["tcp/127.0.0.1:18448"], &["tcp/127.0.0.1:18447"]).await;
     let zid2 = session2.zid();
 
     let sample = ztimeout!(sub1.recv_async());
     assert!(sample.is_ok());
-    let key_expr = sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}")));
-    assert!(sample.as_ref().unwrap().kind == SampleKind::Put);
+    let key_expr = sample.as_ref().unwrap().key_expr().as_str();
+    assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}")));
+    assert!(sample.as_ref().unwrap().kind() == SampleKind::Put);
 
     let sample = ztimeout!(sub2.recv_async());
     assert!(sample.is_ok());
-    let key_expr = sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/")));
-    assert!(sample.as_ref().unwrap().kind == SampleKind::Put);
+    let key_expr = sample.as_ref().unwrap().key_expr().as_str();
+    assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/")));
+    assert!(sample.as_ref().unwrap().kind() == SampleKind::Put);
 
-    let replies: Vec<Reply> = ztimeout!(session
-        .get(format!("@/session/{zid}/transport/unicast/*"))
-        .res_async())
-    .unwrap()
-    .into_iter()
-    .collect();
+    let replies: Vec<Reply> =
+        ztimeout!(session.get(format!("@/{zid}/session/transport/unicast/*")))
+            .unwrap()
+            .into_iter()
+            .collect();
     assert!(replies.len() == 1);
-    assert!(replies[0].sample.is_ok());
-    let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}")));
+    assert!(replies[0].result().is_ok());
+    let key_expr = replies[0].result().unwrap().key_expr().as_str();
+    assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}")));
 
-    let replies: Vec<Reply> = ztimeout!(session
-        .get(format!("@/session/{zid}/transport/unicast/*/link/*"))
-        .res_async())
-    .unwrap()
-    .into_iter()
-    .collect();
+    let replies: Vec<Reply> =
+        ztimeout!(session.get(format!("@/{zid}/session/transport/unicast/*/link/*")))
+            .unwrap()
+            .into_iter()
+            .collect();
     assert!(replies.len() == 1);
-    assert!(replies[0].sample.is_ok());
-    let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/")));
+    assert!(replies[0].result().is_ok());
+    let key_expr = replies[0].result().unwrap().key_expr().as_str();
+    assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/")));
 
     close_session(session2).await;
 
     let sample = ztimeout!(sub1.recv_async());
     assert!(sample.is_ok());
-    let key_expr = sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}")));
-    assert!(sample.as_ref().unwrap().kind == SampleKind::Delete);
+    let key_expr = sample.as_ref().unwrap().key_expr().as_str();
+    assert!(key_expr.eq(&format!("@/{zid}/session/transport/unicast/{zid2}")));
+    assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete);
 
     let sample = ztimeout!(sub2.recv_async());
     assert!(sample.is_ok());
-    let key_expr = sample.as_ref().unwrap().key_expr.as_str();
-    assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/")));
-    assert!(sample.as_ref().unwrap().kind == SampleKind::Delete);
+    let key_expr = sample.as_ref().unwrap().key_expr().as_str();
+    assert!(key_expr.starts_with(&format!("@/{zid}/session/transport/unicast/{zid2}/link/")));
+    assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete);
 
-    sub2.undeclare().res().await.unwrap();
-    sub1.undeclare().res().await.unwrap();
+    ztimeout!(sub2.undeclare()).unwrap();
+    ztimeout!(sub1.undeclare()).unwrap();
 
     close_session(session).await;
 }
diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs
index 27f34233c4..a63fb10e8c 100644
--- a/zenoh/tests/formatters.rs
+++ b/zenoh/tests/formatters.rs
@@ -11,13 +11,16 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+#![cfg(feature = "unstable")]
+use zenoh::key_expr::format::{kedefine, keformat};
+
 #[test]
 fn kedefine_reuse() {
-    zenoh::kedefine!(
+    kedefine!(
         pub gkeys: "zenoh/${group:*}/${member:*}",
     );
     let mut formatter = gkeys::formatter();
-    let k1 = zenoh::keformat!(formatter, group = "foo", member = "bar").unwrap();
+    let k1 = keformat!(formatter, group = "foo", member = "bar").unwrap();
     assert_eq!(dbg!(k1).as_str(), "zenoh/foo/bar");
 
     formatter.set("member", "*").unwrap();
@@ -29,8 +32,8 @@ fn kedefine_reuse() {
     let k2 = dbg!(&mut formatter).build().unwrap();
     assert_eq!(dbg!(k2).as_str(), "zenoh/foo/*");
 
-    let k3 = zenoh::keformat!(formatter, group = "foo", member = "*").unwrap();
+    let k3 = keformat!(formatter, group = "foo", member = "*").unwrap();
     assert_eq!(dbg!(k3).as_str(), "zenoh/foo/*");
 
-    zenoh::keformat!(formatter, group = "**", member = "**").unwrap_err();
+    keformat!(formatter, group = "**", member = "**").unwrap_err();
 }
diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs
new file mode 100644
index 0000000000..640ed33b89
--- /dev/null
+++ b/zenoh/tests/handler.rs
@@ -0,0 +1,73 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{thread, time::Duration};
+
+use zenoh::{handlers::RingChannel, prelude::*, Config};
+
+#[test]
+fn pubsub_with_ringbuffer() {
+    let zenoh = zenoh::open(Config::default()).wait().unwrap();
+    let sub = zenoh
+        .declare_subscriber("test/ringbuffer")
+        .with(RingChannel::new(3))
+        .wait()
+        .unwrap();
+    for i in 0..10 {
+        zenoh
+            .put("test/ringbuffer", format!("put{i}"))
+            .wait()
+            .unwrap();
+    }
+    // Should only receive the last three samples ("put7", "put8", "put9")
+    for i in 7..10 {
+        assert_eq!(
+            sub.recv()
+                .unwrap()
+                .payload()
+                .deserialize::<String>()
+                .unwrap(),
+            format!("put{i}")
+        );
+    }
+    // Wait for the subscriber to get the value
+    thread::sleep(Duration::from_millis(1000));
+}
+
+#[test]
+fn query_with_ringbuffer() {
+    let zenoh = zenoh::open(Config::default()).wait().unwrap();
+    let queryable = zenoh
+        .declare_queryable("test/ringbuffer_query")
+        .with(RingChannel::new(1))
+        .wait()
+        .unwrap();
+
+    let _reply1 = zenoh
+        .get("test/ringbuffer_query")
+        .payload("query1")
+        .wait()
+        .unwrap();
+    let _reply2 = zenoh
+        .get("test/ringbuffer_query")
+        .payload("query2")
+        .wait()
+        .unwrap();
+
+    let query = queryable.recv().unwrap();
+    // Only receive the latest query
+    assert_eq!(
+        query.payload().unwrap().deserialize::<String>().unwrap(),
+        "query2"
+    );
+}
diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs
index dbbdd7ea0d..1e5ef13799 100644
--- a/zenoh/tests/interceptors.rs
+++ b/zenoh/tests/interceptors.rs
@@ -11,14 +11,22 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use std::collections::HashMap;
-use std::sync::{
-    atomic::{AtomicBool, AtomicUsize, Ordering},
-    Arc,
+#![cfg(unix)]
+
+use std::{
+    collections::HashMap,
+    sync::{
+        atomic::{AtomicBool, AtomicUsize, Ordering},
+        Arc,
+    },
+};
+
+use zenoh::{
+    config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow},
+    key_expr::KeyExpr,
+    prelude::*,
+    Config,
 };
-use zenoh::prelude::sync::*;
-use zenoh::prelude::Config;
-use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow};
 
 // Tokio's time granularity on different platforms
 #[cfg(target_os = "windows")]
@@ -48,8 +56,16 @@ fn build_config(
         .set_enabled(Some(false))
         .unwrap();
 
-    sub_config.listen.endpoints = vec![locator.parse().unwrap()];
-    pub_config.connect.endpoints = vec![locator.parse().unwrap()];
+    sub_config
+        .listen
+        .endpoints
+        .set(vec![locator.parse().unwrap()])
+        .unwrap();
+    pub_config
+        .connect
+        .endpoints
+        .set(vec![locator.parse().unwrap()])
+        .unwrap();
 
     match flow {
         InterceptorFlow::Egress => pub_config.set_downsampling(ds_config).unwrap(),
@@ -77,32 +93,32 @@ fn downsampling_test(
             .collect(),
     );
 
-    let sub_session = zenoh::open(sub_config).res().unwrap();
+    let sub_session = zenoh::open(sub_config).wait().unwrap();
     let _sub = sub_session
         .declare_subscriber(format!("{ke_prefix}/*"))
        .callback({
             let counters = counters.clone();
             move |sample| {
                 counters
-                    .get(&sample.key_expr)
+                    .get(sample.key_expr())
                     .map(|ctr| ctr.fetch_add(1, Ordering::SeqCst));
             }
         })
-        .res()
+        .wait()
        .unwrap();
 
     let is_terminated = Arc::new(AtomicBool::new(false));
     let c_is_terminated = is_terminated.clone();
     let handle = std::thread::spawn(move || {
-        let pub_session = zenoh::open(pub_config).res().unwrap();
+        let pub_session = zenoh::open(pub_config).wait().unwrap();
         let publishers: Vec<_> = ke_of_rates
             .into_iter()
-            .map(|ke| pub_session.declare_publisher(ke).res().unwrap())
+            .map(|ke| pub_session.declare_publisher(ke).wait().unwrap())
             .collect();
         let interval = std::time::Duration::from_millis(MINIMAL_SLEEP_INTERVAL_MS);
         while !c_is_terminated.load(Ordering::SeqCst) {
             publishers.iter().for_each(|publ| {
-                publ.put("message").res().unwrap();
+                publ.put("message").wait().unwrap();
             });
             std::thread::sleep(interval);
         }
@@ -176,7 +192,7 @@ fn downsampling_by_keyexpr_impl(flow: InterceptorFlow) {
 
 #[test]
 fn downsampling_by_keyexpr() {
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
     downsampling_by_keyexpr_impl(InterceptorFlow::Ingress);
     downsampling_by_keyexpr_impl(InterceptorFlow::Egress);
 }
@@ -229,7 +245,7 @@ fn downsampling_by_interface_impl(flow: InterceptorFlow) {
 #[cfg(unix)]
 #[test]
 fn downsampling_by_interface() {
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
     downsampling_by_interface_impl(InterceptorFlow::Ingress);
     downsampling_by_interface_impl(InterceptorFlow::Egress);
 }
@@ -237,9 +253,7 @@ fn downsampling_by_interface() {
 #[test]
 #[should_panic(expected = "unknown variant `down`")]
 fn downsampling_config_error_wrong_strategy() {
-    zenoh_util::try_init_log_from_env();
-
-    use zenoh::prelude::sync::*;
+    zenoh::try_init_log_from_env();
 
     let mut config = Config::default();
     config
@@ -259,5 +273,5 @@ fn downsampling_config_error_wrong_strategy() {
         )
         .unwrap();
 
-    zenoh::open(config).res().unwrap();
+    zenoh::open(config).wait().unwrap();
 }
diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs
index b4b138d78f..4d964cc1cf 100644
--- a/zenoh/tests/liveliness.rs
+++ b/zenoh/tests/liveliness.rs
@@ -11,68 +11,355 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use std::time::Duration;
-use zenoh::prelude::r#async::*;
+#[cfg(feature = "unstable")]
 use zenoh_core::ztimeout;
 
-const TIMEOUT: Duration = Duration::from_secs(60);
-const SLEEP: Duration = Duration::from_secs(1);
+#[cfg(feature = "unstable")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_liveliness_subscriber_clique() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    use zenoh_link::EndPoint;
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const PEER1_ENDPOINT: &str = "tcp/localhost:47447";
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/clique";
+
+    zenoh_util::try_init_log_from_env();
+
+    let peer1 = {
+        let mut c = config::default();
+        c.listen
+            .endpoints
+            .set(vec![PEER1_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (1) ZID: {}", s.zid());
+        s
+    };
+
+    let peer2 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![PEER1_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (2) ZID: {}", s.zid());
+        s
+    };
+
+    let sub = ztimeout!(peer1.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let token = ztimeout!(peer2.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(sub.recv_async()).unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    token.undeclare().await.unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(sub.recv_async()).unwrap();
+    assert!(sample.kind() == SampleKind::Delete);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    sub.undeclare().await.unwrap();
+
+    peer1.close().await.unwrap();
+    peer2.close().await.unwrap();
+}
 
 #[cfg(feature = "unstable")]
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-async fn zenoh_liveliness() {
-    let mut c1 = config::peer();
-    c1.listen
-        .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()])
-        .unwrap();
-    c1.scouting.multicast.set_enabled(Some(false)).unwrap();
-    let session1 = ztimeout!(zenoh::open(c1).res_async()).unwrap();
-    let mut c2 = config::peer();
-    c2.connect
-        .set_endpoints(vec!["tcp/localhost:47447".parse().unwrap()])
-        .unwrap();
-    c2.scouting.multicast.set_enabled(Some(false)).unwrap();
-    let session2 = ztimeout!(zenoh::open(c2).res_async()).unwrap();
-
-    let sub = ztimeout!(session2
-        .liveliness()
-        .declare_subscriber("zenoh_liveliness_test")
-        .res_async())
-    .unwrap();
-
-    let token = ztimeout!(session1
-        .liveliness()
-        .declare_token("zenoh_liveliness_test")
-        .res_async())
-    .unwrap();
+async fn test_liveliness_query_clique() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    use zenoh_link::EndPoint;
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const PEER1_ENDPOINT: &str = "tcp/localhost:47448";
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/clique";
+
+    zenoh_util::try_init_log_from_env();
+
+    let peer1 = {
+        let mut c = config::default();
+        c.listen
+            .endpoints
+            .set(vec![PEER1_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (1) ZID: {}", s.zid());
+        s
+    };
+    let peer2 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![PEER1_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (2) ZID: {}", s.zid());
+        s
+    };
+
+    let token = ztimeout!(peer1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let get = ztimeout!(peer2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    token.undeclare().await.unwrap();
+
+    peer1.close().await.unwrap();
+    peer2.close().await.unwrap();
+}
+
+#[cfg(feature = "unstable")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_liveliness_subscriber_brokered() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    use zenoh_link::EndPoint;
+
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const ROUTER_ENDPOINT: &str = "tcp/localhost:47449";
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/brokered";
+
+    zenoh_util::try_init_log_from_env();
+
+    let router = {
+        let mut c = config::default();
+        c.listen
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Router));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Router ZID: {}", s.zid());
+        s
+    };
+
+    let client1 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Client));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Client (1) ZID: {}", s.zid());
+        s
+    };
+
+    let client2 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Client));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Client (2) ZID: {}", s.zid());
+        s
+    };
+
+    let sub = ztimeout!(client1.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap();
     tokio::time::sleep(SLEEP).await;
 
-    let replies = ztimeout!(session2
-        .liveliness()
-        .get("zenoh_liveliness_test")
-        .res_async())
-    .unwrap();
-    let sample = ztimeout!(replies.recv_async()).unwrap().sample.unwrap();
-    assert!(sample.kind == SampleKind::Put);
-    assert!(sample.key_expr.as_str() == "zenoh_liveliness_test");
+    let token = ztimeout!(client2.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
 
-    assert!(ztimeout!(replies.recv_async()).is_err());
+    let sample = ztimeout!(sub.recv_async()).unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    token.undeclare().await.unwrap();
+    tokio::time::sleep(SLEEP).await;
 
     let sample = ztimeout!(sub.recv_async()).unwrap();
-    assert!(sample.kind == SampleKind::Put);
-    assert!(sample.key_expr.as_str() == "zenoh_liveliness_test");
+    assert!(sample.kind() == SampleKind::Delete);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
 
-    drop(token);
+    sub.undeclare().await.unwrap();
+
+    router.close().await.unwrap();
+    client1.close().await.unwrap();
+    client2.close().await.unwrap();
+}
+
+#[cfg(feature = "unstable")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_liveliness_query_brokered() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    use zenoh_link::EndPoint;
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const ROUTER_ENDPOINT: &str = "tcp/localhost:47450";
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/brokered";
+
+    zenoh_util::try_init_log_from_env();
+
+    let router = {
+        let mut c = config::default();
+        c.listen
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Router));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Router ZID: {}", s.zid());
+        s
+    };
+
+    let client1 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Client));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Client (1) ZID: {}", s.zid());
+        s
+    };
+
+    let client2 = {
+        let mut c = config::default();
+        c.connect
+            .endpoints
+            .set(vec![ROUTER_ENDPOINT.parse::<EndPoint>().unwrap()])
+            .unwrap();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Client));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Client (2) ZID: {}", s.zid());
+        s
+    };
+
+    let token = ztimeout!(client1.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let get = ztimeout!(client2.liveliness().get(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    token.undeclare().await.unwrap();
+
+    router.close().await.unwrap();
+    client1.close().await.unwrap();
+    client2.close().await.unwrap();
+}
+
+#[cfg(feature = "unstable")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_liveliness_subscriber_local() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/subscriber/local";
+
+    zenoh_util::try_init_log_from_env();
+
+    let peer = {
+        let mut c = config::default();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (1) ZID: {}", s.zid());
+        s
+    };
+
+    let sub = ztimeout!(peer.liveliness().declare_subscriber(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(sub.recv_async()).unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    token.undeclare().await.unwrap();
+    tokio::time::sleep(SLEEP).await;
+
+    let sample = ztimeout!(sub.recv_async()).unwrap();
+    assert!(sample.kind() == SampleKind::Delete);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
+
+    sub.undeclare().await.unwrap();
+    peer.close().await.unwrap();
+}
+
+#[cfg(feature = "unstable")]
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_liveliness_query_local() {
+    use std::time::Duration;
+
+    use zenoh::{config, prelude::*, sample::SampleKind};
+    use zenoh_config::WhatAmI;
+    const TIMEOUT: Duration = Duration::from_secs(60);
+    const SLEEP: Duration = Duration::from_secs(1);
+    const LIVELINESS_KEYEXPR: &str = "test/liveliness/query/local";
+
+    zenoh_util::try_init_log_from_env();
+
+    let peer = {
+        let mut c = config::default();
+        c.scouting.multicast.set_enabled(Some(false)).unwrap();
+        let _ = c.set_mode(Some(WhatAmI::Peer));
+        let s = ztimeout!(zenoh::open(c)).unwrap();
+        tracing::info!("Peer (1) ZID: {}", s.zid());
+        s
+    };
+
+    let token = ztimeout!(peer.liveliness().declare_token(LIVELINESS_KEYEXPR)).unwrap();
+    tokio::time::sleep(SLEEP).await;
 
+    let get = ztimeout!(peer.liveliness().get(LIVELINESS_KEYEXPR)).unwrap();
     tokio::time::sleep(SLEEP).await;
 
-    let replies = ztimeout!(session2
-        .liveliness()
-        .get("zenoh_liveliness_test")
-        .res_async())
-    .unwrap();
-    assert!(ztimeout!(replies.recv_async()).is_err());
+    let sample = ztimeout!(get.recv_async()).unwrap().into_result().unwrap();
+    assert!(sample.kind() == SampleKind::Put);
+    assert!(sample.key_expr().as_str() == LIVELINESS_KEYEXPR);
 
-    assert!(replies.try_recv().is_err());
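+    // the Put sample above confirms the token was still alive at query time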
token.undeclare().await.unwrap(); + peer.close().await.unwrap(); } diff --git a/zenoh/tests/matching.rs b/zenoh/tests/matching.rs index e56036f5de..da0ba0a6d1 100644 --- a/zenoh/tests/matching.rs +++ b/zenoh/tests/matching.rs @@ -11,217 +11,191 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::time::Duration; -use zenoh::prelude::r#async::*; +#![cfg(feature = "unstable")] +use std::{str::FromStr, time::Duration}; + +use flume::RecvTimeoutError; +use zenoh::{config, config::Locator, prelude::*, sample::Locality, Session}; use zenoh_core::ztimeout; -use zenoh_result::ZResult as Result; const TIMEOUT: Duration = Duration::from_secs(60); const RECV_TIMEOUT: Duration = Duration::from_secs(1); -#[cfg(feature = "unstable")] async fn create_session_pair(locator: &str) -> (Session, Session) { let config1 = { - let mut config = zenoh::config::peer(); + let mut config = config::peer(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); config .listen - .set_endpoints(vec![locator.parse().unwrap()]) + .endpoints + .set(vec![locator.parse().unwrap()]) .unwrap(); config }; - let config2 = zenoh::config::client([Locator::from_str(locator).unwrap()]); + let config2 = config::client([Locator::from_str(locator).unwrap()]); - let session1 = ztimeout!(zenoh::open(config1).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(config2).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config1)).unwrap(); + let session2 = ztimeout!(zenoh::open(config2)).unwrap(); (session1, session2) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_any() -> Result<()> { - use flume::RecvTimeoutError; - +async fn zenoh_matching_status_any() -> ZResult<()> { + zenoh_util::try_init_log_from_env(); let (session1, session2) = create_session_pair("tcp/127.0.0.1:18001").await; let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_any_test") - .allowed_destination(Locality::Any) - .res_async()) + .allowed_destination(Locality::Any)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = 
ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_any_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_any_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_remote() -> Result<()> { - use flume::RecvTimeoutError; - - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); +async fn zenoh_matching_status_remote() -> ZResult<()> { + zenoh_util::try_init_log_from_env(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_remote_test") - .allowed_destination(Locality::Remote) - .res_async()) + .allowed_destination(Locality::Remote)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_remote_test") - .res_async()) - .unwrap(); + let sub = 
ztimeout!(session2.declare_subscriber("zenoh_matching_status_remote_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) } -#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn zenoh_matching_status_local() -> Result<()> { - use flume::RecvTimeoutError; - - let session1 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); +async fn zenoh_matching_status_local() -> ZResult<()> { + zenoh_util::try_init_log_from_env(); - let session2 = ztimeout!(zenoh::open(config::peer()).res_async()).unwrap(); + let session1 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh::config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("zenoh_matching_status_local_test") - .allowed_destination(Locality::SessionLocal) - .res_async()) + .allowed_destination(Locality::SessionLocal)) .unwrap(); - let matching_listener = ztimeout!(publisher1.matching_listener().res_async()).unwrap(); + let matching_listener = ztimeout!(publisher1.matching_listener()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session1 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session1.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(true)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.ok().map(|s| s.matching_subscribers()) == Some(false)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - let sub = ztimeout!(session2 - .declare_subscriber("zenoh_matching_status_local_test") - .res_async()) - .unwrap(); + let sub = ztimeout!(session2.declare_subscriber("zenoh_matching_status_local_test")).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == 
Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); let received_status = matching_listener.recv_timeout(RECV_TIMEOUT); assert!(received_status.err() == Some(RecvTimeoutError::Timeout)); - let matching_status = ztimeout!(publisher1.matching_status().res_async()).unwrap(); + let matching_status = ztimeout!(publisher1.matching_status()).unwrap(); assert!(!matching_status.matching_subscribers()); Ok(()) diff --git a/zenoh/tests/open_time.rs b/zenoh/tests/open_time.rs new file mode 100644 index 0000000000..7f1c2b2972 --- /dev/null +++ b/zenoh/tests/open_time.rs @@ -0,0 +1,429 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +#![allow(unused)] +use std::{ + future::IntoFuture, + time::{Duration, Instant}, +}; + +use zenoh_config::Config; +use zenoh_link::EndPoint; +use zenoh_protocol::core::WhatAmI; + +const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); +const SLEEP: Duration = Duration::from_millis(100); + +macro_rules! ztimeout_expected { + ($f:expr) => { + tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() + }; +} + +async fn time_open( + listen_endpoint: &EndPoint, + connect_endpoint: &EndPoint, + connect_mode: WhatAmI, + lowlatency: bool, +) { + /* [ROUTER] */ + let mut router_config = Config::default(); + router_config.set_mode(Some(WhatAmI::Router)).unwrap(); + router_config + .listen + .endpoints + .set(vec![listen_endpoint.clone()]) + .unwrap(); + router_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + router_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + let start = Instant::now(); + let router = ztimeout_expected!(zenoh::open(router_config).into_future()).unwrap(); + println!( + "open(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [APP] */ + let mut app_config = Config::default(); + app_config.set_mode(Some(connect_mode)).unwrap(); + app_config + .connect + .endpoints + .set(vec![connect_endpoint.clone()]) + .unwrap(); + app_config + .transport + .unicast + .set_lowlatency(lowlatency) + .unwrap(); + app_config + .transport + .unicast + .qos + .set_enabled(!lowlatency) + .unwrap(); + + /* [1] */ + // Open a transport from the app to the router + let start = Instant::now(); + let app = ztimeout_expected!(zenoh::open(app_config).into_future()).unwrap(); + println!( + "open(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + connect_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [2] */ + // Close the open transport on the app + let start = Instant::now(); + ztimeout_expected!(app.close().into_future()).unwrap(); + println!( + "close(mode:{}, connect_endpoint:{}, lowlatency:{}): {:#?}", + connect_mode, + 
connect_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + /* [3] */ + // Close the router + let start = Instant::now(); + ztimeout_expected!(router.close().into_future()).unwrap(); + println!( + "close(mode:{}, listen_endpoint:{}, lowlatency:{}): {:#?}", + WhatAmI::Router, + listen_endpoint.as_str().split('#').next().unwrap(), + lowlatency, + start.elapsed() + ); + + // Wait a little bit + tokio::time::sleep(SLEEP).await; +} + +async fn time_universal_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, false).await +} + +async fn time_lowlatency_open(endpoint: &EndPoint, mode: WhatAmI) { + time_open(endpoint, endpoint, mode, true).await +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14000).parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_tcp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tcp_only_with_lowlatency_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 14100).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14010).parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_udp")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_udp_only_with_lowlatency_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 14110).parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_open() { +// zenoh::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14020).parse().unwrap(); +// time_universal_open(&endpoint, WhatAmI::Client).await; +// } + +// #[cfg(feature = "transport_ws")] +// #[tokio::test(flavor = "multi_thread", worker_threads = 4)] +// #[ignore] +// async fn time_ws_only_with_lowlatency_open() { +// zenoh::try_init_log_from_env(); +// let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 14120).parse().unwrap(); +// time_lowlatency_open(&endpoint, WhatAmI::Client).await; +// } + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_open".parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_unixpipe")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_unixpipe_only_with_lowlatency_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_open" + .parse() + .unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn 
time_unix_only_open() { + zenoh::try_init_log_from_env(); + let f1 = "zenoh-test-unix-socket-9-open.sock"; + let _ = std::fs::remove_file(f1); + let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); + time_universal_open(&endpoint, WhatAmI::Client).await; + let _ = std::fs::remove_file(f1); + let _ = std::fs::remove_file(format!("{f1}.lock")); +} + +#[cfg(feature = "transport_tls")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_tls_only_open() { + use zenoh_link::tls::config::*; + + zenoh::try_init_log_from_env(); + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q 
+GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + let mut endpoint: EndPoint = format!("tls/localhost:{}", 14030).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(feature = "transport_quic")] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_quic_only_open() { + use zenoh_link::quic::config::*; + + // NOTE: this an auto-generated pair of certificate and key. + // The target domain is localhost, so it has no real + // mapping to any existing domain. 
The certificate and key + // have been generated using: https://github.com/jsha/minica + let key = "-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY-----"; + + let cert = "-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE-----"; + + // Configure the client + let ca = "-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 
+T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE-----"; + + // Define the locator + let mut endpoint: EndPoint = format!("quic/localhost:{}", 14040).parse().unwrap(); + endpoint + .config_mut() + .extend_from_iter( + [ + (TLS_ROOT_CA_CERTIFICATE_RAW, ca), + (TLS_SERVER_PRIVATE_KEY_RAW, key), + (TLS_SERVER_CERTIFICATE_RAW, cert), + ] + .iter() + .copied(), + ) + .unwrap(); + + time_universal_open(&endpoint, WhatAmI::Client).await; +} + +#[cfg(all(feature = "transport_vsock", target_os = "linux"))] +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[ignore] +async fn time_vsock_only_open() { + zenoh::try_init_log_from_env(); + let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:18000".parse().unwrap(); + time_lowlatency_open(&endpoint, WhatAmI::Client).await; +} diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 0e28af0847..8d7d7e7322 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -12,44 +12,54 @@ // ZettaScale Zenoh Team, // use std::time::Duration; -use zenoh::prelude::r#async::*; -use zenoh::{publication::Priority, SessionDeclarations}; + +use zenoh::{ + bytes::Encoding, + prelude::*, + qos::{CongestionControl, Priority}, +}; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); #[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn pubsub() { - let session1 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); - let session2 = ztimeout!(zenoh::open(zenoh_config::peer()).res_async()).unwrap(); +async fn qos_pubsub() { + let session1 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); + let session2 = ztimeout!(zenoh::open(zenoh_config::peer())).unwrap(); let publisher1 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding("text/plain") .priority(Priority::DataHigh) .congestion_control(CongestionControl::Drop) - .res()) + .express(true)) .unwrap(); let publisher2 = ztimeout!(session1 .declare_publisher("test/qos") + .encoding(Encoding::ZENOH_STRING) .priority(Priority::DataLow) .congestion_control(CongestionControl::Block) - .res()) + .express(false)) .unwrap(); - let subscriber = ztimeout!(session2.declare_subscriber("test/qos").res()).unwrap(); + let subscriber = ztimeout!(session2.declare_subscriber("test/qos")).unwrap(); tokio::time::sleep(SLEEP).await; - ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + ztimeout!(publisher1.put("qos")).unwrap(); + let sample = ztimeout!(subscriber.recv_async()).unwrap(); - assert_eq!(qos.priority(), Priority::DataHigh); - assert_eq!(qos.congestion_control(), CongestionControl::Drop); + assert_eq!(sample.encoding(), &Encoding::TEXT_PLAIN); + assert_eq!(sample.priority(), Priority::DataHigh); + assert_eq!(sample.congestion_control(), CongestionControl::Drop); + 
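Aside on the qos.rs rewrite in progress here: QoS metadata that previously lived in a separate `qos` field on the received message is now exposed through accessors directly on `Sample`, next to the new per-publisher `encoding(...)` and `express(...)` builders. A minimal subscriber-side sketch of the accessor API, assuming only the items this hunk already uses (the key expression and printout are illustrative):

```rust
use zenoh::prelude::*;

// Sketch: read the QoS metadata carried by each received sample, using the
// same accessors the qos_pubsub test asserts on.
async fn print_sample_qos() -> zenoh::Result<()> {
    let session = zenoh::open(zenoh::config::peer()).await?;
    let subscriber = session.declare_subscriber("test/qos").await?;
    while let Ok(sample) = subscriber.recv_async().await {
        println!(
            "encoding={:?} priority={:?} congestion_control={:?} express={}",
            sample.encoding(),
            sample.priority(),
            sample.congestion_control(),
            sample.express(),
        );
    }
    Ok(())
}
```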
assert!(sample.express()); - ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; + ztimeout!(publisher2.put("qos")).unwrap(); + let sample = ztimeout!(subscriber.recv_async()).unwrap(); - assert_eq!(qos.priority(), Priority::DataLow); - assert_eq!(qos.congestion_control(), CongestionControl::Block); + assert_eq!(sample.encoding(), &Encoding::ZENOH_STRING); + assert_eq!(sample.priority(), Priority::DataLow); + assert_eq!(sample.congestion_control(), CongestionControl::Block); + assert!(!sample.express()); } diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index be479756b3..07971b7853 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -12,26 +12,27 @@ // ZettaScale Zenoh Team, // use std::{ - str::FromStr, - sync::{atomic::AtomicUsize, atomic::Ordering, Arc}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, time::Duration, }; -use tokio_util::{sync::CancellationToken, task::TaskTracker}; + +use tokio_util::sync::CancellationToken; use zenoh::{ - config::{Config, ModeDependentValue}, - prelude::r#async::*, - value::Value, - Result, + config::{ModeDependentValue, WhatAmI, WhatAmIMatcher}, + prelude::*, + qos::CongestionControl, + Config, Result, Session, }; use zenoh_core::ztimeout; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; const TIMEOUT: Duration = Duration::from_secs(10); const MSG_COUNT: usize = 50; -const MSG_SIZE: [usize; 2] = [1_024, 131_072]; -// Maximal recipes to run at once -const PARALLEL_RECIPES: usize = 4; +#[cfg(feature = "unstable")] +const LIVELINESSGET_DELAY: Duration = Duration::from_millis(10); #[derive(Debug, Clone, PartialEq, Eq)] enum Task { @@ -39,6 +40,14 @@ enum Task { Sub(String, usize), Queryable(String, usize), Get(String, usize), + #[cfg(feature = "unstable")] + Liveliness(String), + #[cfg(feature = "unstable")] + LivelinessGet(String), + #[cfg(feature = "unstable")] + LivelinessLoop(String), + #[cfg(feature = "unstable")] + LivelinessSub(String), Sleep(Duration), Wait, Checkpoint, @@ -54,14 +63,14 @@ impl Task { match self { // The Sub task checks if the incoming message matches the expected size until it receives enough counts. Self::Sub(ke, expected_size) => { - let sub = ztimeout!(session.declare_subscriber(ke).res_async())?; + let sub = ztimeout!(session.declare_subscriber(ke))?; let mut counter = 0; loop { tokio::select! { _ = token.cancelled() => break, res = sub.recv_async() => { if let Ok(sample) = res { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -79,17 +88,16 @@ impl Task { // The Pub task keeps putting messages until all checkpoints are finished. Self::Pub(ke, payload_size) => { - let value: Value = vec![0u8; *payload_size].into(); - // while remaining_checkpoints.load(Ordering::Relaxed) > 0 { loop { tokio::select! 
{ _ = token.cancelled() => break, // WARN: this won't yield after a timeout since the put is a blocking call - res = tokio::time::timeout(std::time::Duration::from_secs(1), session - .put(ke, value.clone()) + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {session + .put(ke, vec![0u8; *payload_size]) .congestion_control(CongestionControl::Block) - .res()) => { + .await + }) => { let _ = res?; } } @@ -97,18 +105,34 @@ impl Task { println!("Pub task done."); } + // The Queryable task keeps replying to requested messages until all checkpoints are finished. + Self::Queryable(ke, payload_size) => { + let queryable = ztimeout!(session.declare_queryable(ke))?; + let payload = vec![0u8; *payload_size]; + + loop { + tokio::select! { + _ = token.cancelled() => break, + query = queryable.recv_async() => { + ztimeout!(query?.reply(ke.to_owned(), payload.clone()))?; + }, + } + } + println!("Queryable task done."); + } + // The Get task gets and checks if the incoming message matches the expected size until it receives enough counts. Self::Get(ke, expected_size) => { let mut counter = 0; while counter < MSG_COUNT { tokio::select! { _ = token.cancelled() => break, - replies = session.get(ke).timeout(Duration::from_secs(10)).res() => { + replies = async { session.get(ke).timeout(Duration::from_secs(10)).await } => { let replies = replies?; while let Ok(reply) = replies.recv_async().await { - match reply.sample { + match reply.result() { Ok(sample) => { - let recv_size = sample.value.payload.len(); + let recv_size = sample.payload().len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -116,7 +140,7 @@ impl Task { Err(err) => { tracing::warn!( - "Sample got from {} failed to unwrap! Error: {}.", + "Sample got from {} failed to unwrap! Error: {:?}.", ke, err ); @@ -131,20 +155,92 @@ impl Task { println!("Get got sufficient amount of messages. Done."); } - // The Queryable task keeps replying to requested messages until all checkpoints are finished. - Self::Queryable(ke, payload_size) => { - let queryable = session.declare_queryable(ke).res_async().await?; - let sample = Sample::try_from(ke.clone(), vec![0u8; *payload_size])?; + #[cfg(feature = "unstable")] + // The Liveliness task. + Self::Liveliness(ke) => { + let _liveliness = ztimeout!(session.liveliness().declare_token(ke))?; + + token.cancelled().await; + println!("Liveliness task done."); + } + + #[cfg(feature = "unstable")] + // The LivelinessGet task. + Self::LivelinessGet(ke) => { + let mut counter = 0; + while counter < MSG_COUNT { + tokio::select! { + _ = token.cancelled() => break, + replies = async { session.liveliness().get(ke).timeout(Duration::from_secs(10)).await } => { + let replies = replies?; + while let Ok(reply) = replies.recv_async().await { + if let Err(err) = reply.result() { + tracing::warn!( + "Sample got from {} failed to unwrap! Error: {:?}.", + ke, + err + ); + continue; + } + counter += 1; + } + tokio::time::sleep(LIVELINESSGET_DELAY).await; + } + } + } + println!("LivelinessGet got sufficient amount of messages. Done."); + } + + // The LivelinessLoop task. + #[cfg(feature = "unstable")] + Self::LivelinessLoop(ke) => { + let mut liveliness: Option<LivelinessToken> = None; + loop { + match liveliness.take() { + Some(liveliness) => { + tokio::select! { + _ = token.cancelled() => break, + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {liveliness.undeclare().await}) => { + _ = res?; + } + } + } + None => { + tokio::select! 
{ + _ = token.cancelled() => break, + res = tokio::time::timeout(std::time::Duration::from_secs(1), async {session.liveliness().declare_token(ke) + .await + }) => { + liveliness = res?.ok(); + } + } + } + } + } + println!("LivelinessLoop task done."); + } + + #[cfg(feature = "unstable")] + // The LivelinessSub task. + Self::LivelinessSub(ke) => { + let sub = ztimeout!(session.liveliness().declare_subscriber(ke))?; + let mut counter = 0; loop { tokio::select! { _ = token.cancelled() => break, - query = queryable.recv_async() => { - query?.reply(Ok(sample.clone())).res_async().await?; - }, + res = sub.recv_async() => { + if res.is_ok() { + counter += 1; + if counter >= MSG_COUNT { + println!("LivelinessSub received sufficient amount of messages. Done."); + break; + } + } + } } } - println!("Queryable task done."); + println!("LivelinessSub task done."); } // Make the zenoh session sleep for a while. @@ -266,16 +362,20 @@ impl Recipe { let mut config = node.config.unwrap_or_default(); config.set_mode(Some(node.mode)).unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config - .listen - .set_endpoints(node.listen.iter().map(|x| x.parse().unwrap()).collect()) - .unwrap(); - config - .connect - .set_endpoints( - node.connect.iter().map(|x| x.parse().unwrap()).collect(), - ) - .unwrap(); + if !node.listen.is_empty() { + config + .listen + .endpoints + .set(node.listen.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } + if !node.connect.is_empty() { + config + .connect + .endpoints + .set(node.connect.iter().map(|x| x.parse().unwrap()).collect()) + .unwrap(); + } config }; @@ -285,7 +385,7 @@ impl Recipe { // In case of client can't connect to some peers/routers loop { - if let Ok(session) = zenoh::open(config.clone()).res_async().await { + if let Ok(session) = ztimeout!(zenoh::open(config.clone())) { break session.into_arc(); } else { tokio::time::sleep(Duration::from_secs(1)).await; @@ -321,11 +421,7 @@ impl Recipe { // node_task_tracker.wait().await; // Close the session once all the task associated with the node are done. - Arc::try_unwrap(session) - .unwrap() - .close() - .res_async() - .await?; + ztimeout!(Arc::try_unwrap(session).unwrap().close())?; println!("Node: {} is closed.", &node.name); Result::Ok(()) @@ -365,7 +461,7 @@ impl Recipe { // And the message transmission should work even if the common node disappears after a while. #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn gossip() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17446"); let ke = String::from("testKeyExprGossip"); @@ -433,7 +529,7 @@ async fn gossip() -> Result<()> { // Simulate two peers connecting to a router but not directly reachable to each other can exchange messages via the brokering by the router. 
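Before the brokering test below, one remark on the task plumbing added above: every long-running `Task` variant (Sub, Pub, Queryable, and the four Liveliness tasks) follows the same shape, racing its blocking operation against a shared `CancellationToken` inside `tokio::select!` so a recipe can tear all nodes down deterministically once the checkpoints are hit. A distilled sketch of that pattern, with a `flume` receiver standing in for any of the test channels (names and the checkpoint count are illustrative):

```rust
use tokio_util::sync::CancellationToken;

// Sketch: receive until either enough messages arrived or the recipe
// cancelled the token, mirroring the select loop used by the Task variants.
async fn recv_until_cancelled(rx: flume::Receiver<Vec<u8>>, token: CancellationToken) -> usize {
    let mut counter = 0;
    loop {
        tokio::select! {
            _ = token.cancelled() => break,
            res = rx.recv_async() => {
                if res.is_ok() {
                    counter += 1;
                    if counter >= 50 {
                        break; // checkpoint reached
                    }
                }
            }
        }
    }
    counter
}
```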
#[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn static_failover_brokering() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let locator = String::from("tcp/127.0.0.1:17449"); let ke = String::from("testKeyExprStaticFailoverBrokering"); let msg_size = 8; @@ -443,9 +539,7 @@ async fn static_failover_brokering() -> Result<()> { config .scouting .gossip - .set_autoconnect(Some(ModeDependentValue::Unique( - WhatAmIMatcher::from_str("").unwrap(), - ))) + .set_autoconnect(Some(ModeDependentValue::Unique(WhatAmIMatcher::empty()))) .unwrap(); Some(config) }; @@ -486,15 +580,24 @@ async fn static_failover_brokering() -> Result<()> { Result::Ok(()) } +#[cfg(feature = "unstable")] +use tokio_util::task::TaskTracker; +#[cfg(feature = "unstable")] +const MSG_SIZE: [usize; 2] = [1_024, 131_072]; +// Maximal recipes to run at once +#[cfg(feature = "unstable")] +const PARALLEL_RECIPES: usize = 4; + // All test cases varying in // 1. Message size: 2 (sizes) // 2. Mode: {Client, Peer} x {Client x Peer} x {Router} = 2 x 2 x 1 = 4 (cases) // 3. Spawning order (delay_in_secs for node1, node2, and node3) = 6 (cases) // // Total cases = 2 x 4 x 6 = 48 +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 9)] async fn three_node_combination() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let modes = [WhatAmI::Peer, WhatAmI::Client]; let delay_in_secs = [ (0, 1, 2), @@ -522,6 +625,10 @@ async fn three_node_combination() -> Result<()> { let ke_pubsub = format!("three_node_combination_keyexpr_pubsub_{idx}"); let ke_getqueryable = format!("three_node_combination_keyexpr_getqueryable_{idx}"); + let ke_getliveliness = + format!("three_node_combination_keyexpr_getliveliness_{idx}"); + let ke_subliveliness = + format!("three_node_combination_keyexpr_subliveliness_{idx}"); use rand::Rng; let mut rng = rand::thread_rng(); @@ -536,7 +643,7 @@ async fn three_node_combination() -> Result<()> { ..Default::default() }; - let (pub_node, queryable_node) = { + let (pub_node, queryable_node, liveliness_node, livelinessloop_node) = { let base = Node { mode: node1_mode, connect: vec![locator.clone()], @@ -552,7 +659,7 @@ async fn three_node_combination() -> Result<()> { )])]); pub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - let mut queryable_node = base; + let mut queryable_node = base.clone(); queryable_node.name = format!("Queryable {node1_mode}"); queryable_node.con_task = ConcurrentTask::from([SequentialTask::from([Task::Queryable( @@ -561,10 +668,31 @@ async fn three_node_combination() -> Result<()> { )])]); queryable_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - (pub_node, queryable_node) + let mut liveliness_node = base.clone(); + liveliness_node.name = format!("Liveliness {node1_mode}"); + liveliness_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::Liveliness( + ke_getliveliness.clone(), + )])]); + liveliness_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + let mut livelinessloop_node = base; + livelinessloop_node.name = format!("LivelinessLoop {node1_mode}"); + livelinessloop_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::LivelinessLoop( + ke_subliveliness.clone(), + )])]); + livelinessloop_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + ( + pub_node, + queryable_node, + liveliness_node, + livelinessloop_node, + ) }; - let (sub_node, get_node) = { + let (sub_node, get_node, 
livelinessget_node, livelinesssub_node) = { let base = Node { mode: node2_mode, connect: vec![locator], @@ -580,7 +708,7 @@ async fn three_node_combination() -> Result<()> { ])]); sub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - let mut get_node = base; + let mut get_node = base.clone(); get_node.name = format!("Get {node2_mode}"); get_node.con_task = ConcurrentTask::from([SequentialTask::from([ Task::Get(ke_getqueryable, msg_size), @@ -588,12 +716,30 @@ async fn three_node_combination() -> Result<()> { ])]); get_node.warmup += Duration::from_millis(rng.gen_range(0..500)); - (sub_node, get_node) + let mut livelinessget_node = base.clone(); + livelinessget_node.name = format!("LivelinessGet {node2_mode}"); + livelinessget_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessGet(ke_getliveliness), + Task::Checkpoint, + ])]); + livelinessget_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + let mut livelinesssub_node = base; + livelinesssub_node.name = format!("LivelinessSub {node2_mode}"); + livelinesssub_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessSub(ke_subliveliness), + Task::Checkpoint, + ])]); + livelinesssub_node.warmup += Duration::from_millis(rng.gen_range(0..500)); + + (sub_node, get_node, livelinessget_node, livelinesssub_node) }; ( Recipe::new([router_node.clone(), pub_node, sub_node]), - Recipe::new([router_node, queryable_node, get_node]), + Recipe::new([router_node.clone(), queryable_node, get_node]), + Recipe::new([router_node.clone(), liveliness_node, livelinessget_node]), + Recipe::new([router_node, livelinessloop_node, livelinesssub_node]), ) }, ) @@ -601,10 +747,12 @@ async fn three_node_combination() -> Result<()> { for chunks in recipe_list.chunks(4).map(|x| x.to_vec()) { let mut join_set = tokio::task::JoinSet::new(); - for (pubsub, getqueryable) in chunks { + for (pubsub, getqueryable, getliveliness, subliveliness) in chunks { join_set.spawn(async move { pubsub.run().await?; getqueryable.run().await?; + getliveliness.run().await?; + subliveliness.run().await?; Result::Ok(()) }); } @@ -623,9 +771,10 @@ async fn three_node_combination() -> Result<()> { // 2. 
Mode: {Client, Peer} x {Client, Peer} x {IsFirstListen} = 2 x 2 x 2 = 8 (modes) // // Total cases = 2 x 8 = 16 +#[cfg(feature = "unstable")] #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn two_node_combination() -> Result<()> { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); #[derive(Clone, Copy)] struct IsFirstListen(bool); @@ -647,6 +796,8 @@ async fn two_node_combination() -> Result<()> { idx += 1; let ke_pubsub = format!("two_node_combination_keyexpr_pubsub_{idx}"); let ke_getqueryable = format!("two_node_combination_keyexpr_getqueryable_{idx}"); + let ke_subliveliness = format!("two_node_combination_keyexpr_subliveliness_{idx}"); + let ke_getliveliness = format!("two_node_combination_keyexpr_getliveliness_{idx}"); let (node1_listen_connect, node2_listen_connect) = { let locator = format!("tcp/127.0.0.1:{}", base_port + idx); @@ -660,7 +811,7 @@ async fn two_node_combination() -> Result<()> { } }; - let (pub_node, queryable_node) = { + let (pub_node, queryable_node, liveliness_node, livelinessloop_node) = { let base = Node { mode: node1_mode, listen: node1_listen_connect.0, @@ -675,7 +826,7 @@ async fn two_node_combination() -> Result<()> { msg_size, )])]); - let mut queryable_node = base; + let mut queryable_node = base.clone(); queryable_node.name = format!("Queryable {node1_mode}"); queryable_node.con_task = ConcurrentTask::from([SequentialTask::from([Task::Queryable( @@ -683,10 +834,29 @@ async fn two_node_combination() -> Result<()> { msg_size, )])]); - (pub_node, queryable_node) + let mut liveliness_node = base.clone(); + liveliness_node.name = format!("Liveliness {node1_mode}"); + liveliness_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::Liveliness( + ke_getliveliness.clone(), + )])]); + + let mut livelinessloop_node = base; + livelinessloop_node.name = format!("LivelinessLoop {node1_mode}"); + livelinessloop_node.con_task = + ConcurrentTask::from([SequentialTask::from([Task::LivelinessLoop( + ke_subliveliness.clone(), + )])]); + + ( + pub_node, + queryable_node, + liveliness_node, + livelinessloop_node, + ) }; - let (sub_node, get_node) = { + let (sub_node, get_node, livelinessget_node, livelinesssub_node) = { let base = Node { mode: node2_mode, listen: node2_listen_connect.0, @@ -701,29 +871,47 @@ async fn two_node_combination() -> Result<()> { Task::Checkpoint, ])]); - let mut get_node = base; + let mut get_node = base.clone(); get_node.name = format!("Get {node2_mode}"); get_node.con_task = ConcurrentTask::from([SequentialTask::from([ Task::Get(ke_getqueryable, msg_size), Task::Checkpoint, ])]); - (sub_node, get_node) + let mut livelinessget_node = base.clone(); + livelinessget_node.name = format!("LivelinessGet {node2_mode}"); + livelinessget_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessGet(ke_getliveliness), + Task::Checkpoint, + ])]); + + let mut livelinesssub_node = base; + livelinesssub_node.name = format!("LivelinessSub {node2_mode}"); + livelinesssub_node.con_task = ConcurrentTask::from([SequentialTask::from([ + Task::LivelinessSub(ke_subliveliness), + Task::Checkpoint, + ])]); + + (sub_node, get_node, livelinessget_node, livelinesssub_node) }; ( Recipe::new([pub_node, sub_node]), Recipe::new([queryable_node, get_node]), + Recipe::new([liveliness_node, livelinessget_node]), + Recipe::new([livelinessloop_node, livelinesssub_node]), ) }) .collect(); for chunks in recipe_list.chunks(PARALLEL_RECIPES).map(|x| x.to_vec()) { let task_tracker = TaskTracker::new(); - for 
(pubsub, getqueryable) in chunks { + for (pubsub, getqueryable, getlivelienss, subliveliness) in chunks { task_tracker.spawn(async move { pubsub.run().await?; getqueryable.run().await?; + getlivelienss.run().await?; + subliveliness.run().await?; Result::Ok(()) }); } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index f5061f7fd7..7515eefc49 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -11,12 +11,24 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::Duration; -use zenoh::prelude::r#async::*; -use zenoh::runtime::{Runtime, RuntimeBuilder}; +use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +#[cfg(feature = "internal")] +use zenoh::internal::runtime::{Runtime, RuntimeBuilder}; +#[cfg(feature = "unstable")] +use zenoh::pubsub::Reliability; +use zenoh::{ + config, key_expr::KeyExpr, prelude::*, qos::CongestionControl, sample::SampleKind, Session, +}; use zenoh_core::ztimeout; +#[cfg(not(feature = "unstable"))] +use zenoh_protocol::core::Reliability; const TIMEOUT: Duration = Duration::from_secs(60); const SLEEP: Duration = Duration::from_secs(1); @@ -27,22 +39,34 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000]; async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::<Vec<_>>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::<Vec<_>>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[  ][01a] Opening peer01 session: {:?}", endpoints); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::<Vec<_>>(); + config + .connect + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::<Vec<_>>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[  ][02a] Opening peer02 session: {:?}", endpoints); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } @@ -50,25 +74,33 @@ async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) { async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = vec![endpoint01.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint01.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[  ][01a] Opening peer01 session: {}", endpoint01); - let peer01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer01 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.listen.endpoints = vec![endpoint02.parse().unwrap()]; + config + .listen + .endpoints + .set(vec![endpoint02.parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(true)).unwrap(); println!("[  ][02a] Opening peer02 session: {}", endpoint02); - let peer02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let peer02 = ztimeout!(zenoh::open(config)).unwrap(); (peer01, peer02) } async fn close_session(peer01: Session, peer02: Session) { 
println!("[ ][01d] Closing peer01 session"); - ztimeout!(peer01.close().res_async()).unwrap(); + ztimeout!(peer01.close()).unwrap(); println!("[ ][02d] Closing peer02 session"); - ztimeout!(peer02.close().res_async()).unwrap(); + ztimeout!(peer02.close()).unwrap(); } async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) { @@ -85,13 +117,10 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re // Subscribe to data println!("[PS][01b] Subscribing on peer01 session"); let c_msgs = msgs.clone(); - let sub = ztimeout!(peer01 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); - c_msgs.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub = ztimeout!(peer01.declare_subscriber(key_expr).callback(move |sample| { + assert_eq!(sample.payload().len(), size); + c_msgs.fetch_add(1, Ordering::Relaxed); + })) .unwrap(); // Wait for the declaration to propagate @@ -102,8 +131,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re for _ in 0..msg_count { ztimeout!(peer02 .put(key_expr, vec![0u8; size]) - .congestion_control(CongestionControl::Block) - .res_async()) + .congestion_control(CongestionControl::Block)) .unwrap(); } @@ -123,7 +151,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re tokio::time::sleep(SLEEP).await; println!("[PS][03b] Unsubscribing on peer01 session"); - ztimeout!(sub.undeclare().res_async()).unwrap(); + ztimeout!(sub.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -144,29 +172,51 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re // Queryable to data println!("[QR][01c] Queryable on peer01 session"); let c_msgs = msgs.clone(); - let qbl = ztimeout!(peer01 - .declare_queryable(key_expr) - .callback(move |sample| { - c_msgs.fetch_add(1, Ordering::Relaxed); - let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current() - .block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }) - }); - }) - .res_async()) + let qbl = ztimeout!(peer01.declare_queryable(key_expr).callback(move |query| { + c_msgs.fetch_add(1, Ordering::Relaxed); + match query.parameters().as_str() { + "ok_put" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + ztimeout!(query.reply( + KeyExpr::try_from(key_expr).unwrap(), + vec![0u8; size].to_vec() + )) + .unwrap() + }) + }); + } + "ok_del" => { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_del(key_expr)).unwrap() }) + }); + } + "err" => { + let rep = vec![0u8; size]; + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(query.reply_err(rep)).unwrap() }) + }); + } + _ => panic!("Unknown query parameter"), + } + })) .unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; // Get data - println!("[QR][02c] Getting on peer02 session. {msg_count} msgs."); + println!("[QR][02c] Getting Ok(Put) on peer02 session. 
{msg_count} msgs."); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(peer02.get(key_expr).res_async()).unwrap(); + let selector = format!("{}?ok_put", key_expr); + let rs = ztimeout!(peer02.get(selector)).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().value.payload.len(), size); + let s = s.result().unwrap(); + assert_eq!(s.kind(), SampleKind::Put); + assert_eq!(s.payload().len(), size); cnt += 1; } } @@ -174,8 +224,43 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(msgs.load(Ordering::Relaxed), msg_count); assert_eq!(cnt, msg_count); + msgs.store(0, Ordering::Relaxed); + + println!("[QR][03c] Getting Ok(Delete) on peer02 session. {msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?ok_del", key_expr); + let rs = ztimeout!(peer02.get(selector)).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let s = s.result().unwrap(); + assert_eq!(s.kind(), SampleKind::Delete); + assert_eq!(s.payload().len(), 0); + cnt += 1; + } + } + println!("[QR][03c] Got on peer02 session. {cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + + msgs.store(0, Ordering::Relaxed); + + println!("[QR][04c] Getting Err() on peer02 session. {msg_count} msgs."); + let mut cnt = 0; + for _ in 0..msg_count { + let selector = format!("{}?err", key_expr); + let rs = ztimeout!(peer02.get(selector)).unwrap(); + while let Ok(s) = ztimeout!(rs.recv_async()) { + let e = s.result().unwrap_err(); + assert_eq!(e.payload().len(), size); + cnt += 1; + } + } + println!("[QR][04c] Got on peer02 session. {cnt}/{msg_count} msgs."); + assert_eq!(msgs.load(Ordering::Relaxed), msg_count); + assert_eq!(cnt, msg_count); + println!("[PS][03c] Unqueryable on peer01 session"); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + ztimeout!(qbl.undeclare()).unwrap(); // Wait for the declaration to propagate tokio::time::sleep(SLEEP).await; @@ -184,7 +269,7 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_unicast() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; @@ -193,30 +278,43 @@ async fn zenoh_session_unicast() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_multicast() { - zenoh_util::try_init_log_from_env(); + zenoh::try_init_log_from_env(); let (peer01, peer02) = open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; close_session(peer01, peer02).await; } +#[cfg(feature = "internal")] async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { // Open the sessions let mut config = config::peer(); - config.listen.endpoints = endpoints - .iter() - .map(|e| e.parse().unwrap()) - .collect::<Vec<_>>(); + config + .listen + .endpoints + .set( + endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::<Vec<_>>(), + ) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[  ][01a] Creating r1 session runtime: {:?}", endpoints); let mut r1 = RuntimeBuilder::new(config).build().await.unwrap(); r1.start().await.unwrap(); let 
     let mut config = config::peer();
-    config.connect.endpoints = endpoints
-        .iter()
-        .map(|e| e.parse().unwrap())
-        .collect::<Vec<_>>();
+    config
+        .connect
+        .endpoints
+        .set(
+            endpoints
+                .iter()
+                .map(|e| e.parse().unwrap())
+                .collect::<Vec<_>>(),
+        )
+        .unwrap();
     config.scouting.multicast.set_enabled(Some(false)).unwrap();
     println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints);
     let mut r2 = RuntimeBuilder::new(config).build().await.unwrap();
@@ -225,17 +323,18 @@ async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime)
     (r1, r2)
 }
 
+#[cfg(feature = "internal")]
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
 async fn zenoh_2sessions_1runtime_init() {
     let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await;
     println!("[RI][02a] Creating peer01 session from runtime 1");
-    let peer01 = zenoh::init(r1.clone()).res_async().await.unwrap();
+    let peer01 = zenoh::session::init(r1.clone()).await.unwrap();
     println!("[RI][02b] Creating peer02 session from runtime 2");
-    let peer02 = zenoh::init(r2.clone()).res_async().await.unwrap();
+    let peer02 = zenoh::session::init(r2.clone()).await.unwrap();
     println!("[RI][02c] Creating peer01a session from runtime 1");
-    let peer01a = zenoh::init(r1.clone()).res_async().await.unwrap();
+    let peer01a = zenoh::session::init(r1.clone()).await.unwrap();
     println!("[RI][03c] Closing peer01a session");
-    std::mem::drop(peer01a);
+    drop(peer01a);
     test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await;
     close_session(peer01, peer02).await;
     println!("[ ][01e] Closing r1 runtime");
diff --git a/zenoh/tests/shm.rs b/zenoh/tests/shm.rs
new file mode 100644
index 0000000000..0febac2eaa
--- /dev/null
+++ b/zenoh/tests/shm.rs
@@ -0,0 +1,221 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+#![cfg(all(feature = "unstable", feature = "shared-memory"))]
+use std::{
+    sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+
+use zenoh::{
+    config,
+    prelude::*,
+    pubsub::Reliability,
+    qos::CongestionControl,
+    shm::{
+        zshm, BlockOn, GarbageCollect, PosixShmProviderBackend, ShmProviderBuilder,
+        POSIX_PROTOCOL_ID,
+    },
+    Session,
+};
+use zenoh_core::ztimeout;
+
+const TIMEOUT: Duration = Duration::from_secs(60);
+const SLEEP: Duration = Duration::from_secs(1);
+
+const MSG_COUNT: usize = 100;
+const MSG_SIZE: [usize; 2] = [1_024, 100_000];
+
+async fn open_session_unicast(endpoints: &[&str]) -> (Session, Session) {
+    // Open the sessions
+    let mut config = config::peer();
+    config
+        .listen
+        .endpoints
+        .set(
+            endpoints
+                .iter()
+                .map(|e| e.parse().unwrap())
+                .collect::<Vec<_>>(),
+        )
+        .unwrap();
+    config.scouting.multicast.set_enabled(Some(false)).unwrap();
+    println!("[ ][01a] Opening peer01 session: {:?}", endpoints);
+    let peer01 = ztimeout!(zenoh::open(config)).unwrap();
+
+    let mut config = config::peer();
+    config
+        .connect
+        .endpoints
+        .set(
+            endpoints
+                .iter()
+                .map(|e| e.parse().unwrap())
+                .collect::<Vec<_>>(),
+        )
+        .unwrap();
+    config.scouting.multicast.set_enabled(Some(false)).unwrap();
+    println!("[ ][02a] Opening peer02 session: {:?}", endpoints);
+    let peer02 = ztimeout!(zenoh::open(config)).unwrap();
+
+    (peer01, peer02)
+}
+
+async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, Session) {
+    // Open the sessions
+    let mut config = config::peer();
+    config
+        .listen
+        .endpoints
+        .set(vec![endpoint01.parse().unwrap()])
+        .unwrap();
+    config.scouting.multicast.set_enabled(Some(true)).unwrap();
+    println!("[ ][01a] Opening peer01 session: {}", endpoint01);
+    let peer01 = ztimeout!(zenoh::open(config)).unwrap();
+
+    let mut config = config::peer();
+    config
+        .listen
+        .endpoints
+        .set(vec![endpoint02.parse().unwrap()])
+        .unwrap();
+    config.scouting.multicast.set_enabled(Some(true)).unwrap();
+    println!("[ ][02a] Opening peer02 session: {}", endpoint02);
+    let peer02 = ztimeout!(zenoh::open(config)).unwrap();
+
+    (peer01, peer02)
+}
+
+async fn close_session(peer01: Session, peer02: Session) {
+    println!("[ ][01d] Closing peer01 session");
+    ztimeout!(peer01.close()).unwrap();
+    println!("[ ][02d] Closing peer02 session");
+    ztimeout!(peer02.close()).unwrap();
+}
+
+async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Reliability) {
+    let msg_count = match reliability {
+        Reliability::Reliable => MSG_COUNT,
+        Reliability::BestEffort => 1,
+    };
+    let msgs = Arc::new(AtomicUsize::new(0));
+
+    for size in MSG_SIZE {
+        let key_expr = format!("shm{size}");
+
+        msgs.store(0, Ordering::SeqCst);
+
+        // Subscribe to data
+        println!("[PS][01b] Subscribing on peer01 session");
+        let c_msgs = msgs.clone();
+        let _sub = ztimeout!(peer01
+            .declare_subscriber(&key_expr)
+            .callback(move |sample| {
+                assert_eq!(sample.payload().len(), size);
+                let _ = sample.payload().deserialize::<&zshm>().unwrap();
+                c_msgs.fetch_add(1, Ordering::Relaxed);
+            }))
+        .unwrap();
+
+        // Wait for the declaration to propagate
+        tokio::time::sleep(SLEEP).await;
+
+        // create SHM backend...
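+        // NOTE: the backend is deliberately sized to a fraction of the total
+        // traffic (size * MSG_COUNT / 10), so the allocations below exercise
+        // back-pressure: the BlockOn<GarbageCollect> policy must reclaim freed
+        // chunks before new allocations can proceed.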
+        let backend = PosixShmProviderBackend::builder()
+            .with_size(size * MSG_COUNT / 10)
+            .unwrap()
+            .res()
+            .unwrap();
+        // ...and SHM provider
+        let shm01 = ShmProviderBuilder::builder()
+            .protocol_id::<POSIX_PROTOCOL_ID>()
+            .backend(backend)
+            .res();
+
+        // remember the segment size that was allocated
+        let shm_segment_size = shm01.available();
+
+        // Prepare a layout for allocations
+        let layout = shm01.alloc(size).into_layout().unwrap();
+
+        // Put data
+        println!("[PS][03b] Putting on peer02 session. {MSG_COUNT} msgs of {size} bytes.");
+        for c in 0..msg_count {
+            // Allocate a new message
+            let sbuf = ztimeout!(layout.alloc().with_policy::<BlockOn<GarbageCollect>>()).unwrap();
+            println!("{c} created");
+
+            // Publish this message
+            ztimeout!(peer02
+                .put(&key_expr, sbuf)
+                .congestion_control(CongestionControl::Block))
+            .unwrap();
+            println!("{c} put");
+        }
+
+        // wait for all messages to be received
+        ztimeout!(async {
+            loop {
+                let cnt = msgs.load(Ordering::Relaxed);
+                println!("[PS][03b] Received {cnt}/{msg_count}.");
+                if cnt != msg_count {
+                    tokio::time::sleep(SLEEP).await;
+                } else {
+                    break;
+                }
+            }
+        });
+
+        // wait for all memory to be reclaimed
+        ztimeout!(async {
+            loop {
+                shm01.garbage_collect();
+                let available = shm01.available();
+                println!("[PS][03b] SHM available {available}/{shm_segment_size}");
+                if available != shm_segment_size {
+                    tokio::time::sleep(SLEEP).await;
+                } else {
+                    break;
+                }
+            }
+        });
+    }
+}
+
+#[test]
+fn zenoh_shm_unicast() {
+    tokio::runtime::Runtime::new().unwrap().block_on(async {
+        // Initiate logging
+        zenoh::try_init_log_from_env();
+
+        let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:19447"]).await;
+        test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await;
+        close_session(peer01, peer02).await;
+    });
+}
+
+#[test]
+fn zenoh_shm_multicast() {
+    tokio::runtime::Runtime::new().unwrap().block_on(async {
+        // Initiate logging
+        zenoh::try_init_log_from_env();
+
+        let (peer01, peer02) =
+            open_session_multicast("udp/224.0.0.1:19448", "udp/224.0.0.1:19448").await;
+        test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await;
+        close_session(peer01, peer02).await;
+    });
+}
diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs
index e0f3f3ab4f..49663249ad 100644
--- a/zenoh/tests/unicity.rs
+++ b/zenoh/tests/unicity.rs
@@ -11,11 +11,23 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Arc;
-use std::time::Duration;
+use std::{
+    sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    },
+    time::Duration,
+};
+
 use tokio::runtime::Handle;
-use zenoh::prelude::r#async::*;
+use zenoh::{
+    config,
+    config::{EndPoint, WhatAmI},
+    key_expr::KeyExpr,
+    prelude::*,
+    qos::CongestionControl,
+    Session,
+};
 use zenoh_core::ztimeout;
 
 const TIMEOUT: Duration = Duration::from_secs(60);
@@ -26,26 +38,42 @@ const MSG_SIZE: [usize; 2] = [1_024, 100_000];
 
 async fn open_p2p_sessions() -> (Session, Session, Session) {
     // Open the sessions
     let mut config = config::peer();
-    config.listen.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()];
+    config
+        .listen
+        .endpoints
+        .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()])
+        .unwrap();
     config.scouting.multicast.set_enabled(Some(false)).unwrap();
     println!("[ ][01a] Opening s01 session");
-    let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap();
+    let s01 = ztimeout!(zenoh::open(config)).unwrap();
 
     let mut config = config::peer();
-    config.listen.endpoints = vec!["tcp/127.0.0.1:27448".parse().unwrap()];
-    config.connect.endpoints = vec!["tcp/127.0.0.1:27447".parse().unwrap()];
vec!["tcp/127.0.0.1:27447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:27448".parse().unwrap()]) + .unwrap(); + config + .connect + .endpoints + .set(vec!["tcp/127.0.0.1:27447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let mut config = config::peer(); - config.connect.endpoints = vec![ - "tcp/127.0.0.1:27447".parse().unwrap(), - "tcp/127.0.0.1:27448".parse().unwrap(), - ]; + config + .connect + .endpoints + .set(vec![ + "tcp/127.0.0.1:27447".parse().unwrap(), + "tcp/127.0.0.1:27448".parse().unwrap(), + ]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } @@ -54,41 +82,45 @@ async fn open_router_session() -> Session { // Open the sessions let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:37447".parse().unwrap()]; + config + .listen + .endpoints + .set(vec!["tcp/127.0.0.1:37447".parse().unwrap()]) + .unwrap(); config.scouting.multicast.set_enabled(Some(false)).unwrap(); println!("[ ][00a] Opening router session"); - ztimeout!(zenoh::open(config).res_async()).unwrap() + ztimeout!(zenoh::open(config)).unwrap() } async fn close_router_session(s: Session) { println!("[ ][01d] Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); + ztimeout!(s.close()).unwrap(); } async fn open_client_sessions() -> (Session, Session, Session) { // Open the sessions let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][01a] Opening s01 session"); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s01 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][02a] Opening s02 session"); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s02 = ztimeout!(zenoh::open(config)).unwrap(); let config = config::client(["tcp/127.0.0.1:37447".parse::().unwrap()]); println!("[ ][03a] Opening s03 session"); - let s03 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let s03 = ztimeout!(zenoh::open(config)).unwrap(); (s01, s02, s03) } async fn close_sessions(s01: Session, s02: Session, s03: Session) { println!("[ ][01d] Closing s01 session"); - ztimeout!(s01.close().res_async()).unwrap(); + ztimeout!(s01.close()).unwrap(); println!("[ ][02d] Closing s02 session"); - ztimeout!(s02.close().res_async()).unwrap(); + ztimeout!(s02.close()).unwrap(); println!("[ ][03d] Closing s03 session"); - ztimeout!(s03.close().res_async()).unwrap(); + ztimeout!(s03.close()).unwrap(); } async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { @@ -104,25 +136,19 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { // Subscribe to data println!("[PS][01b] Subscribing on s01 session"); let c_msgs1 = msgs1.clone(); - let sub1 = ztimeout!(s01 - .declare_subscriber(key_expr) - .callback(move |sample| { - assert_eq!(sample.value.payload.len(), size); - c_msgs1.fetch_add(1, Ordering::Relaxed); - }) - .res_async()) + let sub1 = ztimeout!(s01.declare_subscriber(key_expr).callback(move |sample| { + 
+            assert_eq!(sample.payload().len(), size);
+            c_msgs1.fetch_add(1, Ordering::Relaxed);
+        }))
         .unwrap();
 
         // Subscribe to data
         println!("[PS][02b] Subscribing on s02 session");
         let c_msgs2 = msgs2.clone();
-        let sub2 = ztimeout!(s02
-            .declare_subscriber(key_expr)
-            .callback(move |sample| {
-                assert_eq!(sample.value.payload.len(), size);
-                c_msgs2.fetch_add(1, Ordering::Relaxed);
-            })
-            .res_async())
+        let sub2 = ztimeout!(s02.declare_subscriber(key_expr).callback(move |sample| {
+            assert_eq!(sample.payload().len(), size);
+            c_msgs2.fetch_add(1, Ordering::Relaxed);
+        }))
         .unwrap();
 
         // Wait for the declaration to propagate
@@ -133,8 +159,7 @@
         for _ in 0..msg_count {
             ztimeout!(s03
                 .put(key_expr, vec![0u8; size])
-                .congestion_control(CongestionControl::Block)
-                .res_async())
+                .congestion_control(CongestionControl::Block))
             .unwrap();
         }
 
@@ -162,10 +187,10 @@
         assert_eq!(cnt2, msg_count);
 
         println!("[PS][02b] Unsubscribing on s02 session");
-        ztimeout!(sub2.undeclare().res_async()).unwrap();
+        ztimeout!(sub2.undeclare()).unwrap();
 
         println!("[PS][01b] Unsubscribing on s01 session");
-        ztimeout!(sub1.undeclare().res_async()).unwrap();
+        ztimeout!(sub1.undeclare()).unwrap();
 
         // Wait for the declaration to propagate
         tokio::time::sleep(SLEEP).await;
@@ -173,7 +198,7 @@
 }
 
 async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) {
-    let key_expr = "test/unicity";
+    let key_expr = KeyExpr::new("test/unicity").unwrap();
     let msg_count = 1;
     let msgs1 = Arc::new(AtomicUsize::new(0));
     let msgs2 = Arc::new(AtomicUsize::new(0));
@@ -184,36 +209,36 @@
         // Queryable to data
         println!("[QR][01c] Queryable on s01 session");
+        let cke = key_expr.clone();
         let c_msgs1 = msgs1.clone();
-        let qbl1 = ztimeout!(s01
-            .declare_queryable(key_expr)
-            .callback(move |sample| {
-                c_msgs1.fetch_add(1, Ordering::Relaxed);
-                let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap();
-                tokio::task::block_in_place(move || {
+        let qbl1 = ztimeout!(s01.declare_queryable(cke.clone()).callback(move |sample| {
+            c_msgs1.fetch_add(1, Ordering::Relaxed);
+            tokio::task::block_in_place({
+                let cke2 = cke.clone();
+                move || {
                     Handle::current().block_on(async move {
-                        ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap()
+                        ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap()
                     });
-                });
-            })
-            .res_async())
+                }
+            });
+        }))
         .unwrap();
 
         // Queryable to data
         println!("[QR][02c] Queryable on s02 session");
+        let cke = key_expr.clone();
         let c_msgs2 = msgs2.clone();
-        let qbl2 = ztimeout!(s02
-            .declare_queryable(key_expr)
-            .callback(move |sample| {
-                c_msgs2.fetch_add(1, Ordering::Relaxed);
-                let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap();
-                tokio::task::block_in_place(move || {
+        let qbl2 = ztimeout!(s02.declare_queryable(cke.clone()).callback(move |sample| {
+            c_msgs2.fetch_add(1, Ordering::Relaxed);
+            tokio::task::block_in_place({
+                let cke2 = cke.clone();
+                move || {
                     Handle::current().block_on(async move {
-                        ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap()
+                        ztimeout!(sample.reply(cke2.clone(), vec![0u8; size])).unwrap()
                    }
                 }
             });
        }))
         .unwrap();
 
         // Wait for the declaration to propagate
@@ -221,11 +246,12 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) {
         // Get data
         println!("[QR][03c] Getting on s03 session. {msg_count} msgs.");
+        let cke = key_expr.clone();
         let mut cnt = 0;
         for _ in 0..msg_count {
-            let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap();
+            let rs = ztimeout!(s03.get(cke.clone())).unwrap();
             while let Ok(s) = ztimeout!(rs.recv_async()) {
-                assert_eq!(s.sample.unwrap().value.payload.len(), size);
+                assert_eq!(s.result().unwrap().payload().len(), size);
                 cnt += 1;
             }
         }
@@ -239,10 +265,10 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) {
         assert_eq!(cnt, msg_count);
 
         println!("[PS][01c] Unqueryable on s01 session");
-        ztimeout!(qbl1.undeclare().res_async()).unwrap();
+        ztimeout!(qbl1.undeclare()).unwrap();
 
         println!("[PS][02c] Unqueryable on s02 session");
-        ztimeout!(qbl2.undeclare().res_async()).unwrap();
+        ztimeout!(qbl2.undeclare()).unwrap();
 
         // Wait for the declaration to propagate
         tokio::time::sleep(SLEEP).await;
@@ -251,7 +277,7 @@
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
 async fn zenoh_unicity_p2p() {
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     let (s01, s02, s03) = open_p2p_sessions().await;
     test_unicity_pubsub(&s01, &s02, &s03).await;
@@ -261,7 +287,7 @@
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
 async fn zenoh_unicity_brokered() {
-    zenoh_util::try_init_log_from_env();
+    zenoh::try_init_log_from_env();
 
     let r = open_router_session().await;
     let (s01, s02, s03) = open_client_sessions().await;
diff --git a/zenohd/Cargo.toml b/zenohd/Cargo.toml
index caf7169673..b0320ce648 100644
--- a/zenohd/Cargo.toml
+++ b/zenohd/Cargo.toml
@@ -42,7 +42,7 @@
 tracing = {workspace = true}
 tracing-subscriber = {workspace = true}
 tracing-loki = {workspace = true, optional = true }
 url = {workspace = true, optional = true }
-zenoh = { workspace = true, features = ["unstable", "plugins"] }
+zenoh = { workspace = true, features = ["unstable", "internal", "plugins"] }
 
 [dev-dependencies]
 rand = { workspace = true, features = ["default"] }
diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs
index ddac046770..9ce0a64333 100644
--- a/zenohd/src/main.rs
+++ b/zenohd/src/main.rs
@@ -14,15 +14,14 @@
 use clap::Parser;
 use futures::future;
 use git_version::git_version;
-use tracing_subscriber::layer::SubscriberExt;
-use tracing_subscriber::util::SubscriberInitExt;
-use tracing_subscriber::EnvFilter;
-use zenoh::config::{Config, ModeDependentValue, PermissionsConf, ValidatedMap};
-use zenoh::prelude::r#async::*;
-use zenoh::Result;
-
+use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
 #[cfg(feature = "loki")]
 use url::Url;
+use zenoh::{
+    config::{Config, EndPoint, ModeDependentValue, PermissionsConf, ValidatedMap, WhatAmI},
+    Result,
+};
+use zenoh_util::LibSearchDirs;
 
 #[cfg(feature = "loki")]
 const LOKI_ENDPOINT_VAR: &str = "LOKI_ENDPOINT";
@@ -39,8 +38,6 @@
 lazy_static::lazy_static!(
     static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION"));
 );
 
-const DEFAULT_LISTENER: &str = "tcp/[::]:7447";
-
 #[derive(Debug, Parser)]
 #[command(version=GIT_VERSION, long_version=LONG_VERSION.as_str(), about="The zenoh router")]
 struct Args {
@@ -58,7 +55,7 @@ struct Args {
     /// WARNING: this identifier must be unique in the system and must be 16 bytes maximum (32 chars)!
     #[arg(short, long)]
     id: Option<String>,
-    /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_<name>.so' (exact name depending the OS). Or you can give such a string: "<plugin_name>:<library_path>"
+    /// A plugin that MUST be loaded. You can give just the name of the plugin, zenohd will search for a library named 'libzenoh_plugin_\<name\>.so' (exact name depending the OS). Or you can give such a string: "\<plugin_name\>:\<library_path\>"
     /// Repeat this option to load several plugins. If loading failed, zenohd will exit.
     #[arg(short = 'P', long)]
     plugin: Vec<String>,
@@ -83,8 +80,8 @@ struct Args {
     /// - VALUE must be a valid JSON5 string that can be deserialized to the expected type for the KEY field.
     ///
     /// Examples:
-    /// - `--cfg='startup/subscribe:["demo/**"]'`
-    /// - `--cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'`
+    ///   - `--cfg='startup/subscribe:["demo/**"]'`
+    ///   - `--cfg='plugins/storage_manager/storages/demo:{key_expr:"demo/example/**",volume:"memory"}'`
     #[arg(long)]
     cfg: Vec<String>,
     /// Configure the read and/or write permissions on the admin space. Default is read only.
@@ -106,7 +103,7 @@ fn main() {
         let config = config_from_args(&args);
         tracing::info!("Initial conf: {}", &config);
 
-        let _session = match zenoh::open(config).res().await {
+        let _session = match zenoh::open(config).await {
             Ok(runtime) => runtime,
             Err(e) => {
                 println!("{e}. Exiting...");
@@ -123,11 +120,7 @@ fn config_from_args(args: &Args) -> Config {
         .config
         .as_ref()
         .map_or_else(Config::default, |conf_file| {
-            Config::from_file(conf_file).unwrap_or_else(|e| {
-                // if file load fail, wanning it, and load default config
-                tracing::warn!("Warn: File {} not found! {}", conf_file, e.to_string());
-                Config::default()
-            })
+            Config::from_file(conf_file).unwrap()
         });
 
     if config.mode().is_none() {
@@ -154,7 +147,11 @@ fn config_from_args(args: &Args) -> Config {
     if !args.plugin_search_dir.is_empty() {
         config
             .plugins_loading
-            .set_search_dirs(Some(args.plugin_search_dir.clone()))
+            // REVIEW: Should this append to search_dirs instead? As there is no way to pass the new
+            // `current_exe_parent` unless we change the format of the argument and this overrides
+            // the one set from the default config.
+            // Also, --cfg plugins_loading/search_dirs=[...] makes this argument superfluous.
+            .set_search_dirs(LibSearchDirs::from_paths(&args.plugin_search_dir))
             .unwrap();
     }
     for plugin in &args.plugin {
@@ -175,7 +172,8 @@ fn config_from_args(args: &Args) -> Config {
     if !args.connect.is_empty() {
         config
             .connect
-            .set_endpoints(
+            .endpoints
+            .set(
                 args.connect
                     .iter()
                     .map(|v| match v.parse::<EndPoint>() {
@@ -191,7 +189,8 @@ fn config_from_args(args: &Args) -> Config {
     if !args.listen.is_empty() {
         config
             .listen
-            .set_endpoints(
+            .endpoints
+            .set(
                 args.listen
                     .iter()
                     .map(|v| match v.parse::<EndPoint>() {
@@ -204,12 +203,6 @@ fn config_from_args(args: &Args) -> Config {
             )
             .unwrap();
     }
-    if config.listen.endpoints.is_empty() {
-        config
-            .listen
-            .endpoints
-            .push(DEFAULT_LISTENER.parse().unwrap())
-    }
     if args.no_timestamp {
         config
             .timestamping
@@ -348,7 +341,6 @@ fn test_default_features() {
         concat!(
             " zenoh/auth_pubkey",
             " zenoh/auth_usrpwd",
-            // " zenoh/complete_n",
             // " zenoh/shared-memory",
             // " zenoh/stats",
             " zenoh/transport_multilink",
@@ -375,7 +367,6 @@ fn test_no_default_features() {
         concat!(
             // " zenoh/auth_pubkey",
            // " zenoh/auth_usrpwd",
-            // " zenoh/complete_n",
             // " zenoh/shared-memory",
             // " zenoh/stats",
            // " zenoh/transport_multilink",

+impl<P> EncodingBuilderTrait for PublicationBuilder<P, PublicationBuilderPut> {
+    fn encoding<T: Into<Encoding>>(self, encoding: T) -> Self {
+        Self {
+            kind: PublicationBuilderPut {
+                encoding: encoding.into(),
+                ..self.kind
+            },
+            ..self
+        }
+    }
+}
+
+impl<P, K> SampleBuilderTrait for PublicationBuilder<P, K> {
+    #[cfg(feature = "unstable")]
+    fn source_info(self, source_info: SourceInfo) -> Self {
+        Self {
+            source_info,
+            ..self
+        }
+    }
+    fn attachment<TA: Into<OptionZBytes>>(self, attachment: TA) -> Self {
+        let attachment: OptionZBytes = attachment.into();
+        Self {
+            attachment: attachment.into(),
+            ..self
+        }
+    }
+}
+
+impl<P, K> TimestampBuilderTrait for PublicationBuilder<P, K> {
+    fn timestamp<TS: Into<Option<Timestamp>>>(self, timestamp: TS) -> Self {
+        Self {
+            timestamp: timestamp.into(),
+            ..self
+        }
+    }
+}
+
+impl<P, K> Resolvable for PublicationBuilder<P, K> {
+    type To = ZResult<()>;
+}
+
+impl Wait for PublicationBuilder<PublisherBuilder<'_, '_>, PublicationBuilderPut> {
+    #[inline]
+    fn wait(self) -> <Self as Resolvable>::To {
+        let publisher = self.publisher.create_one_shot_publisher()?;
+        publisher.resolve_put(
+            self.kind.payload,
+            SampleKind::Put,
+            self.kind.encoding,
+            self.timestamp,
+            #[cfg(feature = "unstable")]
+            self.source_info,
+            self.attachment,
+        )
+    }
+}
+
+impl Wait for PublicationBuilder<PublisherBuilder<'_, '_>, PublicationBuilderDelete> {
+    #[inline]
+    fn wait(self) -> <Self as Resolvable>::To {
+        let publisher = self.publisher.create_one_shot_publisher()?;
+        publisher.resolve_put(
+            ZBytes::empty(),
+            SampleKind::Delete,
+            Encoding::ZENOH_BYTES,
+            self.timestamp,
+            #[cfg(feature = "unstable")]
+            self.source_info,
+            self.attachment,
+        )
+    }
+}
+
+impl IntoFuture for PublicationBuilder<PublisherBuilder<'_, '_>, PublicationBuilderPut> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+impl IntoFuture for PublicationBuilder<PublisherBuilder<'_, '_>, PublicationBuilderDelete> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+/// A builder for initializing a [`Publisher`].
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::{prelude::*, qos::CongestionControl};
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let publisher = session
+///     .declare_publisher("key/expression")
+///     .congestion_control(CongestionControl::Block)
+///     .await
+///     .unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+#[derive(Debug)]
+pub struct PublisherBuilder<'a, 'b: 'a> {
+    pub(crate) session: SessionRef<'a>,
+    pub(crate) key_expr: ZResult<KeyExpr<'b>>,
+    pub(crate) encoding: Encoding,
+    pub(crate) congestion_control: CongestionControl,
+    pub(crate) priority: Priority,
+    pub(crate) is_express: bool,
+    pub(crate) destination: Locality,
+}
+
+impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> {
+    fn clone(&self) -> Self {
+        Self {
+            session: self.session.clone(),
+            key_expr: match &self.key_expr {
+                Ok(k) => Ok(k.clone()),
+                Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()),
+            },
+            encoding: self.encoding.clone(),
+            congestion_control: self.congestion_control,
+            priority: self.priority,
+            is_express: self.is_express,
+            destination: self.destination,
+        }
+    }
+}
+
+impl QoSBuilderTrait for PublisherBuilder<'_, '_> {
+    /// Change the `congestion_control` to apply when routing the data.
+    #[inline]
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self {
+        Self {
+            congestion_control,
+            ..self
+        }
+    }
+
+    /// Change the priority of the written data.
+    #[inline]
+    fn priority(self, priority: Priority) -> Self {
+        Self { priority, ..self }
+    }
+
+    /// Change the `express` policy to apply when routing the data.
+    /// When express is set to `true`, then the message will not be batched.
+    /// This usually has a positive impact on latency but negative impact on throughput.
+    #[inline]
+    fn express(self, is_express: bool) -> Self {
+        Self { is_express, ..self }
+    }
+}
+
+impl<'a, 'b> PublisherBuilder<'a, 'b> {
+    /// Restrict the matching subscribers that will receive the published data
+    /// to the ones that have the given [`Locality`](crate::prelude::Locality).
+    #[zenoh_macros::unstable]
+    #[inline]
+    pub fn allowed_destination(mut self, destination: Locality) -> Self {
+        self.destination = destination;
+        self
+    }
+
+    // internal function for performing the publication
+    fn create_one_shot_publisher(self) -> ZResult<Publisher<'a>> {
+        Ok(Publisher {
+            session: self.session,
+            id: 0, // This is a one shot Publisher
+            key_expr: self.key_expr?,
+            encoding: self.encoding,
+            congestion_control: self.congestion_control,
+            priority: self.priority,
+            is_express: self.is_express,
+            destination: self.destination,
+            #[cfg(feature = "unstable")]
+            matching_listeners: Default::default(),
+            undeclare_on_drop: true,
+        })
+    }
+}
+
+impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> {
+    type To = ZResult<Publisher<'a>>;
+}
+
+impl<'a, 'b> Wait for PublisherBuilder<'a, 'b> {
+    fn wait(self) -> <Self as Resolvable>::To {
+        let mut key_expr = self.key_expr?;
+        if !key_expr.is_fully_optimized(&self.session) {
+            let session_id = self.session.id;
+            let expr_id = self.session.declare_prefix(key_expr.as_str()).wait();
+            let prefix_len = key_expr
+                .len()
+                .try_into()
+                .expect("How did you get a key expression with a length over 2^32!?");
+            key_expr = match key_expr.0 {
+                crate::api::key_expr::KeyExprInner::Borrowed(key_expr)
+                | crate::api::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => {
+                    KeyExpr(crate::api::key_expr::KeyExprInner::BorrowedWire {
+                        key_expr,
+                        expr_id,
+                        mapping: Mapping::Sender,
+                        prefix_len,
+                        session_id,
+                    })
+                }
+                crate::api::key_expr::KeyExprInner::Owned(key_expr)
+                | crate::api::key_expr::KeyExprInner::Wire { key_expr, .. } => {
+                    KeyExpr(crate::api::key_expr::KeyExprInner::Wire {
+                        key_expr,
+                        expr_id,
+                        mapping: Mapping::Sender,
+                        prefix_len,
+                        session_id,
+                    })
+                }
+            }
+        }
+        self.session
+            .declare_publisher_inner(key_expr.clone(), self.destination)
+            .map(|id| Publisher {
+                session: self.session,
+                id,
+                key_expr,
+                encoding: self.encoding,
+                congestion_control: self.congestion_control,
+                priority: self.priority,
+                is_express: self.is_express,
+                destination: self.destination,
+                #[cfg(feature = "unstable")]
+                matching_listeners: Default::default(),
+                undeclare_on_drop: true,
+            })
+    }
+}
+
+impl<'a, 'b> IntoFuture for PublisherBuilder<'a, 'b> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> {
+    fn wait(self) -> <Self as Resolvable>::To {
+        self.publisher.resolve_put(
+            self.kind.payload,
+            SampleKind::Put,
+            self.kind.encoding,
+            self.timestamp,
+            #[cfg(feature = "unstable")]
+            self.source_info,
+            self.attachment,
+        )
+    }
+}
+
+impl Wait for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> {
+    fn wait(self) -> <Self as Resolvable>::To {
+        self.publisher.resolve_put(
+            ZBytes::empty(),
+            SampleKind::Delete,
+            Encoding::ZENOH_BYTES,
+            self.timestamp,
+            #[cfg(feature = "unstable")]
+            self.source_info,
+            self.attachment,
+        )
+    }
+}
+
+impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+impl IntoFuture for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
diff --git a/zenoh/src/api/builders/sample.rs b/zenoh/src/api/builders/sample.rs
new file mode 100644
index 0000000000..53cf099448
--- /dev/null
+++ b/zenoh/src/api/builders/sample.rs
@@ -0,0 +1,266 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::marker::PhantomData;
+
+use uhlc::Timestamp;
+use zenoh_core::zresult;
+use zenoh_protocol::core::CongestionControl;
+
+use crate::api::{
+    bytes::{OptionZBytes, ZBytes},
+    encoding::Encoding,
+    key_expr::KeyExpr,
+    publisher::Priority,
+    sample::{QoS, QoSBuilder, Sample, SampleKind},
+};
+#[cfg(feature = "unstable")]
+use crate::sample::SourceInfo;
+
+pub trait QoSBuilderTrait {
+    /// Change the `congestion_control` to apply when routing the data.
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self;
+    /// Change the priority of the written data.
+    fn priority(self, priority: Priority) -> Self;
+    /// Change the `express` policy to apply when routing the data.
+    /// When express is set to `true`, then the message will not be batched.
+    /// This usually has a positive impact on latency but negative impact on throughput.
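+    ///
+    /// A minimal sketch of combining the three QoS setters on a `put`
+    /// (illustrative only; assumes an already opened `session`):
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::{prelude::*, qos::{CongestionControl, Priority}};
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// session
+    ///     .put("key/expression", "payload")
+    ///     .congestion_control(CongestionControl::Block)
+    ///     .priority(Priority::DataHigh)
+    ///     .express(true)
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```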
+    fn express(self, is_express: bool) -> Self;
+}
+
+pub trait TimestampBuilderTrait {
+    /// Sets or clears the timestamp.
+    fn timestamp<T: Into<Option<Timestamp>>>(self, timestamp: T) -> Self;
+}
+
+pub trait SampleBuilderTrait {
+    /// Attach source information.
+    #[zenoh_macros::unstable]
+    fn source_info(self, source_info: SourceInfo) -> Self;
+    /// Attach user-provided data in key-value format.
+    fn attachment<T: Into<OptionZBytes>>(self, attachment: T) -> Self;
+}
+
+pub trait EncodingBuilderTrait {
+    /// Set the [`Encoding`].
+    fn encoding<T: Into<Encoding>>(self, encoding: T) -> Self;
+}
+
+#[derive(Clone, Debug)]
+pub struct SampleBuilderPut;
+#[derive(Clone, Debug)]
+pub struct SampleBuilderDelete;
+#[derive(Clone, Debug)]
+pub struct SampleBuilderAny;
+
+#[derive(Clone, Debug)]
+pub struct SampleBuilder<T> {
+    sample: Sample,
+    _t: PhantomData<T>,
+}
+
+impl SampleBuilder<SampleBuilderPut> {
+    pub fn put<IntoKeyExpr, IntoZBytes>(
+        key_expr: IntoKeyExpr,
+        payload: IntoZBytes,
+    ) -> SampleBuilder<SampleBuilderPut>
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+        IntoZBytes: Into<ZBytes>,
+    {
+        Self {
+            sample: Sample {
+                key_expr: key_expr.into(),
+                payload: payload.into(),
+                kind: SampleKind::Put,
+                encoding: Encoding::default(),
+                timestamp: None,
+                qos: QoS::default(),
+                #[cfg(feature = "unstable")]
+                source_info: SourceInfo::empty(),
+                attachment: None,
+            },
+            _t: PhantomData::<SampleBuilderPut>,
+        }
+    }
+
+    pub fn payload<IntoZBytes>(mut self, payload: IntoZBytes) -> Self
+    where
+        IntoZBytes: Into<ZBytes>,
+    {
+        self.sample.payload = payload.into();
+        self
+    }
+}
+
+impl SampleBuilder<SampleBuilderDelete> {
+    pub fn delete<IntoKeyExpr>(key_expr: IntoKeyExpr) -> SampleBuilder<SampleBuilderDelete>
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+    {
+        Self {
+            sample: Sample {
+                key_expr: key_expr.into(),
+                payload: ZBytes::empty(),
+                kind: SampleKind::Delete,
+                encoding: Encoding::default(),
+                timestamp: None,
+                qos: QoS::default(),
+                #[cfg(feature = "unstable")]
+                source_info: SourceInfo::empty(),
+                attachment: None,
+            },
+            _t: PhantomData::<SampleBuilderDelete>,
+        }
+    }
+}
+
+impl<T> SampleBuilder<T> {
+    /// Allows changing the key expression of a [`Sample`].
+    pub fn keyexpr<IntoKeyExpr>(self, key_expr: IntoKeyExpr) -> Self
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+    {
+        Self {
+            sample: Sample {
+                key_expr: key_expr.into(),
+                ..self.sample
+            },
+            _t: PhantomData::<T>,
+        }
+    }
+
+    // Allows changing the QoS of a [`Sample`] as a whole
+    pub(crate) fn qos(self, qos: QoS) -> Self {
+        Self {
+            sample: Sample { qos, ..self.sample },
+            _t: PhantomData::<T>,
+        }
+    }
+}
+
+impl<T> TimestampBuilderTrait for SampleBuilder<T> {
+    fn timestamp<U: Into<Option<Timestamp>>>(self, timestamp: U) -> Self {
+        Self {
+            sample: Sample {
+                timestamp: timestamp.into(),
+                ..self.sample
+            },
+            _t: PhantomData::<T>,
+        }
+    }
+}
+
+impl<T> SampleBuilderTrait for SampleBuilder<T> {
+    #[zenoh_macros::unstable]
+    fn source_info(self, source_info: SourceInfo) -> Self {
+        Self {
+            sample: Sample {
+                source_info,
+                ..self.sample
+            },
+            _t: PhantomData::<T>,
+        }
+    }
+
+    fn attachment<U: Into<OptionZBytes>>(self, attachment: U) -> Self {
+        let attachment: OptionZBytes = attachment.into();
+        Self {
+            sample: Sample {
+                attachment: attachment.into(),
+                ..self.sample
+            },
+            _t: PhantomData::<T>,
+        }
+    }
+}
+
+impl<T> QoSBuilderTrait for SampleBuilder<T> {
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self {
+        let qos: QoSBuilder = self.sample.qos.into();
+        let qos = qos.congestion_control(congestion_control).into();
+        Self {
+            sample: Sample { qos, ..self.sample },
+            _t: PhantomData::<T>,
+        }
+    }
+    fn priority(self, priority: Priority) -> Self {
+        let qos: QoSBuilder = self.sample.qos.into();
+        let qos = qos.priority(priority).into();
+        Self {
+            sample: Sample { qos, ..self.sample },
+            _t: PhantomData::<T>,
+        }
+    }
+    fn express(self, is_express: bool) -> Self {
+        let qos: QoSBuilder = self.sample.qos.into();
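+        // QoS is stored packed inside the sample; round-trip through QoSBuilder
+        // to update a single field.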
+        let qos = qos.express(is_express).into();
+        Self {
+            sample: Sample { qos, ..self.sample },
+            _t: PhantomData::<T>,
+        }
+    }
+}
+
+impl EncodingBuilderTrait for SampleBuilder<SampleBuilderPut> {
+    fn encoding<T: Into<Encoding>>(self, encoding: T) -> Self {
+        Self {
+            sample: Sample {
+                encoding: encoding.into(),
+                ..self.sample
+            },
+            _t: PhantomData::<SampleBuilderPut>,
+        }
+    }
+}
+
+impl From<Sample> for SampleBuilder<SampleBuilderAny> {
+    fn from(sample: Sample) -> Self {
+        SampleBuilder {
+            sample,
+            _t: PhantomData::<SampleBuilderAny>,
+        }
+    }
+}
+
+impl TryFrom<Sample> for SampleBuilder<SampleBuilderPut> {
+    type Error = zresult::Error;
+    fn try_from(sample: Sample) -> Result<Self, Self::Error> {
+        if sample.kind != SampleKind::Put {
+            bail!("Sample is not a put sample")
+        }
+        Ok(SampleBuilder {
+            sample,
+            _t: PhantomData::<SampleBuilderPut>,
+        })
+    }
+}
+
+impl TryFrom<Sample> for SampleBuilder<SampleBuilderDelete> {
+    type Error = zresult::Error;
+    fn try_from(sample: Sample) -> Result<Self, Self::Error> {
+        if sample.kind != SampleKind::Delete {
+            bail!("Sample is not a delete sample")
+        }
+        Ok(SampleBuilder {
+            sample,
+            _t: PhantomData::<SampleBuilderDelete>,
+        })
+    }
+}
+
+impl<T> From<SampleBuilder<T>> for Sample {
+    fn from(sample_builder: SampleBuilder<T>) -> Self {
+        sample_builder.sample
+    }
+}
diff --git a/zenoh/src/api/bytes.rs b/zenoh/src/api/bytes.rs
new file mode 100644
index 0000000000..7e3083b57f
--- /dev/null
+++ b/zenoh/src/api/bytes.rs
@@ -0,0 +1,3409 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+//! ZBytes primitives.
+use std::{
+    borrow::Cow, collections::HashMap, convert::Infallible, fmt::Debug, marker::PhantomData,
+    str::Utf8Error, string::FromUtf8Error, sync::Arc,
+};
+
+use uhlc::Timestamp;
+use unwrap_infallible::UnwrapInfallible;
+use zenoh_buffers::{
+    buffer::{Buffer, SplitBuffer},
+    reader::{DidntRead, HasReader, Reader},
+    writer::HasWriter,
+    ZBuf, ZBufReader, ZBufWriter, ZSlice, ZSliceBuffer,
+};
+use zenoh_codec::{RCodec, WCodec, Zenoh080};
+use zenoh_protocol::{
+    core::{Encoding as EncodingProto, Parameters},
+    zenoh::ext::AttachmentType,
+};
+#[cfg(feature = "shared-memory")]
+use zenoh_shm::{
+    api::buffer::{
+        zshm::{zshm, ZShm},
+        zshmmut::{zshmmut, ZShmMut},
+    },
+    ShmBufInner,
+};
+
+use super::{encoding::Encoding, value::Value};
+
+/// Wrapper type for API ergonomics to allow any type `T` to be converted into `Option<ZBytes>` where `T` implements `Into<ZBytes>`.
+#[repr(transparent)]
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct OptionZBytes(Option<ZBytes>);
+
+impl<T> From<T> for OptionZBytes
+where
+    T: Into<ZBytes>,
+{
+    fn from(value: T) -> Self {
+        Self(Some(value.into()))
+    }
+}
+
+impl<T> From<Option<T>> for OptionZBytes
+where
+    T: Into<ZBytes>,
+{
+    fn from(mut value: Option<T>) -> Self {
+        match value.take() {
+            Some(v) => Self(Some(v.into())),
+            None => Self(None),
+        }
+    }
+}
+
+impl<T> From<&Option<T>> for OptionZBytes
+where
+    for<'a> &'a T: Into<ZBytes>,
+{
+    fn from(value: &Option<T>) -> Self {
+        match value.as_ref() {
+            Some(v) => Self(Some(v.into())),
+            None => Self(None),
+        }
+    }
+}
+
+impl From<OptionZBytes> for Option<ZBytes> {
+    fn from(value: OptionZBytes) -> Self {
+        value.0
+    }
+}
+
+/// Trait to encode a type `T` into a [`Value`].
+pub trait Serialize<T> {
+    type Output;
+
+    /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`].
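+    ///
+    /// For example, [`ZSerde`] implements `Serialize<u32>`, which is what
+    /// [`ZBytes::serialize`] dispatches to (illustrative sketch):
+    /// ```
+    /// use zenoh::bytes::ZBytes;
+    ///
+    /// let bytes = ZBytes::serialize(42_u32);
+    /// let n: u32 = bytes.deserialize().unwrap();
+    /// assert_eq!(n, 42);
+    /// ```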
+    fn serialize(self, t: T) -> Self::Output;
+}
+
+pub trait Deserialize<T> {
+    type Input<'a>;
+    type Error;
+
+    /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information.
+    fn deserialize(self, t: Self::Input<'_>) -> Result<T, Self::Error>;
+}
+
+/// ZBytes contains the serialized bytes of user data.
+///
+/// `ZBytes` provides convenient methods to the user for serialization/deserialization based on the default Zenoh serializer [`ZSerde`].
+///
+/// **NOTE:** Zenoh semantics and protocol take care of sending and receiving bytes without restricting the actual data types.
+/// [`ZSerde`] is the default serializer/deserializer provided for convenience to the users to deal with primitive data types via
+/// a simple out-of-the-box encoding. [`ZSerde`] is **NOT** by any means the only serializer/deserializer users can use nor a limitation
+/// to the types supported by Zenoh. Users are free and encouraged to use any serializer/deserializer of their choice like *serde*,
+/// *protobuf*, *bincode*, *flatbuffers*, etc.
+///
+/// `ZBytes` can be used to serialize a single type:
+/// ```rust
+/// use zenoh::bytes::ZBytes;
+///
+/// let start = String::from("abc");
+/// let bytes = ZBytes::serialize(start.clone());
+/// let end: String = bytes.deserialize().unwrap();
+/// assert_eq!(start, end);
+/// ```
+///
+/// A tuple of serializable types:
+/// ```rust
+/// use zenoh::bytes::ZBytes;
+///
+/// let start = (String::from("abc"), String::from("def"));
+/// let bytes = ZBytes::serialize(start.clone());
+/// let end: (String, String) = bytes.deserialize().unwrap();
+/// assert_eq!(start, end);
+///
+/// let start = (1_u8, 3.14_f32, String::from("abc"));
+/// let bytes = ZBytes::serialize(start.clone());
+/// let end: (u8, f32, String) = bytes.deserialize().unwrap();
+/// assert_eq!(start, end);
+/// ```
+///
+/// An iterator of serializable types:
+/// ```rust
+/// use zenoh::bytes::ZBytes;
+///
+/// let start = vec![String::from("abc"), String::from("def")];
+/// let bytes = ZBytes::from_iter(start.iter());
+///
+/// let mut i = 0;
+/// let mut iter = bytes.iter::<String>();
+/// while let Some(Ok(t)) = iter.next() {
+///     assert_eq!(start[i], t);
+///     i += 1;
+/// }
+/// ```
+///
+/// A writer and a reader of serializable types:
+/// ```rust
+/// use zenoh::bytes::ZBytes;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Foo {
+///     one: usize,
+///     two: String,
+///     three: Vec<u8>,
+/// }
+///
+/// let start = Foo {
+///     one: 42,
+///     two: String::from("Forty-Two"),
+///     three: vec![42u8; 42],
+/// };
+///
+/// let mut bytes = ZBytes::empty();
+/// let mut writer = bytes.writer();
+///
+/// writer.serialize(&start.one);
+/// writer.serialize(&start.two);
+/// writer.serialize(&start.three);
+///
+/// let mut reader = bytes.reader();
+/// let end = Foo {
+///     one: reader.deserialize().unwrap(),
+///     two: reader.deserialize().unwrap(),
+///     three: reader.deserialize().unwrap(),
+/// };
+/// assert_eq!(start, end);
+/// ```
+///
+#[repr(transparent)]
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct ZBytes(ZBuf);
+
+impl ZBytes {
+    /// Create an empty ZBytes.
+    pub const fn empty() -> Self {
+        Self(ZBuf::empty())
+    }
+
+    /// Create a [`ZBytes`] from any type `T` that implements [`Into<ZBuf>`].
+    pub fn new<T>(t: T) -> Self
+    where
+        T: Into<ZBuf>,
+    {
+        Self(t.into())
+    }
+
+    /// Returns whether the ZBytes is empty or not.
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    /// Returns the length of the ZBytes.
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    /// Get a [`ZBytesReader`] implementing [`std::io::Read`] trait.
+    pub fn reader(&self) -> ZBytesReader<'_> {
+        ZBytesReader(self.0.reader())
+    }
+
+    /// Build a [`ZBytes`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader.
+    pub fn from_reader<R>(mut reader: R) -> Result<Self, std::io::Error>
+    where
+        R: std::io::Read,
+    {
+        let mut buf: Vec<u8> = vec![];
+        reader.read_to_end(&mut buf)?;
+        Ok(ZBytes::new(buf))
+    }
+
+    /// Get a [`ZBytesWriter`] implementing [`std::io::Write`] trait.
+    pub fn writer(&mut self) -> ZBytesWriter<'_> {
+        ZBytesWriter(self.0.writer())
+    }
+
+    /// Get a [`ZBytesIterator`] that lazily deserializes the values `T` contained in this [`ZBytes`].
+    pub fn iter<T>(&self) -> ZBytesIterator<'_, T>
+    where
+        for<'b> ZSerde: Deserialize<T, Input<'b> = &'b ZBytes>,
+        for<'b> <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        ZBytesIterator {
+            reader: self.0.reader(),
+            _t: PhantomData::<T>,
+        }
+    }
+
+    /// Serialize an object of type `T` as a [`ZBytes`] using the [`ZSerde`].
+    ///
+    /// ```rust
+    /// use zenoh::bytes::ZBytes;
+    ///
+    /// let start = String::from("abc");
+    /// let bytes = ZBytes::serialize(start.clone());
+    /// let end: String = bytes.deserialize().unwrap();
+    /// assert_eq!(start, end);
+    /// ```
+    pub fn serialize<T>(t: T) -> Self
+    where
+        ZSerde: Serialize<T, Output = ZBytes>,
+    {
+        ZSerde.serialize(t)
+    }
+
+    /// Try serializing an object of type `T` as a [`ZBytes`] using the [`ZSerde`].
+    ///
+    /// ```rust
+    /// use serde_json::Value;
+    /// use zenoh::bytes::ZBytes;
+    ///
+    /// // Some JSON input data as a &str. Maybe this comes from the user.
+    /// let data = r#"
+    /// {
+    ///     "name": "John Doe",
+    ///     "age": 43,
+    ///     "phones": [
+    ///         "+44 1234567",
+    ///         "+44 2345678"
+    ///     ]
+    /// }"#;
+    ///
+    /// // Parse the string of data into serde_json::Value.
+    /// let start: Value = serde_json::from_str(data).unwrap();
+    /// // The serialization of a serde_json::Value is fallible (see `serde_json::to_string()`).
+    /// let bytes = ZBytes::try_serialize(start.clone()).unwrap();
+    /// let end: Value = bytes.deserialize().unwrap();
+    /// assert_eq!(start, end);
+    /// ```
+    pub fn try_serialize<T, E>(t: T) -> Result<Self, E>
+    where
+        ZSerde: Serialize<T, Output = Result<ZBytes, E>>,
+    {
+        ZSerde.serialize(t)
+    }
+
+    /// Deserialize an object of type `T` from a [`ZBytes`] using the [`ZSerde`].
+    pub fn deserialize<'a, T>(&'a self) -> Result<T, <ZSerde as Deserialize<T>>::Error>
+    where
+        ZSerde: Deserialize<T, Input<'a> = &'a ZBytes>,
+        <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        ZSerde.deserialize(self)
+    }
+
+    /// Deserialize an object of type `T` from a [`ZBytes`] using the [`ZSerde`].
+    pub fn deserialize_mut<'a, T>(&'a mut self) -> Result<T, <ZSerde as Deserialize<T>>::Error>
+    where
+        ZSerde: Deserialize<T, Input<'a> = &'a mut ZBytes>,
+        <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        ZSerde.deserialize(self)
+    }
+
+    /// Infallibly deserialize an object of type `T` from a [`ZBytes`] using the [`ZSerde`].
+    pub fn into<'a, T>(&'a self) -> T
+    where
+        ZSerde: Deserialize<T, Input<'a> = &'a ZBytes, Error = Infallible>,
+        <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        ZSerde.deserialize(self).unwrap_infallible()
+    }
+
+    /// Infallibly deserialize an object of type `T` from a [`ZBytes`] using the [`ZSerde`].
+    pub fn into_mut<'a, T>(&'a mut self) -> T
+    where
+        ZSerde: Deserialize<T, Input<'a> = &'a mut ZBytes, Error = Infallible>,
+        <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        ZSerde.deserialize(self).unwrap_infallible()
+    }
+}
+
+/// A reader that implements [`std::io::Read`] trait to read from a [`ZBytes`].
+#[repr(transparent)]
+#[derive(Debug)]
+pub struct ZBytesReader<'a>(ZBufReader<'a>);
+
+#[derive(Debug)]
+pub enum ZReadOrDeserializeError<T>
+where
+    T: TryFrom<ZBytes>,
+    <T as TryFrom<ZBytes>>::Error: Debug,
+{
+    Read(DidntRead),
+    Deserialize(<T as TryFrom<ZBytes>>::Error),
+}
+
+impl<T> std::fmt::Display for ZReadOrDeserializeError<T>
+where
+    T: Debug,
+    T: TryFrom<ZBytes>,
+    <T as TryFrom<ZBytes>>::Error: Debug,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ZReadOrDeserializeError::Read(_) => f.write_str("Read error"),
+            ZReadOrDeserializeError::Deserialize(e) => f.write_fmt(format_args!("{:?}", e)),
+        }
+    }
+}
+
+impl<T> std::error::Error for ZReadOrDeserializeError<T>
+where
+    T: Debug,
+    T: TryFrom<ZBytes>,
+    <T as TryFrom<ZBytes>>::Error: Debug,
+{
+}
+
+impl ZBytesReader<'_> {
+    /// Returns the number of bytes that can still be read
+    pub fn remaining(&self) -> usize {
+        self.0.remaining()
+    }
+
+    /// Returns true if no more bytes can be read
+    pub fn is_empty(&self) -> bool {
+        self.remaining() == 0
+    }
+
+    /// Deserialize an object of type `T` from a [`ZBytesReader`] using the [`ZSerde`].
+    /// See [`ZBytesWriter::serialize`] for an example.
+    pub fn deserialize<T>(&mut self) -> Result<T, <ZSerde as Deserialize<T>>::Error>
+    where
+        for<'a> ZSerde: Deserialize<T, Input<'a> = &'a ZBytes>,
+        <ZSerde as Deserialize<T>>::Error: Debug,
+    {
+        let codec = Zenoh080::new();
+        let abuf: ZBuf = codec.read(&mut self.0).unwrap();
+        let apld = ZBytes::new(abuf);
+
+        let a = ZSerde.deserialize(&apld)?;
+        Ok(a)
+    }
+}
+
+impl std::io::Read for ZBytesReader<'_> {
+    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+        std::io::Read::read(&mut self.0, buf)
+    }
+}
+
+impl std::io::Seek for ZBytesReader<'_> {
+    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
+        std::io::Seek::seek(&mut self.0, pos)
+    }
+}
+
+/// A writer that implements [`std::io::Write`] trait to write into a [`ZBytes`].
+#[repr(transparent)]
+#[derive(Debug)]
+pub struct ZBytesWriter<'a>(ZBufWriter<'a>);
+
+impl ZBytesWriter<'_> {
+    fn write(&mut self, bytes: &ZBuf) {
+        let codec = Zenoh080::new();
+        // SAFETY: we are serializing slices on a ZBuf, so serialization will never
+        // fail unless we run out of memory. In that case, Rust memory allocator
+        // will panic before the serializer has any chance to fail.
+        unsafe { codec.write(&mut self.0, bytes).unwrap_unchecked() };
+    }
+
+    /// Serialize a type `T` on the [`ZBytes`]. For symmetry, every serialization
+    /// operation preserves type boundaries by prepending the length of the serialized data.
+    /// This allows calling [`ZBytesReader::deserialize`] in the same order to retrieve the original type.
+    ///
+    /// Example:
+    /// ```
+    /// use zenoh::bytes::ZBytes;
+    ///
+    /// // serialization
+    /// let mut bytes = ZBytes::empty();
+    /// let mut writer = bytes.writer();
+    /// let i1 = 1234_u32;
+    /// let i2 = String::from("test");
+    /// let i3 = vec![1, 2, 3, 4];
+    /// writer.serialize(i1);
+    /// writer.serialize(&i2);
+    /// writer.serialize(&i3);
+    /// // deserialization
+    /// let mut reader = bytes.reader();
+    /// let o1: u32 = reader.deserialize().unwrap();
+    /// let o2: String = reader.deserialize().unwrap();
+    /// let o3: Vec<u8> = reader.deserialize().unwrap();
+    /// assert_eq!(i1, o1);
+    /// assert_eq!(i2, o2);
+    /// assert_eq!(i3, o3);
+    /// ```
+    pub fn serialize<T>(&mut self, t: T)
+    where
+        ZSerde: Serialize<T, Output = ZBytes>,
+    {
+        let tpld = ZSerde.serialize(t);
+        self.write(&tpld.0);
+    }
+
+    /// Try to serialize a type `T` on the [`ZBytes`]. Serialization works
+    /// in the same way as [`ZBytesWriter::serialize`].
+    pub fn try_serialize<T, E>(&mut self, t: T) -> Result<(), E>
+    where
+        ZSerde: Serialize<T, Output = Result<ZBytes, E>>,
+    {
+        let tpld = ZSerde.serialize(t)?;
+        self.write(&tpld.0);
+        Ok(())
+    }
+
+    /// Append a [`ZBytes`] to this [`ZBytes`] by taking ownership.
+    /// This allows composing a [`ZBytes`] out of multiple [`ZBytes`] that may point to different memory regions.
+    /// In other words, it allows creating a linear view on different memory regions without copy.
+    /// Please note that `append` does not preserve any boundaries as done in [`ZBytesWriter::serialize`], meaning
+    /// that [`ZBytesReader::deserialize`] will not be able to deserialize the types in the same serialization order.
+    /// You will need to decide how to deserialize data yourself.
+    ///
+    /// Example:
+    /// ```
+    /// use zenoh::bytes::ZBytes;
+    ///
+    /// let one = ZBytes::from(vec![0, 1]);
+    /// let two = ZBytes::from(vec![2, 3, 4, 5]);
+    /// let three = ZBytes::from(vec![6, 7]);
+    ///
+    /// let mut bytes = ZBytes::empty();
+    /// let mut writer = bytes.writer();
+    /// // Append data without copying by passing ownership
+    /// writer.append(one);
+    /// writer.append(two);
+    /// writer.append(three);
+    ///
+    /// // deserialization
+    /// let mut out: Vec<u8> = bytes.into();
+    /// assert_eq!(out, vec![0u8, 1, 2, 3, 4, 5, 6, 7]);
+    /// ```
+    pub fn append(&mut self, b: ZBytes) {
+        use zenoh_buffers::writer::Writer;
+        for s in b.0.zslices() {
+            // SAFETY: we are writing a ZSlice on a ZBuf, this is infallible because we are just pushing a ZSlice to
+            // the list of available ZSlices.
+            unsafe { self.0.write_zslice(s).unwrap_unchecked() }
+        }
+    }
+}
+
+impl std::io::Write for ZBytesWriter<'_> {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        std::io::Write::write(&mut self.0, buf)
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        Ok(())
+    }
+}
+
+/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`ZBytes`].
+/// Note that [`ZBytes`] contains a serialized version of `T` and iterating over a [`ZBytes`] performs lazy deserialization.
+#[repr(transparent)]
+#[derive(Debug)]
+pub struct ZBytesIterator<'a, T> {
+    reader: ZBufReader<'a>,
+    _t: PhantomData<T>,
+}
+
+impl<T> Iterator for ZBytesIterator<'_, T>
+where
+    for<'a> ZSerde: Deserialize<T, Input<'a> = &'a ZBytes>,
+    <ZSerde as Deserialize<T>>::Error: Debug,
+{
+    type Item = Result<T, <ZSerde as Deserialize<T>>::Error>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let codec = Zenoh080::new();
+
+        let kbuf: ZBuf = codec.read(&mut self.reader).ok()?;
+        let kpld = ZBytes::new(kbuf);
+
+        Some(ZSerde.deserialize(&kpld))
+    }
+}
+
+impl<A> FromIterator<A> for ZBytes
+where
+    ZSerde: Serialize<A, Output = ZBytes>,
+{
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
+        let mut bytes = ZBytes::empty();
+        let mut writer = bytes.writer();
+        for t in iter {
+            writer.serialize(t);
+        }
+
+        ZBytes::new(bytes)
+    }
+}
+
+/// The default serializer for [`ZBytes`]. It supports primitive types, such as: `Vec<u8>`, `uX`, `iX`, `fX`, `String`, `bool`.
+/// It also supports common Rust serde values like `serde_json::Value`.
+///
+/// **NOTE:** Zenoh semantics and protocol take care of sending and receiving bytes without restricting the actual data types.
+/// [`ZSerde`] is the default serializer/deserializer provided for convenience to the users to deal with primitive data types via
+/// a simple out-of-the-box encoding. [`ZSerde`] is **NOT** by any means the only serializer/deserializer users can use nor a limitation
+/// to the types supported by Zenoh. Users are free and encouraged to use any serializer/deserializer of their choice like *serde*,
+/// *protobuf*, *bincode*, *flatbuffers*, etc.
+#[derive(Clone, Copy, Debug)]
+pub struct ZSerde;
+
+#[derive(Debug, Clone, Copy)]
+pub struct ZDeserializeError;
+
+impl std::fmt::Display for ZDeserializeError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str("Deserialize error")
+    }
+}
+
+impl std::error::Error for ZDeserializeError {}
+
+// ZBytes
+impl Serialize<ZBytes> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: ZBytes) -> Self::Output {
+        t
+    }
+}
+
+impl From<&ZBytes> for ZBytes {
+    fn from(t: &ZBytes) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl From<&mut ZBytes> for ZBytes {
+    fn from(t: &mut ZBytes) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&ZBytes> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &ZBytes) -> Self::Output {
+        t.clone()
+    }
+}
+
+impl Serialize<&mut ZBytes> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut ZBytes) -> Self::Output {
+        t.clone()
+    }
+}
+
+impl Deserialize<ZBytes> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = Infallible;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<ZBytes, Self::Error> {
+        Ok(v.clone())
+    }
+}
+
+// ZBuf
+impl Serialize<ZBuf> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: ZBuf) -> Self::Output {
+        ZBytes::new(t)
+    }
+}
+
+impl From<ZBuf> for ZBytes {
+    fn from(t: ZBuf) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&ZBuf> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &ZBuf) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&ZBuf> for ZBytes {
+    fn from(t: &ZBuf) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&mut ZBuf> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut ZBuf) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&mut ZBuf> for ZBytes {
+    fn from(t: &mut ZBuf) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Deserialize<ZBuf> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = Infallible;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<ZBuf, Self::Error> {
+        Ok(v.0.clone())
+    }
+}
+
+impl From<ZBytes> for ZBuf {
+    fn from(value: ZBytes) -> Self {
+        value.0
+    }
+}
+
+impl From<&ZBytes> for ZBuf {
+    fn from(value: &ZBytes) -> Self {
+        ZSerde.deserialize(value).unwrap_infallible()
+    }
+}
+
+impl From<&mut ZBytes> for ZBuf {
+    fn from(value: &mut ZBytes) -> Self {
+        ZSerde.deserialize(&*value).unwrap_infallible()
+    }
+}
+
+// ZSlice
+impl Serialize<ZSlice> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: ZSlice) -> Self::Output {
+        ZBytes::new(t)
+    }
+}
+
+impl From<ZSlice> for ZBytes {
+    fn from(t: ZSlice) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&ZSlice> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &ZSlice) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&ZSlice> for ZBytes {
+    fn from(t: &ZSlice) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&mut ZSlice> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut ZSlice) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&mut ZSlice> for ZBytes {
+    fn from(t: &mut ZSlice) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Deserialize<ZSlice> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = Infallible;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<ZSlice, Self::Error> {
+        Ok(v.0.to_zslice())
+    }
+}
+
+impl From<ZBytes> for ZSlice {
+    fn from(value: ZBytes) -> Self {
+        ZBuf::from(value).to_zslice()
+    }
+}
+
+impl From<&ZBytes> for ZSlice {
+    fn from(value: &ZBytes) -> Self {
+        ZSerde.deserialize(value).unwrap_infallible()
+    }
+}
+
+impl From<&mut ZBytes> for ZSlice {
+    fn from(value: &mut ZBytes) -> Self {
+        ZSerde.deserialize(&*value).unwrap_infallible()
+    }
+}
+
+// [u8; N]
+impl<const N: usize> Serialize<[u8; N]> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: [u8; N]) -> Self::Output {
+        ZBytes::new(t)
+    }
+}
+
+impl<const N: usize> From<[u8; N]> for ZBytes {
+    fn from(t: [u8; N]) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl<const N: usize> Serialize<&[u8; N]> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &[u8; N]) -> Self::Output {
+        ZBytes::new(*t)
+    }
+}
+
+impl<const N: usize> From<&[u8; N]> for ZBytes {
+    fn from(t: &[u8; N]) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl<const N: usize> Serialize<&mut [u8; N]> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut [u8; N]) -> Self::Output {
+        ZBytes::new(*t)
+    }
+}
+
+impl<const N: usize> From<&mut [u8; N]> for ZBytes {
+    fn from(t: &mut [u8; N]) -> Self {
+        ZSerde.serialize(*t)
+    }
+}
+
+impl<const N: usize> Deserialize<[u8; N]> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = ZDeserializeError;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<[u8; N], Self::Error> {
+        use std::io::Read;
+
+        if v.0.len() != N {
+            return Err(ZDeserializeError);
+        }
+        let mut dst = [0u8; N];
+        let mut reader = v.reader();
+        reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?;
+        Ok(dst)
+    }
+}
+
+impl<const N: usize> TryFrom<ZBytes> for [u8; N] {
+    type Error = ZDeserializeError;
+
+    fn try_from(value: ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(&value)
+    }
+}
+
+impl<const N: usize> TryFrom<&ZBytes> for [u8; N] {
+    type Error = ZDeserializeError;
+
+    fn try_from(value: &ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(value)
+    }
+}
+
+impl<const N: usize> TryFrom<&mut ZBytes> for [u8; N] {
+    type Error = ZDeserializeError;
+
+    fn try_from(value: &mut ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(&*value)
+    }
+}
+
+// Vec<u8>
+impl Serialize<Vec<u8>> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: Vec<u8>) -> Self::Output {
+        ZBytes::new(t)
+    }
+}
+
+impl From<Vec<u8>> for ZBytes {
+    fn from(t: Vec<u8>) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&Vec<u8>> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &Vec<u8>) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&Vec<u8>> for ZBytes {
+    fn from(t: &Vec<u8>) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&mut Vec<u8>> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut Vec<u8>) -> Self::Output {
+        ZBytes::new(t.clone())
+    }
+}
+
+impl From<&mut Vec<u8>> for ZBytes {
+    fn from(t: &mut Vec<u8>) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Deserialize<Vec<u8>> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = Infallible;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<Vec<u8>, Self::Error> {
+        Ok(v.0.contiguous().to_vec())
+    }
+}
+
+impl From<ZBytes> for Vec<u8> {
+    fn from(value: ZBytes) -> Self {
+        ZSerde.deserialize(&value).unwrap_infallible()
+    }
+}
+
+impl From<&ZBytes> for Vec<u8> {
+    fn from(value: &ZBytes) -> Self {
+        ZSerde.deserialize(value).unwrap_infallible()
+    }
+}
+
+impl From<&mut ZBytes> for Vec<u8> {
+    fn from(value: &mut ZBytes) -> Self {
+        ZSerde.deserialize(&*value).unwrap_infallible()
+    }
+}
+
+// &[u8]
+impl Serialize<&[u8]> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &[u8]) -> Self::Output {
+        ZBytes::new(t.to_vec())
+    }
+}
+
+impl From<&[u8]> for ZBytes {
+    fn from(t: &[u8]) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Serialize<&mut [u8]> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: &mut [u8]) -> Self::Output {
+        ZSerde.serialize(&*t)
+    }
+}
+
+impl From<&mut [u8]> for ZBytes {
+    fn from(t: &mut [u8]) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+// Cow<[u8]>
+impl<'a> Serialize<Cow<'a, [u8]>> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output {
[u8]>) -> Self::Output { + ZBytes::new(t.to_vec()) + } +} + +impl From> for ZBytes { + fn from(t: Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { + ZBytes::new(t.to_vec()) + } +} + +impl From<&Cow<'_, [u8]>> for ZBytes { + fn from(t: &Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Serialize<&mut Cow<'a, [u8]>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Cow<'a, [u8]>) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl From<&mut Cow<'_, [u8]>> for ZBytes { + fn from(t: &mut Cow<'_, [u8]>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; + type Error = Infallible; + + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { + Ok(v.0.contiguous()) + } +} + +impl From for Cow<'static, [u8]> { + fn from(v: ZBytes) -> Self { + match v.0.contiguous() { + Cow::Borrowed(s) => Cow::Owned(s.to_vec()), + Cow::Owned(s) => Cow::Owned(s), + } + } +} + +impl<'a> From<&'a ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a ZBytes) -> Self { + ZSerde.deserialize(value).unwrap_infallible() + } +} + +impl<'a> From<&'a mut ZBytes> for Cow<'a, [u8]> { + fn from(value: &'a mut ZBytes) -> Self { + ZSerde.deserialize(&*value).unwrap_infallible() + } +} + +// String +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: String) -> Self::Output { + ZBytes::new(s.into_bytes()) + } +} + +impl From for ZBytes { + fn from(t: String) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &String) -> Self::Output { + ZBytes::new(s.clone().into_bytes()) + } +} + +impl From<&String> for ZBytes { + fn from(t: &String) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut String> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut String) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut String> for ZBytes { + fn from(t: &mut String) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = FromUtf8Error; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); + String::from_utf8(v) + } +} + +impl TryFrom for String { + type Error = FromUtf8Error; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for String { + type Error = FromUtf8Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// &str +impl Serialize<&str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &str) -> Self::Output { + ZSerde.serialize(s.to_string()) + } +} + +impl From<&str> for ZBytes { + fn from(t: &str) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut str> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut str) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut str> for ZBytes { + fn from(t: &mut str) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Serialize> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Cow<'a, str>) -> Self::Output { + Self.serialize(s.to_string()) + } +} + +impl From> for ZBytes { + fn from(t: Cow<'_, str>) -> 
Self { + ZSerde.serialize(t) + } +} + +impl<'a> Serialize<&Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Cow<'a, str>) -> Self::Output { + ZSerde.serialize(s.to_string()) + } +} + +impl From<&Cow<'_, str>> for ZBytes { + fn from(t: &Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Serialize<&mut Cow<'a, str>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Cow<'a, str>) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Cow<'_, str>> for ZBytes { + fn from(t: &mut Cow<'_, str>) -> Self { + ZSerde.serialize(t) + } +} + +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; + type Error = Utf8Error; + + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { + Cow::try_from(v) + } +} + +impl TryFrom for Cow<'static, str> { + type Error = Utf8Error; + + fn try_from(v: ZBytes) -> Result { + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) + } +} + +impl<'a> TryFrom<&'a ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a ZBytes) -> Result { + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) + } +} + +impl<'a> TryFrom<&'a mut ZBytes> for Cow<'a, str> { + type Error = Utf8Error; + + fn try_from(v: &'a mut ZBytes) -> Result { + Ok(match Cow::<[u8]>::from(v) { + Cow::Borrowed(s) => core::str::from_utf8(s)?.into(), + Cow::Owned(s) => String::from_utf8(s).map_err(|err| err.utf8_error())?.into(), + }) + } +} + +// - Impl Serialize/Deserialize for numbers +macro_rules! impl_num { + ($t:ty) => { + impl Serialize<$t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: $t) -> Self::Output { + let bs = t.to_le_bytes(); + let mut end = 1; + if t != 0 as $t { + end += bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1); + }; + // SAFETY: + // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 + // - end is a valid end index because is bounded between 0 and bs.len() + ZBytes::new(unsafe { ZSlice::new(Arc::new(bs), 0, end).unwrap_unchecked() }) + } + } + + impl From<$t> for ZBytes { + fn from(t: $t) -> Self { + ZSerde.serialize(t) + } + } + + impl Serialize<&$t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &$t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&$t> for ZBytes { + fn from(t: &$t) -> Self { + ZSerde.serialize(t) + } + } + + impl Serialize<&mut $t> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut $t) -> Self::Output { + Self.serialize(*t) + } + } + + impl From<&mut $t> for ZBytes { + fn from(t: &mut $t) -> Self { + ZSerde.serialize(t) + } + } + + impl Deserialize<$t> for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'_>) -> Result<$t, Self::Error> { + use std::io::Read; + + let mut r = v.reader(); + let mut bs = (0 as $t).to_le_bytes(); + if v.len() > bs.len() { + return Err(ZDeserializeError); + } + r.read_exact(&mut bs[..v.len()]) + .map_err(|_| ZDeserializeError)?; + let t = <$t>::from_le_bytes(bs); + Ok(t) + } + } + + impl TryFrom for $t { + type Error = ZDeserializeError; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } + } + + impl TryFrom<&ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: 
&ZBytes) -> Result { + ZSerde.deserialize(value) + } + } + + impl TryFrom<&mut ZBytes> for $t { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } + } + }; +} + +// Zenoh unsigned integers +impl_num!(u8); +impl_num!(u16); +impl_num!(u32); +impl_num!(u64); +impl_num!(u128); +impl_num!(usize); + +// Zenoh signed integers +impl_num!(i8); +impl_num!(i16); +impl_num!(i32); +impl_num!(i64); +impl_num!(i128); +impl_num!(isize); + +// Zenoh floats +impl_num!(f32); +impl_num!(f64); + +// Zenoh bool +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: bool) -> Self::Output { + // SAFETY: casting a bool into an integer is well-defined behaviour. + // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html + ZBytes::new(ZBuf::from((t as u8).to_le_bytes())) + } +} + +impl From for ZBytes { + fn from(t: bool) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&bool> for ZBytes { + fn from(t: &bool) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut bool> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut bool) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut bool> for ZBytes { + fn from(t: &mut bool) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let p = v.deserialize::().map_err(|_| ZDeserializeError)?; + match p { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(ZDeserializeError), + } + } +} + +impl TryFrom for bool { + type Error = ZDeserializeError; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for bool { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Zenoh char +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: char) -> Self::Output { + // We can convert char to u32 and encode it as such + // See https://doc.rust-lang.org/std/primitive.char.html#method.from_u32 + ZSerde.serialize(t as u32) + } +} + +impl From for ZBytes { + fn from(t: char) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&char> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &char) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&char> for ZBytes { + fn from(t: &char) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut char> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut char) -> Self::Output { + ZSerde.serialize(*t) + } +} + +impl From<&mut char> for ZBytes { + fn from(t: &mut char) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let c = v.deserialize::()?; + let c = char::try_from(c).map_err(|_| ZDeserializeError)?; + Ok(c) + } +} + +impl TryFrom for char { + type Error = ZDeserializeError; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for char { + type Error = ZDeserializeError; + + fn try_from(value: &ZBytes) -> 
Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for char { + type Error = ZDeserializeError; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// - Zenoh advanced types serializer/deserializer +// Parameters +impl Serialize> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: Parameters<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl From> for ZBytes { + fn from(t: Parameters<'_>) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Parameters<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &Parameters<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s Parameters<'s>> for ZBytes { + fn from(t: &'s Parameters<'s>) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Parameters<'_>> for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: &mut Parameters<'_>) -> Self::Output { + Self.serialize(t.as_str()) + } +} + +impl<'s> From<&'s mut Parameters<'s>> for ZBytes { + fn from(t: &'s mut Parameters<'s>) -> Self { + ZSerde.serialize(&*t) + } +} + +impl<'a> Deserialize> for ZSerde { + type Input<'b> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'a>) -> Result, Self::Error> { + let s = v + .deserialize::>() + .map_err(|_| ZDeserializeError)?; + Ok(Parameters::from(s)) + } +} + +impl TryFrom for Parameters<'static> { + type Error = ZDeserializeError; + + fn try_from(v: ZBytes) -> Result { + let s = v.deserialize::>().map_err(|_| ZDeserializeError)?; + Ok(Parameters::from(s.into_owned())) + } +} + +impl<'s> TryFrom<&'s ZBytes> for Parameters<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl<'s> TryFrom<&'s mut ZBytes> for Parameters<'s> { + type Error = ZDeserializeError; + + fn try_from(value: &'s mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Timestamp +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Timestamp) -> Self::Output { + ZSerde.serialize(&s) + } +} + +impl From for ZBytes { + fn from(t: Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Timestamp> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Timestamp) -> Self::Output { + let codec = Zenoh080::new(); + let mut buffer = ZBuf::empty(); + let mut writer = buffer.writer(); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. 
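+        // Put differently: writing a Timestamp through `Zenoh080` into a growable
+        // ZBuf is infallible by construction, which is what makes the
+        // `unwrap_unchecked` below sound.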
+ unsafe { + codec.write(&mut writer, s).unwrap_unchecked(); + } + ZBytes::from(buffer) + } +} + +impl From<&Timestamp> for ZBytes { + fn from(t: &Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Timestamp> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Timestamp) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Timestamp> for ZBytes { + fn from(t: &mut Timestamp) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = zenoh_buffers::reader::DidntRead; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let codec = Zenoh080::new(); + let mut reader = v.0.reader(); + let e: Timestamp = codec.read(&mut reader)?; + Ok(e) + } +} + +impl TryFrom for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Timestamp { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Encoding +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Encoding) -> Self::Output { + let e: EncodingProto = s.into(); + let codec = Zenoh080::new(); + let mut buffer = ZBuf::empty(); + let mut writer = buffer.writer(); + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &e).unwrap_unchecked(); + } + ZBytes::from(buffer) + } +} + +impl From for ZBytes { + fn from(t: Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Encoding> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Encoding) -> Self::Output { + ZSerde.serialize(s.clone()) + } +} + +impl From<&Encoding> for ZBytes { + fn from(t: &Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Encoding> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Encoding) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Encoding> for ZBytes { + fn from(t: &mut Encoding) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = zenoh_buffers::reader::DidntRead; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let codec = Zenoh080::new(); + let mut reader = v.0.reader(); + let e: EncodingProto = codec.read(&mut reader)?; + Ok(e.into()) + } +} + +impl TryFrom for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Encoding { + type Error = zenoh_buffers::reader::DidntRead; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Value +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: Value) -> Self::Output { + ZSerde.serialize((s.payload(), s.encoding())) + } +} + +impl From for ZBytes { + fn from(t: Value) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&Value> for 
ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &Value) -> Self::Output { + ZSerde.serialize(s.clone()) + } +} + +impl From<&Value> for ZBytes { + fn from(t: &Value) -> Self { + ZSerde.serialize(t) + } +} + +impl Serialize<&mut Value> for ZSerde { + type Output = ZBytes; + + fn serialize(self, s: &mut Value) -> Self::Output { + ZSerde.serialize(&*s) + } +} + +impl From<&mut Value> for ZBytes { + fn from(t: &mut Value) -> Self { + ZSerde.serialize(t) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple2; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + let (payload, encoding) = v.deserialize::<(ZBytes, Encoding)>()?; + Ok(Value::new(payload, encoding)) + } +} + +impl TryFrom for Value { + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for Value { + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for Value { + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// JSON +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_json::Value) -> Self::Output { + ZSerde.serialize(&t) + } +} + +impl TryFrom for ZBytes { + type Error = serde_json::Error; + + fn try_from(value: serde_json::Value) -> Result { + ZSerde.serialize(&value) + } +} + +impl Serialize<&serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&serde_json::Value> for ZBytes { + type Error = serde_json::Error; + + fn try_from(value: &serde_json::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&mut serde_json::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_json::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_json::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_json::Value> for ZBytes { + type Error = serde_json::Error; + + fn try_from(value: &mut serde_json::Value) -> Result { + ZSerde.serialize(&*value) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = serde_json::Error; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + serde_json::from_reader(v.reader()) + } +} + +impl TryFrom for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for serde_json::Value { + type Error = serde_json::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Yaml +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_yaml::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl TryFrom for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_yaml::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + 
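+        // ZBytes exposes a `std::io::Write`-compatible writer, so the serde-based
+        // formats (JSON above, YAML here, CBOR and Pickle below) stream straight
+        // into the payload buffer without first materializing an intermediate
+        // `Vec<u8>`.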
serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&mut serde_yaml::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_yaml::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_yaml::to_writer(bytes.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&mut serde_yaml::Value> for ZBytes { + type Error = serde_yaml::Error; + + fn try_from(value: &mut serde_yaml::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = serde_yaml::Error; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + serde_yaml::from_reader(v.reader()) + } +} + +impl TryFrom for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for serde_yaml::Value { + type Error = serde_yaml::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// CBOR +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_cbor::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl TryFrom for ZBytes { + type Error = serde_cbor::Error; + + fn try_from(value: serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_cbor::Value) -> Self::Output { + let mut bytes = ZBytes::empty(); + serde_cbor::to_writer(bytes.0.writer(), t)?; + Ok(bytes) + } +} + +impl TryFrom<&serde_cbor::Value> for ZBytes { + type Error = serde_cbor::Error; + + fn try_from(value: &serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&mut serde_cbor::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &mut serde_cbor::Value) -> Self::Output { + ZSerde.serialize(&*t) + } +} + +impl TryFrom<&mut serde_cbor::Value> for ZBytes { + type Error = serde_cbor::Error; + + fn try_from(value: &mut serde_cbor::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Deserialize for ZSerde { + type Input<'a> = &'a ZBytes; + type Error = serde_cbor::Error; + + fn deserialize(self, v: Self::Input<'_>) -> Result { + serde_cbor::from_reader(v.reader()) + } +} + +impl TryFrom for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for serde_cbor::Value { + type Error = serde_cbor::Error; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Pickle +impl Serialize for ZSerde { + type Output = Result; + + fn serialize(self, t: serde_pickle::Value) -> Self::Output { + Self.serialize(&t) + } +} + +impl TryFrom for ZBytes { + type Error = serde_pickle::Error; + + fn try_from(value: serde_pickle::Value) -> Result { + ZSerde.serialize(value) + } +} + +impl Serialize<&serde_pickle::Value> for ZSerde { + type Output = Result; + + fn serialize(self, t: &serde_pickle::Value) -> 
Self::Output {
+        let mut bytes = ZBytes::empty();
+        serde_pickle::value_to_writer(
+            &mut bytes.0.writer(),
+            t,
+            serde_pickle::SerOptions::default(),
+        )?;
+        Ok(bytes)
+    }
+}
+
+impl TryFrom<&serde_pickle::Value> for ZBytes {
+    type Error = serde_pickle::Error;
+
+    fn try_from(value: &serde_pickle::Value) -> Result<Self, Self::Error> {
+        ZSerde.serialize(value)
+    }
+}
+
+impl Serialize<&mut serde_pickle::Value> for ZSerde {
+    type Output = Result<ZBytes, serde_pickle::Error>;
+
+    fn serialize(self, t: &mut serde_pickle::Value) -> Self::Output {
+        ZSerde.serialize(&*t)
+    }
+}
+
+impl TryFrom<&mut serde_pickle::Value> for ZBytes {
+    type Error = serde_pickle::Error;
+
+    fn try_from(value: &mut serde_pickle::Value) -> Result<Self, Self::Error> {
+        ZSerde.serialize(value)
+    }
+}
+
+impl Deserialize<serde_pickle::Value> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = serde_pickle::Error;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<serde_pickle::Value, Self::Error> {
+        serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default())
+    }
+}
+
+impl TryFrom<ZBytes> for serde_pickle::Value {
+    type Error = serde_pickle::Error;
+
+    fn try_from(value: ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(&value)
+    }
+}
+
+impl TryFrom<&ZBytes> for serde_pickle::Value {
+    type Error = serde_pickle::Error;
+
+    fn try_from(value: &ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(value)
+    }
+}
+
+impl TryFrom<&mut ZBytes> for serde_pickle::Value {
+    type Error = serde_pickle::Error;
+
+    fn try_from(value: &mut ZBytes) -> Result<Self, Self::Error> {
+        ZSerde.deserialize(&*value)
+    }
+}
+
+// bytes::Bytes
+
+// Define a transparent wrapper type to get around Rust's orphan rule.
+// This allows using bytes::Bytes directly as the supporting buffer of a
+// ZSlice, resulting in zero-copy and zero-alloc bytes::Bytes serialization.
+#[repr(transparent)]
+#[derive(Debug)]
+struct BytesWrap(bytes::Bytes);
+
+impl ZSliceBuffer for BytesWrap {
+    fn as_slice(&self) -> &[u8] {
+        &self.0
+    }
+
+    fn as_any(&self) -> &dyn std::any::Any {
+        self
+    }
+
+    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
+        self
+    }
+}
+
+impl Serialize<bytes::Bytes> for ZSerde {
+    type Output = ZBytes;
+
+    fn serialize(self, s: bytes::Bytes) -> Self::Output {
+        ZBytes::new(BytesWrap(s))
+    }
+}
+
+impl From<bytes::Bytes> for ZBytes {
+    fn from(t: bytes::Bytes) -> Self {
+        ZSerde.serialize(t)
+    }
+}
+
+impl Deserialize<bytes::Bytes> for ZSerde {
+    type Input<'a> = &'a ZBytes;
+    type Error = Infallible;
+
+    fn deserialize(self, v: Self::Input<'_>) -> Result<bytes::Bytes, Self::Error> {
+        // bytes::Bytes can be constructed only by passing ownership to the constructor.
+        // Therefore, here we are forced to allocate a vector and copy the whole ZBytes
+        // content, since bytes::Bytes does not support anything other than Box<[u8]>
+        // (and its variants like Vec<u8> and String).
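+        // The asymmetry with serialization is deliberate: ZBytes -> bytes::Bytes
+        // copies here, while bytes::Bytes -> ZBytes above is zero-copy thanks to
+        // BytesWrap.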
+ let v: Vec = ZSerde.deserialize(v).unwrap_infallible(); + Ok(bytes::Bytes::from(v)) + } +} + +impl TryFrom for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for bytes::Bytes { + type Error = Infallible; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Shared memory conversion +#[cfg(feature = "shared-memory")] +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: ZShm) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) + } +} + +#[cfg(feature = "shared-memory")] +impl From for ZBytes { + fn from(t: ZShm) -> Self { + ZSerde.serialize(t) + } +} + +// Shared memory conversion +#[cfg(feature = "shared-memory")] +impl Serialize for ZSerde { + type Output = ZBytes; + + fn serialize(self, t: ZShmMut) -> Self::Output { + let slice: ZSlice = t.into(); + ZBytes::new(slice) + } +} + +#[cfg(feature = "shared-memory")] +impl From for ZBytes { + fn from(t: ZShmMut) -> Self { + ZSerde.serialize(t) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> Deserialize<&'a zshm> for ZSerde { + type Input<'b> = &'a ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a zshm, Self::Error> { + // A ZShm is expected to have only one slice + let mut zslices = v.0.zslices(); + if let Some(zs) = zslices.next() { + if let Some(shmb) = zs.downcast_ref::() { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> TryFrom<&'a ZBytes> for &'a zshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshm { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> Deserialize<&'a mut zshm> for ZSerde { + type Input<'b> = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a mut zshm, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + // SAFETY: ShmBufInner cannot change the size of the slice + if let Some(shmb) = unsafe { zs.downcast_mut::() } { + return Ok(shmb.into()); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> Deserialize<&'a mut zshmmut> for ZSerde { + type Input<'b> = &'a mut ZBytes; + type Error = ZDeserializeError; + + fn deserialize(self, v: Self::Input<'a>) -> Result<&'a mut zshmmut, Self::Error> { + // A ZSliceShmBorrowMut is expected to have only one slice + let mut zslices = v.0.zslices_mut(); + if let Some(zs) = zslices.next() { + // SAFETY: ShmBufInner cannot change the size of the slice + if let Some(shmb) = unsafe { zs.downcast_mut::() } { + return shmb.try_into().map_err(|_| ZDeserializeError); + } + } + Err(ZDeserializeError) + } +} + +#[cfg(feature = "shared-memory")] +impl<'a> TryFrom<&'a mut ZBytes> for &'a mut zshmmut { + type Error = ZDeserializeError; + + fn try_from(value: &'a mut ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +// Tuple (a, b) +macro_rules! 
impl_tuple2 { + ($t:expr) => {{ + let (a, b) = $t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + } + + ZBytes::new(buffer) + }}; +} + +impl Serialize<(A, B)> for ZSerde +where + A: Into, + B: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: (A, B)) -> Self::Output { + impl_tuple2!(t) + } +} + +impl Serialize<&(A, B)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &(A, B)) -> Self::Output { + impl_tuple2!(t) + } +} + +impl From<(A, B)> for ZBytes +where + A: Into, + B: Into, +{ + fn from(value: (A, B)) -> Self { + ZSerde.serialize(value) + } +} + +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple2 +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), +} + +impl std::fmt::Display for ZReadOrDeserializeErrorTuple2 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple2::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple2::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + } + } +} + +impl std::error::Error for ZReadOrDeserializeErrorTuple2 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, +{ +} + +impl Deserialize<(A, B)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple2; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = bytes.0.reader(); + + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple2::One(ZReadOrDeserializeError::Read(e)))?; + let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple2::One(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let bbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple2::Two(ZReadOrDeserializeError::Read(e)))?; + let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + ZReadOrDeserializeErrorTuple2::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; + + Ok((a, b)) + } +} + +impl TryFrom for (A, B) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for (A, B) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for (A, B) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &mut ZBytes) -> Result 
{ + ZSerde.deserialize(&*value) + } +} + +// Tuple (a, b, c) +macro_rules! impl_tuple3 { + ($t:expr) => {{ + let (a, b, c) = $t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); + let cpld: ZBytes = c.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + codec.write(&mut writer, &cpld.0).unwrap_unchecked(); + } + + ZBytes::new(buffer) + }}; +} + +impl Serialize<(A, B, C)> for ZSerde +where + A: Into, + B: Into, + C: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: (A, B, C)) -> Self::Output { + impl_tuple3!(t) + } +} + +impl Serialize<&(A, B, C)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, + for<'b> &'b C: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &(A, B, C)) -> Self::Output { + impl_tuple3!(t) + } +} + +impl From<(A, B, C)> for ZBytes +where + A: Into, + B: Into, + C: Into, +{ + fn from(value: (A, B, C)) -> Self { + ZSerde.serialize(value) + } +} + +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple3 +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), + Three(ZReadOrDeserializeError), +} + +impl std::fmt::Display for ZReadOrDeserializeErrorTuple3 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple3::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple3::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple3::Three(e) => { + f.write_fmt(format_args!("3rd tuple element: {}", e)) + } + } + } +} + +impl std::error::Error for ZReadOrDeserializeErrorTuple3 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, +{ +} + +impl Deserialize<(A, B, C)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple3; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B, C), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = bytes.0.reader(); + + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::One(ZReadOrDeserializeError::Read(e)))?; + let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple3::One(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let bbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::Two(ZReadOrDeserializeError::Read(e)))?; + let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + ZReadOrDeserializeErrorTuple3::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let cbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple3::Three(ZReadOrDeserializeError::Read(e)))?; + let cpld = 
ZBytes::new(cbuf); + let c = C::try_from(cpld).map_err(|e| { + ZReadOrDeserializeErrorTuple3::Three(ZReadOrDeserializeError::Deserialize(e)) + })?; + + Ok((a, b, c)) + } +} + +impl TryFrom for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for (A, B, C) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple3; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Tuple (a, b, c, d) +macro_rules! impl_tuple4 { + ($t:expr) => {{ + let (a, b, c, d) = $t; + + let codec = Zenoh080::new(); + let mut buffer: ZBuf = ZBuf::empty(); + let mut writer = buffer.writer(); + let apld: ZBytes = a.into(); + let bpld: ZBytes = b.into(); + let cpld: ZBytes = c.into(); + let dpld: ZBytes = d.into(); + + // SAFETY: we are serializing slices on a ZBuf, so serialization will never + // fail unless we run out of memory. In that case, Rust memory allocator + // will panic before the serializer has any chance to fail. + unsafe { + codec.write(&mut writer, &apld.0).unwrap_unchecked(); + codec.write(&mut writer, &bpld.0).unwrap_unchecked(); + codec.write(&mut writer, &cpld.0).unwrap_unchecked(); + codec.write(&mut writer, &dpld.0).unwrap_unchecked(); + } + + ZBytes::new(buffer) + }}; +} + +impl Serialize<(A, B, C, D)> for ZSerde +where + A: Into, + B: Into, + C: Into, + D: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: (A, B, C, D)) -> Self::Output { + impl_tuple4!(t) + } +} + +impl Serialize<&(A, B, C, D)> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, + for<'b> &'b C: Into, + for<'b> &'b D: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &(A, B, C, D)) -> Self::Output { + impl_tuple4!(t) + } +} + +impl From<(A, B, C, D)> for ZBytes +where + A: Into, + B: Into, + C: Into, + D: Into, +{ + fn from(value: (A, B, C, D)) -> Self { + ZSerde.serialize(value) + } +} + +#[derive(Debug)] +pub enum ZReadOrDeserializeErrorTuple4 +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + One(ZReadOrDeserializeError), + Two(ZReadOrDeserializeError), + Three(ZReadOrDeserializeError), + Four(ZReadOrDeserializeError), +} + +impl std::fmt::Display for ZReadOrDeserializeErrorTuple4 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, + D: Debug, + D: TryFrom, + >::Error: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ZReadOrDeserializeErrorTuple4::One(e) => { + f.write_fmt(format_args!("1st tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Two(e) => { + f.write_fmt(format_args!("2nd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Three(e) => { + f.write_fmt(format_args!("3rd tuple element: {}", e)) + } + ZReadOrDeserializeErrorTuple4::Four(e) => { + f.write_fmt(format_args!("4th tuple element: {}", e)) + } + } + } +} + +impl 
std::error::Error for ZReadOrDeserializeErrorTuple4 +where + A: Debug, + A: TryFrom, + >::Error: Debug, + B: Debug, + B: TryFrom, + >::Error: Debug, + C: Debug, + C: TryFrom, + >::Error: Debug, + D: Debug, + D: TryFrom, + >::Error: Debug, +{ +} + +impl Deserialize<(A, B, C, D)> for ZSerde +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = ZReadOrDeserializeErrorTuple4; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result<(A, B, C, D), Self::Error> { + let codec = Zenoh080::new(); + let mut reader = bytes.0.reader(); + + let abuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::One(ZReadOrDeserializeError::Read(e)))?; + let apld = ZBytes::new(abuf); + let a = A::try_from(apld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::One(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let bbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::Two(ZReadOrDeserializeError::Read(e)))?; + let bpld = ZBytes::new(bbuf); + let b = B::try_from(bpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Two(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let cbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::Three(ZReadOrDeserializeError::Read(e)))?; + let cpld = ZBytes::new(cbuf); + let c = C::try_from(cpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Three(ZReadOrDeserializeError::Deserialize(e)) + })?; + + let dbuf: ZBuf = codec + .read(&mut reader) + .map_err(|e| ZReadOrDeserializeErrorTuple4::Four(ZReadOrDeserializeError::Read(e)))?; + let dpld = ZBytes::new(dbuf); + let d = D::try_from(dpld).map_err(|e| { + ZReadOrDeserializeErrorTuple4::Four(ZReadOrDeserializeError::Deserialize(e)) + })?; + + Ok((a, b, c, d)) + } +} + +impl TryFrom for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for (A, B, C, D) +where + A: TryFrom, + >::Error: Debug, + B: TryFrom, + >::Error: Debug, + C: TryFrom, + >::Error: Debug, + D: TryFrom, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple4; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// HashMap +impl Serialize> for ZSerde +where + A: Into, + B: Into, +{ + type Output = ZBytes; + + fn serialize(self, mut t: HashMap) -> Self::Output { + ZBytes::from_iter(t.drain()) + } +} + +impl Serialize<&HashMap> for ZSerde +where + for<'a> &'a A: Into, + for<'b> &'b B: Into, +{ + type Output = ZBytes; + + fn serialize(self, t: &HashMap) -> Self::Output { + ZBytes::from_iter(t.iter()) + } +} + +impl From> for ZBytes +where + A: Into, + B: Into, +{ + fn from(value: HashMap) -> Self { + ZSerde.serialize(value) + } +} + +impl Deserialize> for ZSerde +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Input<'a> = &'a ZBytes; + type Error = 
ZReadOrDeserializeErrorTuple2; + + fn deserialize(self, bytes: Self::Input<'_>) -> Result, Self::Error> { + let mut hm = HashMap::new(); + for res in bytes.iter::<(A, B)>() { + let (k, v) = res?; + hm.insert(k, v); + } + Ok(hm) + } +} + +impl TryFrom for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: ZBytes) -> Result { + ZSerde.deserialize(&value) + } +} + +impl TryFrom<&ZBytes> for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &ZBytes) -> Result { + ZSerde.deserialize(value) + } +} + +impl TryFrom<&mut ZBytes> for HashMap +where + A: TryFrom + Debug + std::cmp::Eq + std::hash::Hash, + >::Error: Debug, + B: TryFrom + Debug, + >::Error: Debug, +{ + type Error = ZReadOrDeserializeErrorTuple2; + + fn try_from(value: &mut ZBytes) -> Result { + ZSerde.deserialize(&*value) + } +} + +// Protocol attachment extension +impl From for AttachmentType { + fn from(this: ZBytes) -> Self { + AttachmentType { + buffer: this.into(), + } + } +} + +impl From> for ZBytes { + fn from(this: AttachmentType) -> Self { + this.buffer.into() + } +} + +mod tests { + + #[test] + fn serializer() { + use std::borrow::Cow; + + use rand::Rng; + use zenoh_buffers::{ZBuf, ZSlice}; + #[cfg(feature = "shared-memory")] + use zenoh_core::Wait; + use zenoh_protocol::core::Parameters; + #[cfg(feature = "shared-memory")] + use zenoh_shm::api::{ + buffer::zshm::{zshm, ZShm}, + protocol_implementations::posix::{ + posix_shm_provider_backend::PosixShmProviderBackend, protocol_id::POSIX_PROTOCOL_ID, + }, + provider::shm_provider::ShmProviderBuilder, + }; + + use super::ZBytes; + use crate::bytes::{Deserialize, Serialize, ZSerde}; + + const NUM: usize = 1_000; + + macro_rules! 
serialize_deserialize { + ($t:ty, $in:expr) => { + let i = $in; + let t = i.clone(); + println!("Serialize:\t{:?}", t); + let v = ZBytes::serialize(t); + println!("Deserialize:\t{:?}", v); + let o: $t = v.deserialize().unwrap(); + assert_eq!(i, o); + println!(""); + }; + } + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn numeric() { + let mut rng = rand::thread_rng(); + + // unsigned integer + serialize_deserialize!(u8, u8::MIN); + serialize_deserialize!(u16, u16::MIN); + serialize_deserialize!(u32, u32::MIN); + serialize_deserialize!(u64, u64::MIN); + serialize_deserialize!(usize, usize::MIN); + + serialize_deserialize!(u8, u8::MAX); + serialize_deserialize!(u16, u16::MAX); + serialize_deserialize!(u32, u32::MAX); + serialize_deserialize!(u64, u64::MAX); + serialize_deserialize!(usize, usize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(u8, rng.gen::()); + serialize_deserialize!(u16, rng.gen::()); + serialize_deserialize!(u32, rng.gen::()); + serialize_deserialize!(u64, rng.gen::()); + serialize_deserialize!(usize, rng.gen::()); + } + + // signed integer + serialize_deserialize!(i8, i8::MIN); + serialize_deserialize!(i16, i16::MIN); + serialize_deserialize!(i32, i32::MIN); + serialize_deserialize!(i64, i64::MIN); + serialize_deserialize!(isize, isize::MIN); + + serialize_deserialize!(i8, i8::MAX); + serialize_deserialize!(i16, i16::MAX); + serialize_deserialize!(i32, i32::MAX); + serialize_deserialize!(i64, i64::MAX); + serialize_deserialize!(isize, isize::MAX); + + for _ in 0..NUM { + serialize_deserialize!(i8, rng.gen::()); + serialize_deserialize!(i16, rng.gen::()); + serialize_deserialize!(i32, rng.gen::()); + serialize_deserialize!(i64, rng.gen::()); + serialize_deserialize!(isize, rng.gen::()); + } + + // float + serialize_deserialize!(f32, f32::MIN); + serialize_deserialize!(f64, f64::MIN); + + serialize_deserialize!(f32, f32::MAX); + serialize_deserialize!(f64, f64::MAX); + + for _ in 0..NUM { + serialize_deserialize!(f32, rng.gen::()); + serialize_deserialize!(f64, rng.gen::()); + } + } + numeric(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn basic() { + let mut rng = rand::thread_rng(); + + // bool + serialize_deserialize!(bool, true); + serialize_deserialize!(bool, false); + + // char + serialize_deserialize!(char, char::MAX); + serialize_deserialize!(char, rng.gen::()); + + let a = 'a'; + let bytes = ZSerde.serialize(a); + let s: String = ZSerde.deserialize(&bytes).unwrap(); + assert_eq!(a.to_string(), s); + + let a = String::from("a"); + let bytes = ZSerde.serialize(&a); + let s: char = ZSerde.deserialize(&bytes).unwrap(); + assert_eq!(a, s.to_string()); + + // String + serialize_deserialize!(String, ""); + serialize_deserialize!(String, String::from("abcdef")); + + // Cow + serialize_deserialize!(Cow, Cow::from("")); + serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); + + // Vec + serialize_deserialize!(Vec, vec![0u8; 0]); + serialize_deserialize!(Vec, vec![0u8; 64]); + + // Cow<[u8]> + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); + serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); + + // ZBuf + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); + serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); + } + basic(); + + // WARN: test function body produces stack overflow, so I split it into subroutines + #[inline(never)] + fn reader_writer() { + let mut bytes = ZBytes::empty(); + let mut 
writer = bytes.writer(); + + let i1 = 1_u8; + let i2 = String::from("abcdef"); + let i3 = vec![2u8; 64]; + + println!("Write: {:?}", i1); + writer.serialize(i1); + println!("Write: {:?}", i2); + writer.serialize(&i2); + println!("Write: {:?}", i3); + writer.serialize(&i3); + + let mut reader = bytes.reader(); + let o1: u8 = reader.deserialize().unwrap(); + println!("Read: {:?}", o1); + let o2: String = reader.deserialize().unwrap(); + println!("Read: {:?}", o2); + let o3: Vec = reader.deserialize().unwrap(); + println!("Read: {:?}", o3); + + println!(); + + assert_eq!(i1, o1); + assert_eq!(i2, o2); + assert_eq!(i3, o3); + } + reader_writer(); + + // SHM + #[cfg(feature = "shared-memory")] + fn shm() { + // create an SHM backend... + let backend = PosixShmProviderBackend::builder() + .with_size(4096) + .unwrap() + .res() + .unwrap(); + // ...and an SHM provider + let provider = ShmProviderBuilder::builder() + .protocol_id::() + .backend(backend) + .res(); + + // Prepare a layout for allocations + let layout = provider.alloc(1024).into_layout().unwrap(); + + // allocate an SHM buffer + let mutable_shm_buf = layout.alloc().wait().unwrap(); + + // convert to immutable SHM buffer + let immutable_shm_buf: ZShm = mutable_shm_buf.into(); + + serialize_deserialize!(&zshm, immutable_shm_buf); + } + #[cfg(feature = "shared-memory")] + shm(); + + // Parameters + serialize_deserialize!(Parameters, Parameters::from("")); + serialize_deserialize!(Parameters, Parameters::from("a=1;b=2;c3")); + + // Bytes + serialize_deserialize!(bytes::Bytes, bytes::Bytes::from(vec![1, 2, 3, 4])); + serialize_deserialize!(bytes::Bytes, bytes::Bytes::from("Hello World")); + + // Tuple + serialize_deserialize!((usize, usize), (0, 1)); + serialize_deserialize!((usize, String), (0, String::from("a"))); + serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); + serialize_deserialize!( + (Cow<'static, [u8]>, Cow<'static, [u8]>), + (Cow::from(vec![0u8; 8]), Cow::from(vec![0u8; 8])) + ); + serialize_deserialize!( + (Cow<'static, str>, Cow<'static, str>), + (Cow::from("a"), Cow::from("b")) + ); + + fn iterator() { + let v: [usize; 5] = [0, 1, 2, 3, 4]; + println!("Serialize:\t{:?}", v); + let p = ZBytes::from_iter(v.iter()); + println!("Deserialize:\t{:?}\n", p); + for (i, t) in p.iter::().enumerate() { + assert_eq!(i, t.unwrap()); + } + + let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; + println!("Serialize:\t{:?}", v); + let p = ZBytes::from_iter(v.drain(..)); + println!("Deserialize:\t{:?}\n", p); + let mut iter = p.iter::<[u8; 4]>(); + assert_eq!(iter.next().unwrap().unwrap(), [0, 1, 2, 3]); + assert_eq!(iter.next().unwrap().unwrap(), [4, 5, 6, 7]); + assert_eq!(iter.next().unwrap().unwrap(), [8, 9, 10, 11]); + assert_eq!(iter.next().unwrap().unwrap(), [12, 13, 14, 15]); + assert!(iter.next().is_none()); + } + iterator(); + + fn hashmap() { + use std::collections::HashMap; + let mut hm: HashMap = HashMap::new(); + hm.insert(0, 0); + hm.insert(1, 1); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap> = HashMap::new(); + hm.insert(0, vec![0u8; 8]); + hm.insert(1, vec![1u8; 16]); + println!("Serialize:\t{:?}", hm); + let p = ZBytes::from(hm.clone()); + println!("Deserialize:\t{:?}\n", p); + let o = p.deserialize::>>().unwrap(); + assert_eq!(hm, o); + + let mut hm: HashMap = HashMap::new(); + hm.insert(0, ZSlice::from(vec![0u8; 
8]));
+            hm.insert(1, ZSlice::from(vec![1u8; 16]));
+            println!("Serialize:\t{:?}", hm);
+            let p = ZBytes::from(hm.clone());
+            println!("Deserialize:\t{:?}\n", p);
+            let o = p.deserialize::<HashMap<usize, ZSlice>>().unwrap();
+            assert_eq!(hm, o);
+
+            let mut hm: HashMap<usize, ZBuf> = HashMap::new();
+            hm.insert(0, ZBuf::from(vec![0u8; 8]));
+            hm.insert(1, ZBuf::from(vec![1u8; 16]));
+            println!("Serialize:\t{:?}", hm);
+            let p = ZBytes::from(hm.clone());
+            println!("Deserialize:\t{:?}\n", p);
+            let o = p.deserialize::<HashMap<usize, ZBuf>>().unwrap();
+            assert_eq!(hm, o);
+
+            let mut hm: HashMap<String, String> = HashMap::new();
+            hm.insert(String::from("0"), String::from("a"));
+            hm.insert(String::from("1"), String::from("b"));
+            println!("Serialize:\t{:?}", hm);
+            let p = ZBytes::from(hm.clone());
+            println!("Deserialize:\t{:?}\n", p);
+            let o = p.deserialize::<HashMap<String, String>>().unwrap();
+            assert_eq!(hm, o);
+
+            let mut hm: HashMap<Cow<'static, str>, Cow<'static, str>> = HashMap::new();
+            hm.insert(Cow::from("0"), Cow::from("a"));
+            hm.insert(Cow::from("1"), Cow::from("b"));
+            println!("Serialize:\t{:?}", hm);
+            let p = ZBytes::from(hm.clone());
+            println!("Deserialize:\t{:?}\n", p);
+            let o = p
+                .deserialize::<HashMap<Cow<'static, str>, Cow<'static, str>>>()
+                .unwrap();
+            assert_eq!(hm, o);
+        }
+        hashmap();
+    }
+}
diff --git a/zenoh/src/api/encoding.rs b/zenoh/src/api/encoding.rs
new file mode 100644
index 0000000000..bc335a5fc2
--- /dev/null
+++ b/zenoh/src/api/encoding.rs
@@ -0,0 +1,1000 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr};
+
+use phf::phf_map;
+use zenoh_buffers::{ZBuf, ZSlice};
+use zenoh_protocol::core::EncodingId;
+#[cfg(feature = "shared-memory")]
+use zenoh_shm::api::buffer::{zshm::ZShm, zshmmut::ZShmMut};
+
+use super::bytes::ZBytes;
+
+/// Default encoding values used by Zenoh.
+///
+/// An encoding has a similar role to *Content-type* in HTTP: it indicates, when present, how data should be interpreted by the application.
+///
+/// Please note that the Zenoh protocol neither imposes any encoding value nor operates on it.
+/// It can be seen as optional metadata, carried over by Zenoh so that the application may perform different operations depending on the encoding value.
+///
+/// A set of associated constants is provided to cover the most common encodings, for user convenience.
+/// This is particularly useful in helping Zenoh to perform additional wire-level optimizations.
+///
+/// # Examples
+///
+/// ### String operations
+///
+/// Create an [`Encoding`] from a string and vice versa.
+/// ```
+/// use zenoh::bytes::Encoding;
+///
+/// let encoding: Encoding = "text/plain".into();
+/// let text: String = encoding.clone().into();
+/// assert_eq!("text/plain", &text);
+/// ```
+///
+/// ### Constants and cow operations
+///
+/// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use
+/// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain their string representation.
+/// ```
+/// use zenoh::bytes::Encoding;
+/// use std::borrow::Cow;
+///
+/// // This allocates
+/// assert_eq!("text/plain", &String::from(Encoding::TEXT_PLAIN));
+/// // This does NOT allocate
+/// assert_eq!("text/plain", &Cow::from(Encoding::TEXT_PLAIN));
+/// ```
+///
+/// ### Schema
+///
+/// Additionally, a schema can be associated with the encoding.
+/// The convention is to use the `;` separator if an encoding is created from a string.
+/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schema to one of the associated constants.
+/// ```
+/// use zenoh::bytes::Encoding;
+///
+/// let encoding1 = Encoding::from("text/plain;utf-8");
+/// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8");
+/// assert_eq!(encoding1, encoding2);
+/// assert_eq!("text/plain;utf-8", &encoding1.to_string());
+/// assert_eq!("text/plain;utf-8", &encoding2.to_string());
+/// ```
+#[repr(transparent)]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Encoding(zenoh_protocol::core::Encoding);
+
+impl Encoding {
+    const SCHEMA_SEP: char = ';';
+
+    // For compatibility purposes Zenoh reserves any prefix value from `0` to `1023`, inclusive.
+
+    // - Primitive types supported in all Zenoh bindings
+    /// Just some bytes.
+    ///
+    /// Constant alias for string: `"zenoh/bytes"`.
+    ///
+    /// Usually used for types: `Vec<u8>`, `&[u8]`, `[u8; N]`, `Cow<[u8]>`.
+    pub const ZENOH_BYTES: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 0,
+        schema: None,
+    });
+    /// A VLE-encoded signed little-endian 8-bit integer. Binary representation uses two's complement.
+    ///
+    /// Constant alias for string: `"zenoh/int8"`.
+    ///
+    /// Usually used for types: `i8`.
+    pub const ZENOH_INT8: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 1,
+        schema: None,
+    });
+    /// A VLE-encoded signed little-endian 16-bit integer. Binary representation uses two's complement.
+    ///
+    /// Constant alias for string: `"zenoh/int16"`.
+    ///
+    /// Usually used for types: `i16`.
+    pub const ZENOH_INT16: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 2,
+        schema: None,
+    });
+    /// A VLE-encoded signed little-endian 32-bit integer. Binary representation uses two's complement.
+    ///
+    /// Constant alias for string: `"zenoh/int32"`.
+    ///
+    /// Usually used for types: `i32`.
+    pub const ZENOH_INT32: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 3,
+        schema: None,
+    });
+    /// A VLE-encoded signed little-endian 64-bit integer. Binary representation uses two's complement.
+    ///
+    /// Constant alias for string: `"zenoh/int64"`.
+    ///
+    /// Usually used for types: `i64`.
+    pub const ZENOH_INT64: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 4,
+        schema: None,
+    });
+    /// A VLE-encoded signed little-endian 128-bit integer. Binary representation uses two's complement.
+    ///
+    /// Constant alias for string: `"zenoh/int128"`.
+    ///
+    /// Usually used for types: `i128`.
+    pub const ZENOH_INT128: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 5,
+        schema: None,
+    });
+    /// A VLE-encoded unsigned little-endian 8-bit integer.
+    ///
+    /// Constant alias for string: `"zenoh/uint8"`.
+    ///
+    /// Usually used for types: `u8`.
+    pub const ZENOH_UINT8: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 6,
+        schema: None,
+    });
+    /// A VLE-encoded unsigned little-endian 16-bit integer.
+    ///
+    /// Constant alias for string: `"zenoh/uint16"`.
+    ///
+    /// Usually used for types: `u16`.
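+    ///
+    /// Illustrative sketch (relying only on the string aliasing described in the
+    /// type-level docs): the constant and an [`Encoding`] parsed from its alias
+    /// should compare equal.
+    /// ```
+    /// use zenoh::bytes::Encoding;
+    ///
+    /// assert_eq!(Encoding::ZENOH_UINT16, Encoding::from("zenoh/uint16"));
+    /// ```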
+ pub const ZENOH_UINT16: Encoding = Self(zenoh_protocol::core::Encoding { + id: 7, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 32bit integer. + /// + /// Constant alias for string: `"zenoh/uint32"`. + /// + /// Usually used for types: `u32`. + pub const ZENOH_UINT32: Encoding = Self(zenoh_protocol::core::Encoding { + id: 8, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 64bit integer. + /// + /// Constant alias for string: `"zenoh/uint64"`. + /// + /// Usually used for types: `u64`. + pub const ZENOH_UINT64: Encoding = Self(zenoh_protocol::core::Encoding { + id: 9, + schema: None, + }); + /// A VLE-encoded unsigned little-endian 128bit integer. + /// + /// Constant alias for string: `"zenoh/uint128"`. + /// + /// Usually used for types: `u128`. + pub const ZENOH_UINT128: Encoding = Self(zenoh_protocol::core::Encoding { + id: 10, + schema: None, + }); + /// A VLE-encoded 32bit float. Binary representation uses *IEEE 754-2008* *binary32* . + /// + /// Constant alias for string: `"zenoh/float32"`. + /// + /// Usually used for types: `f32`. + pub const ZENOH_FLOAT32: Encoding = Self(zenoh_protocol::core::Encoding { + id: 11, + schema: None, + }); + /// A VLE-encoded 64bit float. Binary representation uses *IEEE 754-2008* *binary64*. + /// + /// Constant alias for string: `"zenoh/float64"`. + /// + /// Usually used for types: `f64`. + pub const ZENOH_FLOAT64: Encoding = Self(zenoh_protocol::core::Encoding { + id: 12, + schema: None, + }); + /// A boolean. `0` is `false`, `1` is `true`. Other values are invalid. + /// + /// Constant alias for string: `"zenoh/bool"`. + /// + /// Usually used for types: `bool`. + pub const ZENOH_BOOL: Encoding = Self(zenoh_protocol::core::Encoding { + id: 13, + schema: None, + }); + /// A UTF-8 string. + /// + /// Constant alias for string: `"zenoh/string"`. + /// + /// Usually used for types: `String`, `&str`, `Cow`, `char`. + pub const ZENOH_STRING: Encoding = Self(zenoh_protocol::core::Encoding { + id: 14, + schema: None, + }); + /// A zenoh error. + /// + /// Constant alias for string: `"zenoh/error"`. + /// + /// Usually used for types: `ReplyError`. + pub const ZENOH_ERROR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 15, + schema: None, + }); + + // - Advanced types may be supported in some of the Zenoh bindings. + /// An application-specific stream of bytes. + /// + /// Constant alias for string: `"application/octet-stream"`. + pub const APPLICATION_OCTET_STREAM: Encoding = Self(zenoh_protocol::core::Encoding { + id: 16, + schema: None, + }); + /// A textual file. + /// + /// Constant alias for string: `"text/plain"`. + pub const TEXT_PLAIN: Encoding = Self(zenoh_protocol::core::Encoding { + id: 17, + schema: None, + }); + /// JSON data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/json"`. + pub const APPLICATION_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 18, + schema: None, + }); + /// JSON data intended to be human readable. + /// + /// Constant alias for string: `"text/json"`. + pub const TEXT_JSON: Encoding = Self(zenoh_protocol::core::Encoding { + id: 19, + schema: None, + }); + /// A Common Data Representation (CDR)-encoded data. + /// + /// Constant alias for string: `"application/cdr"`. + pub const APPLICATION_CDR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 20, + schema: None, + }); + /// A Concise Binary Object Representation (CBOR)-encoded data. + /// + /// Constant alias for string: `"application/cbor"`. 
+ pub const APPLICATION_CBOR: Encoding = Self(zenoh_protocol::core::Encoding { + id: 21, + schema: None, + }); + /// YAML data intended to be consumed by an application. + /// + /// Constant alias for string: `"application/yaml"`. + pub const APPLICATION_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 22, + schema: None, + }); + /// YAML data intended to be human readable. + /// + /// Constant alias for string: `"text/yaml"`. + pub const TEXT_YAML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 23, + schema: None, + }); + /// JSON5 encoded data that are human readable. + /// + /// Constant alias for string: `"text/json5"`. + pub const TEXT_JSON5: Encoding = Self(zenoh_protocol::core::Encoding { + id: 24, + schema: None, + }); + /// A Python object serialized using [pickle](https://docs.python.org/3/library/pickle.html). + /// + /// Constant alias for string: `"application/python-serialized-object"`. + pub const APPLICATION_PYTHON_SERIALIZED_OBJECT: Encoding = + Self(zenoh_protocol::core::Encoding { + id: 25, + schema: None, + }); + /// An application-specific protobuf-encoded data. + /// + /// Constant alias for string: `"application/protobuf"`. + pub const APPLICATION_PROTOBUF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 26, + schema: None, + }); + /// A Java serialized object. + /// + /// Constant alias for string: `"application/java-serialized-object"`. + pub const APPLICATION_JAVA_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 27, + schema: None, + }); + /// An [openmetrics](https://github.com/OpenObservability/OpenMetrics) data, common used by [Prometheus](https://prometheus.io/). + /// + /// Constant alias for string: `"application/openmetrics-text"`. + pub const APPLICATION_OPENMETRICS_TEXT: Encoding = Self(zenoh_protocol::core::Encoding { + id: 28, + schema: None, + }); + /// A Portable Network Graphics (PNG) image. + /// + /// Constant alias for string: `"image/png"`. + pub const IMAGE_PNG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 29, + schema: None, + }); + /// A Joint Photographic Experts Group (JPEG) image. + /// + /// Constant alias for string: `"image/jpeg"`. + pub const IMAGE_JPEG: Encoding = Self(zenoh_protocol::core::Encoding { + id: 30, + schema: None, + }); + /// A Graphics Interchange Format (GIF) image. + /// + /// Constant alias for string: `"image/gif"`. + pub const IMAGE_GIF: Encoding = Self(zenoh_protocol::core::Encoding { + id: 31, + schema: None, + }); + /// A BitMap (BMP) image. + /// + /// Constant alias for string: `"image/bmp"`. + pub const IMAGE_BMP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 32, + schema: None, + }); + /// A Web Portable (WebP) image. + /// + /// Constant alias for string: `"image/webp"`. + pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { + id: 33, + schema: None, + }); + /// An XML file intended to be consumed by an application.. + /// + /// Constant alias for string: `"application/xml"`. + pub const APPLICATION_XML: Encoding = Self(zenoh_protocol::core::Encoding { + id: 34, + schema: None, + }); + /// An encoded a list of tuples, each consisting of a name and a value. + /// + /// Constant alias for string: `"application/x-www-form-urlencoded"`. + pub const APPLICATION_X_WWW_FORM_URLENCODED: Encoding = Self(zenoh_protocol::core::Encoding { + id: 35, + schema: None, + }); + /// An HTML file. + /// + /// Constant alias for string: `"text/html"`. 
+    pub const TEXT_HTML: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 36,
+        schema: None,
+    });
+    /// An XML file that is human readable.
+    ///
+    /// Constant alias for string: `"text/xml"`.
+    pub const TEXT_XML: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 37,
+        schema: None,
+    });
+    /// A CSS file.
+    ///
+    /// Constant alias for string: `"text/css"`.
+    pub const TEXT_CSS: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 38,
+        schema: None,
+    });
+    /// A JavaScript file.
+    ///
+    /// Constant alias for string: `"text/javascript"`.
+    pub const TEXT_JAVASCRIPT: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 39,
+        schema: None,
+    });
+    /// A MarkDown file.
+    ///
+    /// Constant alias for string: `"text/markdown"`.
+    pub const TEXT_MARKDOWN: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 40,
+        schema: None,
+    });
+    /// A CSV file.
+    ///
+    /// Constant alias for string: `"text/csv"`.
+    pub const TEXT_CSV: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 41,
+        schema: None,
+    });
+    /// An application-specific SQL query.
+    ///
+    /// Constant alias for string: `"application/sql"`.
+    pub const APPLICATION_SQL: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 42,
+        schema: None,
+    });
+    /// Constrained Application Protocol (CoAP) data intended for CoAP-to-HTTP and HTTP-to-CoAP proxies.
+    ///
+    /// Constant alias for string: `"application/coap-payload"`.
+    pub const APPLICATION_COAP_PAYLOAD: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 43,
+        schema: None,
+    });
+    /// Defines a JSON document structure for expressing a sequence of operations to apply to a JSON document.
+    ///
+    /// Constant alias for string: `"application/json-patch+json"`.
+    pub const APPLICATION_JSON_PATCH_JSON: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 44,
+        schema: None,
+    });
+    /// A JSON text sequence consists of any number of JSON texts, all encoded in UTF-8.
+    ///
+    /// Constant alias for string: `"application/json-seq"`.
+    pub const APPLICATION_JSON_SEQ: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 45,
+        schema: None,
+    });
+    /// A JSONPath defines a string syntax for selecting and extracting JSON values from within a given JSON value.
+    ///
+    /// Constant alias for string: `"application/jsonpath"`.
+    pub const APPLICATION_JSONPATH: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 46,
+        schema: None,
+    });
+    /// A JSON Web Token (JWT).
+    ///
+    /// Constant alias for string: `"application/jwt"`.
+    pub const APPLICATION_JWT: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 47,
+        schema: None,
+    });
+    /// An application-specific MPEG-4 encoded data, either audio or video.
+    ///
+    /// Constant alias for string: `"application/mp4"`.
+    pub const APPLICATION_MP4: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 48,
+        schema: None,
+    });
+    /// A SOAP 1.2 message serialized as XML 1.0.
+    ///
+    /// Constant alias for string: `"application/soap+xml"`.
+    pub const APPLICATION_SOAP_XML: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 49,
+        schema: None,
+    });
+    /// A YANG-encoded data commonly used by the Network Configuration Protocol (NETCONF).
+    ///
+    /// Constant alias for string: `"application/yang"`.
+    pub const APPLICATION_YANG: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 50,
+        schema: None,
+    });
+    /// A MPEG-4 Advanced Audio Coding (AAC) media.
+    ///
+    /// Constant alias for string: `"audio/aac"`.
+    pub const AUDIO_AAC: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 51,
+        schema: None,
+    });
+    /// A Free Lossless Audio Codec (FLAC) media.
+    ///
+    /// Constant alias for string: `"audio/flac"`.
+    pub const AUDIO_FLAC: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 52,
+        schema: None,
+    });
+    /// An audio codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority.
+    ///
+    /// Constant alias for string: `"audio/mp4"`.
+    pub const AUDIO_MP4: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 53,
+        schema: None,
+    });
+    /// An Ogg-encapsulated audio stream.
+    ///
+    /// Constant alias for string: `"audio/ogg"`.
+    pub const AUDIO_OGG: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 54,
+        schema: None,
+    });
+    /// A Vorbis-encoded audio stream.
+    ///
+    /// Constant alias for string: `"audio/vorbis"`.
+    pub const AUDIO_VORBIS: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 55,
+        schema: None,
+    });
+    /// A h261-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/h261"`.
+    pub const VIDEO_H261: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 56,
+        schema: None,
+    });
+    /// A h263-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/h263"`.
+    pub const VIDEO_H263: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 57,
+        schema: None,
+    });
+    /// A h264-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/h264"`.
+    pub const VIDEO_H264: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 58,
+        schema: None,
+    });
+    /// A h265-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/h265"`.
+    pub const VIDEO_H265: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 59,
+        schema: None,
+    });
+    /// A h266-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/h266"`.
+    pub const VIDEO_H266: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 60,
+        schema: None,
+    });
+    /// A video codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority.
+    ///
+    /// Constant alias for string: `"video/mp4"`.
+    pub const VIDEO_MP4: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 61,
+        schema: None,
+    });
+    /// An Ogg-encapsulated video stream.
+    ///
+    /// Constant alias for string: `"video/ogg"`.
+    pub const VIDEO_OGG: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 62,
+        schema: None,
+    });
+    /// An uncompressed, studio-quality video stream.
+    ///
+    /// Constant alias for string: `"video/raw"`.
+    pub const VIDEO_RAW: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 63,
+        schema: None,
+    });
+    /// A VP8-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/vp8"`.
+    pub const VIDEO_VP8: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 64,
+        schema: None,
+    });
+    /// A VP9-encoded video stream.
+    ///
+    /// Constant alias for string: `"video/vp9"`.
+    pub const VIDEO_VP9: Encoding = Self(zenoh_protocol::core::Encoding {
+        id: 65,
+        schema: None,
+    });
+
+    const ID_TO_STR: phf::Map<EncodingId, &'static str> = phf_map! {
+        0u16 => "zenoh/bytes",
+        1u16 => "zenoh/int8",
+        2u16 => "zenoh/int16",
+        3u16 => "zenoh/int32",
+        4u16 => "zenoh/int64",
+        5u16 => "zenoh/int128",
+        6u16 => "zenoh/uint8",
+        7u16 => "zenoh/uint16",
+        8u16 => "zenoh/uint32",
+        9u16 => "zenoh/uint64",
+        10u16 => "zenoh/uint128",
+        11u16 => "zenoh/float32",
+        12u16 => "zenoh/float64",
+        13u16 => "zenoh/bool",
+        14u16 => "zenoh/string",
+        15u16 => "zenoh/error",
+        16u16 => "application/octet-stream",
+        17u16 => "text/plain",
+        18u16 => "application/json",
+        19u16 => "text/json",
+        20u16 => "application/cdr",
+        21u16 => "application/cbor",
+        22u16 => "application/yaml",
+        23u16 => "text/yaml",
+        24u16 => "text/json5",
+        25u16 => "application/python-serialized-object",
+        26u16 => "application/protobuf",
+        27u16 => "application/java-serialized-object",
+        28u16 => "application/openmetrics-text",
+        29u16 => "image/png",
+        30u16 => "image/jpeg",
+        31u16 => "image/gif",
+        32u16 => "image/bmp",
+        33u16 => "image/webp",
+        34u16 => "application/xml",
+        35u16 => "application/x-www-form-urlencoded",
+        36u16 => "text/html",
+        37u16 => "text/xml",
+        38u16 => "text/css",
+        39u16 => "text/javascript",
+        40u16 => "text/markdown",
+        41u16 => "text/csv",
+        42u16 => "application/sql",
+        43u16 => "application/coap-payload",
+        44u16 => "application/json-patch+json",
+        45u16 => "application/json-seq",
+        46u16 => "application/jsonpath",
+        47u16 => "application/jwt",
+        48u16 => "application/mp4",
+        49u16 => "application/soap+xml",
+        50u16 => "application/yang",
+        51u16 => "audio/aac",
+        52u16 => "audio/flac",
+        53u16 => "audio/mp4",
+        54u16 => "audio/ogg",
+        55u16 => "audio/vorbis",
+        56u16 => "video/h261",
+        57u16 => "video/h263",
+        58u16 => "video/h264",
+        59u16 => "video/h265",
+        60u16 => "video/h266",
+        61u16 => "video/mp4",
+        62u16 => "video/ogg",
+        63u16 => "video/raw",
+        64u16 => "video/vp8",
+        65u16 => "video/vp9",
+    };
+
+    const STR_TO_ID: phf::Map<&'static str, EncodingId> = phf_map! {
{ + "zenoh/bytes" => 0u16, + "zenoh/int8" => 1u16, + "zenoh/int16" => 2u16, + "zenoh/int32" => 3u16, + "zenoh/int64" => 4u16, + "zenoh/int128" => 5u16, + "zenoh/uint8" => 6u16, + "zenoh/uint16" => 7u16, + "zenoh/uint32" => 8u16, + "zenoh/uint64" => 9u16, + "zenoh/uint128" => 10u16, + "zenoh/float32" => 11u16, + "zenoh/float64" => 12u16, + "zenoh/bool" => 13u16, + "zenoh/string" => 14u16, + "zenoh/error" => 15u16, + "application/octet-stream" => 16u16, + "text/plain" => 17u16, + "application/json" => 18u16, + "text/json" => 19u16, + "application/cdr" => 20u16, + "application/cbor" => 21u16, + "application/yaml" => 22u16, + "text/yaml" => 23u16, + "text/json5" => 24u16, + "application/python-serialized-object" => 25u16, + "application/protobuf" => 26u16, + "application/java-serialized-object" => 27u16, + "application/openmetrics-text" => 28u16, + "image/png" => 29u16, + "image/jpeg" => 30u16, + "image/gif" => 31u16, + "image/bmp" => 32u16, + "image/webp" => 33u16, + "application/xml" => 34u16, + "application/x-www-form-urlencoded" => 35u16, + "text/html" => 36u16, + "text/xml" => 37u16, + "text/css" => 38u16, + "text/javascript" => 39u16, + "text/markdown" => 40u16, + "text/csv" => 41u16, + "application/sql" => 42u16, + "application/coap-payload" => 43u16, + "application/json-patch+json" => 44u16, + "application/json-seq" => 45u16, + "application/jsonpath" => 46u16, + "application/jwt" => 47u16, + "application/mp4" => 48u16, + "application/soap+xml" => 49u16, + "application/yang" => 50u16, + "audio/aac" => 51u16, + "audio/flac" => 52u16, + "audio/mp4" => 53u16, + "audio/ogg" => 54u16, + "audio/vorbis" => 55u16, + "video/h261" => 56u16, + "video/h263" => 57u16, + "video/h264" => 58u16, + "video/h265" => 59u16, + "video/h266" => 60u16, + "video/mp4" => 61u16, + "video/ogg" => 62u16, + "video/raw" => 63u16, + "video/vp8" => 64u16, + "video/vp9" => 65u16, + }; + + /// The default [`Encoding`] is [`ZENOH_BYTES`](Encoding::ZENOH_BYTES). + pub const fn default() -> Self { + Self::ZENOH_BYTES + } + + /// Set a schema to this encoding. Zenoh does not define what a schema is and its semantichs is left to the implementer. + /// E.g. a common schema for `text/plain` encoding is `utf-8`. + pub fn with_schema(mut self, s: S) -> Self + where + S: Into, + { + let s: String = s.into(); + self.0.schema = Some(s.into_boxed_str().into_boxed_bytes().into()); + self + } +} + +impl Default for Encoding { + fn default() -> Self { + Self::default() + } +} + +impl From<&str> for Encoding { + fn from(t: &str) -> Self { + let mut inner = zenoh_protocol::core::Encoding::empty(); + + // Check if empty + if t.is_empty() { + return Encoding(inner); + } + + // Everything before `;` may be mapped to a known id + let (id, mut schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); + if let Some(id) = Encoding::STR_TO_ID.get(id).copied() { + inner.id = id; + // if id is not recognized, e.g. 
`t == "my_encoding"`, put it in the schema + } else { + schema = t; + } + if !schema.is_empty() { + inner.schema = Some(ZSlice::from(schema.to_string().into_bytes())); + } + + Encoding(inner) + } +} + +impl From for Encoding { + fn from(value: String) -> Self { + Self::from(value.as_str()) + } +} + +impl FromStr for Encoding { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + Ok(Self::from(s)) + } +} + +impl From<&Encoding> for Cow<'static, str> { + fn from(encoding: &Encoding) -> Self { + fn su8_to_str(schema: &[u8]) -> &str { + std::str::from_utf8(schema).unwrap_or("unknown(non-utf8)") + } + + match ( + Encoding::ID_TO_STR.get(&encoding.0.id).copied(), + encoding.0.schema.as_ref(), + ) { + // Perfect match + (Some(i), None) => Cow::Borrowed(i), + // ID and schema + (Some(i), Some(s)) => { + Cow::Owned(format!("{}{}{}", i, Encoding::SCHEMA_SEP, su8_to_str(s))) + } + // + (None, Some(s)) => Cow::Owned(format!( + "unknown({}){}{}", + encoding.0.id, + Encoding::SCHEMA_SEP, + su8_to_str(s) + )), + (None, None) => Cow::Owned(format!("unknown({})", encoding.0.id)), + } + } +} + +impl From for Cow<'static, str> { + fn from(encoding: Encoding) -> Self { + Self::from(&encoding) + } +} + +impl From for String { + fn from(encoding: Encoding) -> Self { + encoding.to_string() + } +} + +impl From for zenoh_protocol::core::Encoding { + fn from(value: Encoding) -> Self { + value.0 + } +} + +impl From for Encoding { + fn from(value: zenoh_protocol::core::Encoding) -> Self { + Self(value) + } +} + +impl fmt::Display for Encoding { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { + let s = Cow::from(self); + f.write_str(s.as_ref()) + } +} + +#[allow(dead_code)] +// - Encoding trait +pub trait EncodingMapping { + const ENCODING: Encoding; +} + +// Bytes +impl EncodingMapping for ZBytes { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for ZBuf { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for Vec { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for &[u8] { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +impl EncodingMapping for Cow<'_, [u8]> { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} + +// String +impl EncodingMapping for String { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +impl EncodingMapping for &str { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +impl EncodingMapping for Cow<'_, str> { + const ENCODING: Encoding = Encoding::ZENOH_STRING; +} + +// Zenoh unsigned integers +impl EncodingMapping for u8 { + const ENCODING: Encoding = Encoding::ZENOH_UINT8; +} + +impl EncodingMapping for u16 { + const ENCODING: Encoding = Encoding::ZENOH_UINT16; +} + +impl EncodingMapping for u32 { + const ENCODING: Encoding = Encoding::ZENOH_UINT32; +} + +impl EncodingMapping for u64 { + const ENCODING: Encoding = Encoding::ZENOH_UINT64; +} + +impl EncodingMapping for u128 { + const ENCODING: Encoding = Encoding::ZENOH_UINT128; +} + +impl EncodingMapping for usize { + #[cfg(target_pointer_width = "16")] + const ENCODING: Encoding = Encoding::ZENOH_UINT16; + #[cfg(target_pointer_width = "32")] + const ENCODING: Encoding = Encoding::ZENOH_UINT32; + #[cfg(target_pointer_width = "64")] + const ENCODING: Encoding = Encoding::ZENOH_UINT64; +} + +// Zenoh signed integers +impl EncodingMapping for i8 { + const ENCODING: Encoding = Encoding::ZENOH_INT8; +} + +impl EncodingMapping for i16 { + const ENCODING: Encoding = Encoding::ZENOH_INT16; +} + +impl 
EncodingMapping for i32 { + const ENCODING: Encoding = Encoding::ZENOH_INT32; +} + +impl EncodingMapping for i64 { + const ENCODING: Encoding = Encoding::ZENOH_INT64; +} + +impl EncodingMapping for i128 { + const ENCODING: Encoding = Encoding::ZENOH_INT128; +} + +impl EncodingMapping for isize { + #[cfg(target_pointer_width = "8")] + const ENCODING: Encoding = Encoding::ZENOH_INT8; + #[cfg(target_pointer_width = "16")] + const ENCODING: Encoding = Encoding::ZENOH_INT16; + #[cfg(target_pointer_width = "32")] + const ENCODING: Encoding = Encoding::ZENOH_INT32; + #[cfg(target_pointer_width = "64")] + const ENCODING: Encoding = Encoding::ZENOH_INT64; + #[cfg(target_pointer_width = "128")] + const ENCODING: Encoding = Encoding::ZENOH_INT128; +} + +// Zenoh floats +impl EncodingMapping for f32 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT32; +} + +impl EncodingMapping for f64 { + const ENCODING: Encoding = Encoding::ZENOH_FLOAT64; +} + +// Zenoh bool +impl EncodingMapping for bool { + const ENCODING: Encoding = Encoding::ZENOH_BOOL; +} + +// - Zenoh advanced types encoders/decoders +impl EncodingMapping for serde_json::Value { + const ENCODING: Encoding = Encoding::APPLICATION_JSON; +} + +impl EncodingMapping for serde_yaml::Value { + const ENCODING: Encoding = Encoding::APPLICATION_YAML; +} + +impl EncodingMapping for serde_cbor::Value { + const ENCODING: Encoding = Encoding::APPLICATION_CBOR; +} + +impl EncodingMapping for serde_pickle::Value { + const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; +} + +impl Encoding { + #[zenoh_macros::internal] + pub fn id(&self) -> EncodingId { + self.0.id + } + #[zenoh_macros::internal] + pub fn schema(&self) -> Option<&ZSlice> { + self.0.schema.as_ref() + } + #[zenoh_macros::internal] + pub fn new(id: EncodingId, schema: Option) -> Self { + Encoding(zenoh_protocol::core::Encoding { id, schema }) + } +} + +// - Zenoh SHM +#[cfg(feature = "shared-memory")] +impl EncodingMapping for ZShm { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} +#[cfg(feature = "shared-memory")] +impl EncodingMapping for ZShmMut { + const ENCODING: Encoding = Encoding::ZENOH_BYTES; +} diff --git a/zenoh/src/api/handlers/callback.rs b/zenoh/src/api/handlers/callback.rs new file mode 100644 index 0000000000..4f49e7c41f --- /dev/null +++ b/zenoh/src/api/handlers/callback.rs @@ -0,0 +1,90 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! Callback handler trait. +use super::{Dyn, IntoHandler}; + +/// A function that can transform a [`FnMut`]`(T)` to +/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). +pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { + let lock = std::sync::Mutex::new(fnmut); + move |x| zlock!(lock)(x) +} + +/// An immutable callback function. 
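+///
+/// A hedged sketch of adapting mutable state into such a callback with
+/// [`locked`] (the `zenoh::handlers` re-export is assumed):
+/// ```
+/// use zenoh::handlers::locked;
+///
+/// let mut total = 0u64;
+/// // `locked` wraps the `FnMut` in a `Mutex`, yielding a `Fn` usable as a `Callback`.
+/// let cb = locked(move |x: u64| {
+///     total += x;
+///     println!("received {x}, running total {total}");
+/// });
+/// cb(1);
+/// cb(2);
+/// ```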
+pub type Callback<'a, T> = Dyn<dyn Fn(T) + Send + Sync + 'a>;
+
+impl<'a, T, F> IntoHandler<'a, T> for F
+where
+    F: Fn(T) + Send + Sync + 'a,
+{
+    type Handler = ();
+    fn into_handler(self) -> (Callback<'a, T>, Self::Handler) {
+        (Dyn::from(self), ())
+    }
+}
+
+impl<T: Send + 'static> IntoHandler<'static, T> for (flume::Sender<T>, flume::Receiver<T>) {
+    type Handler = flume::Receiver<T>;
+
+    fn into_handler(self) -> (Callback<'static, T>, Self::Handler) {
+        let (sender, receiver) = self;
+        (
+            Dyn::new(move |t| {
+                if let Err(e) = sender.send(t) {
+                    tracing::error!("{}", e)
+                }
+            }),
+            receiver,
+        )
+    }
+}
+
+/// A handler containing 2 callback functions:
+///  - `callback`: the typical callback function.
+///  - `drop`: a callback called when this handler is dropped.
+///
+/// It is guaranteed that:
+///
+///   - `callback` will never be called once `drop` has started.
+///   - `drop` will only be called **once**, and **after every** `callback` has ended.
+///   - The two previous guarantees imply that `callback` and `drop` are never called concurrently.
+pub struct CallbackDrop<Callback, DropFn>
+where
+    DropFn: FnMut() + Send + Sync + 'static,
+{
+    pub callback: Callback,
+    pub drop: DropFn,
+}
+
+impl<Callback, DropFn> Drop for CallbackDrop<Callback, DropFn>
+where
+    DropFn: FnMut() + Send + Sync + 'static,
+{
+    fn drop(&mut self) {
+        (self.drop)()
+    }
+}
+
+impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop<OnEvent, DropFn>
+where
+    OnEvent: Fn(Event) + Send + Sync + 'a,
+    DropFn: FnMut() + Send + Sync + 'static,
+{
+    type Handler = ();
+
+    fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) {
+        (Dyn::from(move |evt| (self.callback)(evt)), ())
+    }
+}
diff --git a/zenoh/src/api/handlers/fifo.rs b/zenoh/src/api/handlers/fifo.rs
new file mode 100644
index 0000000000..f0ae1a5257
--- /dev/null
+++ b/zenoh/src/api/handlers/fifo.rs
@@ -0,0 +1,61 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
+//
+
+//! Callback handler trait.
+use super::{callback::Callback, Dyn, IntoHandler, API_DATA_RECEPTION_CHANNEL_SIZE};
+
+/// The default handler in Zenoh is a FIFO queue.
+pub struct FifoChannel {
+    capacity: usize,
+}
+
+impl FifoChannel {
+    /// Initialize the FifoChannel with the capacity size.
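+    ///
+    /// A hedged sketch of wiring an explicit capacity into a subscriber via the
+    /// builder's `with` method (paths and method names assumed from this API):
+    /// ```no_run
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::{handlers::FifoChannel, prelude::*};
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// // Replace the default FIFO capacity with an explicit one.
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .with(FifoChannel::new(256))
+    ///     .await
+    ///     .unwrap();
+    /// while let Ok(sample) = subscriber.recv_async().await {
+    ///     println!("{}", sample.key_expr());
+    /// }
+    /// # }
+    /// ```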
+    pub fn new(capacity: usize) -> Self {
+        Self { capacity }
+    }
+}
+
+impl Default for FifoChannel {
+    fn default() -> Self {
+        Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE)
+    }
+}
+
+impl<T: Send + 'static> IntoHandler<'static, T> for FifoChannel {
+    type Handler = flume::Receiver<T>;
+
+    fn into_handler(self) -> (Callback<'static, T>, Self::Handler) {
+        flume::bounded(self.capacity).into_handler()
+    }
+}
+
+impl<T: Send + 'static> IntoHandler<'static, T>
+    for (std::sync::mpsc::SyncSender<T>, std::sync::mpsc::Receiver<T>)
+{
+    type Handler = std::sync::mpsc::Receiver<T>;
+
+    fn into_handler(self) -> (Callback<'static, T>, Self::Handler) {
+        let (sender, receiver) = self;
+        (
+            Dyn::new(move |t| {
+                if let Err(e) = sender.send(t) {
+                    tracing::error!("{}", e)
+                }
+            }),
+            receiver,
+        )
+    }
+}
diff --git a/zenoh/src/api/handlers/mod.rs b/zenoh/src/api/handlers/mod.rs
new file mode 100644
index 0000000000..60ec658a4d
--- /dev/null
+++ b/zenoh/src/api/handlers/mod.rs
@@ -0,0 +1,52 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
+//
+
+//! Callback handler trait.
+mod callback;
+mod fifo;
+mod ring;
+
+pub use callback::*;
+pub use fifo::*;
+pub use ring::*;
+
+use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE;
+
+/// An alias for `Arc<T>`.
+pub type Dyn<T> = std::sync::Arc<T>;
+
+/// A type that can be converted into a [`Callback`]-Handler pair.
+///
+/// When Zenoh functions accept types that implement this trait, they use the [`Callback`] as just that,
+/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`].
+///
+/// Any closure that accepts `T` can be converted into a pair of itself and `()`.
+pub trait IntoHandler<'a, T> {
+    type Handler;
+
+    fn into_handler(self) -> (Callback<'a, T>, Self::Handler);
+}
+
+/// The default handler in Zenoh is a FIFO queue.
+#[repr(transparent)]
+#[derive(Default)]
+pub struct DefaultHandler(FifoChannel);
+
+impl<T: Send + 'static> IntoHandler<'static, T> for DefaultHandler {
+    type Handler = <FifoChannel as IntoHandler<'static, T>>::Handler;
+
+    fn into_handler(self) -> (Callback<'static, T>, Self::Handler) {
+        self.0.into_handler()
+    }
+}
diff --git a/zenoh/src/api/handlers/ring.rs b/zenoh/src/api/handlers/ring.rs
new file mode 100644
index 0000000000..7b058d1905
--- /dev/null
+++ b/zenoh/src/api/handlers/ring.rs
@@ -0,0 +1,168 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
+//
+
+//! Callback handler trait.
+use std::{
+    sync::{Arc, Weak},
+    time::{Duration, Instant},
+};
+
+use zenoh_collections::RingBuffer;
+use zenoh_result::ZResult;
+
+use super::{callback::Callback, Dyn, IntoHandler};
+use crate::api::session::API_DATA_RECEPTION_CHANNEL_SIZE;
+
+/// A synchronous ring channel with a limited size that allows users to keep the last N elements.
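+///
+/// A hedged sketch of the drop-oldest behavior, driving the channel directly
+/// through [`IntoHandler`] (the `zenoh::handlers` re-exports are assumed):
+/// ```
+/// use zenoh::handlers::{IntoHandler, RingChannel};
+///
+/// let (callback, handler) = RingChannel::new(2).into_handler();
+/// let push = callback.as_ref(); // borrow the `Arc`ed closure to call it
+/// push(1u32);
+/// push(2u32);
+/// push(3u32); // capacity is 2, so the oldest element (1) is dropped
+/// assert_eq!(handler.try_recv().unwrap(), Some(2));
+/// assert_eq!(handler.try_recv().unwrap(), Some(3));
+/// assert_eq!(handler.try_recv().unwrap(), None);
+/// ```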
+pub struct RingChannel {
+    capacity: usize,
+}
+
+impl RingChannel {
+    /// Initialize the RingChannel with the capacity size.
+    pub fn new(capacity: usize) -> Self {
+        Self { capacity }
+    }
+}
+
+impl Default for RingChannel {
+    fn default() -> Self {
+        Self::new(*API_DATA_RECEPTION_CHANNEL_SIZE)
+    }
+}
+
+struct RingChannelInner<T> {
+    ring: std::sync::Mutex<RingBuffer<T>>,
+    not_empty: flume::Receiver<()>,
+}
+
+pub struct RingChannelHandler<T> {
+    ring: Weak<RingChannelInner<T>>,
+}
+
+impl<T> RingChannelHandler<T> {
+    /// Receive from the ring channel.
+    ///
+    /// If the ring channel is empty, this call will block until an element is available in the channel.
+    pub fn recv(&self) -> ZResult<T> {
+        let Some(channel) = self.ring.upgrade() else {
+            bail!("The ringbuffer has been deleted.");
+        };
+        loop {
+            if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() {
+                return Ok(t);
+            }
+            channel.not_empty.recv().map_err(|e| zerror!("{}", e))?;
+        }
+    }
+
+    /// Receive from the ring channel with a deadline.
+    ///
+    /// If the ring channel is empty, this call will block until an element is available in the channel,
+    /// or return `None` if the deadline has passed.
+    pub fn recv_deadline(&self, deadline: Instant) -> ZResult<Option<T>> {
+        let Some(channel) = self.ring.upgrade() else {
+            bail!("The ringbuffer has been deleted.");
+        };
+
+        loop {
+            if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() {
+                return Ok(Some(t));
+            }
+            match channel.not_empty.recv_deadline(deadline) {
+                Ok(()) => {}
+                Err(flume::RecvTimeoutError::Timeout) => return Ok(None),
+                Err(err) => bail!("{}", err),
+            }
+        }
+    }
+
+    /// Receive from the ring channel with a timeout.
+    ///
+    /// If the ring channel is empty, this call will block until an element is available in the channel,
+    /// or return `None` if the timeout has expired.
+    pub fn recv_timeout(&self, timeout: Duration) -> ZResult<Option<T>> {
+        let Some(channel) = self.ring.upgrade() else {
+            bail!("The ringbuffer has been deleted.");
+        };
+
+        loop {
+            if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() {
+                return Ok(Some(t));
+            }
+            match channel.not_empty.recv_timeout(timeout) {
+                Ok(()) => {}
+                Err(flume::RecvTimeoutError::Timeout) => return Ok(None),
+                Err(err) => bail!("{}", err),
+            }
+        }
+    }
+
+    /// Receive from the ring channel.
+    ///
+    /// If the ring channel is empty, this call will block until an element is available in the channel.
+    pub async fn recv_async(&self) -> ZResult<T> {
+        let Some(channel) = self.ring.upgrade() else {
+            bail!("The ringbuffer has been deleted.");
+        };
+        loop {
+            if let Some(t) = channel.ring.lock().map_err(|e| zerror!("{}", e))?.pull() {
+                return Ok(t);
+            }
+            channel
+                .not_empty
+                .recv_async()
+                .await
+                .map_err(|e| zerror!("{}", e))?;
+        }
+    }
+
+    /// Try to receive from the ring channel.
+    ///
+    /// If the ring channel is empty, this call will return immediately without blocking.
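+    ///
+    /// A hedged polling sketch (handler obtained from a [`RingChannel`]; paths assumed):
+    /// ```no_run
+    /// # use zenoh::handlers::{IntoHandler, RingChannel};
+    /// # let (_cb, handler) = RingChannel::new(4).into_handler();
+    /// loop {
+    ///     let next: Option<u32> = handler.try_recv().unwrap();
+    ///     match next {
+    ///         Some(v) => println!("drained {v}"),
+    ///         None => break, // nothing buffered right now
+    ///     }
+    /// }
+    /// ```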
+    pub fn try_recv(&self) -> ZResult<Option<T>> {
+        let Some(channel) = self.ring.upgrade() else {
+            bail!("The ringbuffer has been deleted.");
+        };
+        let mut guard = channel.ring.lock().map_err(|e| zerror!("{}", e))?;
+        Ok(guard.pull())
+    }
+}
+
+impl<T: Send + 'static> IntoHandler<'static, T> for RingChannel {
+    type Handler = RingChannelHandler<T>;
+
+    fn into_handler(self) -> (Callback<'static, T>, Self::Handler) {
+        let (sender, receiver) = flume::bounded(1);
+        let inner = Arc::new(RingChannelInner {
+            ring: std::sync::Mutex::new(RingBuffer::new(self.capacity)),
+            not_empty: receiver,
+        });
+        let receiver = RingChannelHandler {
+            ring: Arc::downgrade(&inner),
+        };
+        (
+            Dyn::new(move |t| match inner.ring.lock() {
+                Ok(mut g) => {
+                    // Eventually drop the oldest element.
+                    g.push_force(t);
+                    drop(g);
+                    let _ = sender.try_send(());
+                }
+                Err(e) => tracing::error!("{}", e),
+            }),
+            receiver,
+        )
+    }
+}
diff --git a/zenoh/src/info.rs b/zenoh/src/api/info.rs
similarity index 60%
rename from zenoh/src/info.rs
rename to zenoh/src/api/info.rs
index 36910c666e..32bed0eb53 100644
--- a/zenoh/src/info.rs
+++ b/zenoh/src/api/info.rs
@@ -13,10 +13,13 @@
 //
 //! Tools to access information about the current zenoh [`Session`](crate::Session).
-use crate::SessionRef;
-use std::future::Ready;
-use zenoh_core::{AsyncResolve, Resolvable, SyncResolve};
-use zenoh_protocol::core::{WhatAmI, ZenohId};
+use std::future::{IntoFuture, Ready};
+
+use zenoh_config::wrappers::ZenohId;
+use zenoh_core::{Resolvable, Wait};
+use zenoh_protocol::core::WhatAmI;
+
+use super::session::SessionRef;
 
 /// A builder retuned by [`SessionInfo::zid()`](SessionInfo::zid) that allows
 /// to access the [`ZenohId`] of the current zenoh [`Session`](crate::Session).
@@ -25,33 +28,34 @@ use zenoh_protocol::core::{WhatAmI, ZenohId};
 /// ```
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
+/// use zenoh::prelude::*;
 ///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let zid = session.info().zid().res().await;
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let zid = session.info().zid().await;
 /// # }
 /// ```
 #[must_use =
"Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct RoutersZidBuilder<'a> { +pub struct RoutersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for RoutersZidBuilder<'a> { +impl<'a> Resolvable for RoutersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for RoutersZidBuilder<'a> { - fn res_sync(self) -> Self::To { +impl<'a> Wait for RoutersZenohIdBuilder<'a> { + fn wait(self) -> Self::To { Box::new( zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -90,17 +94,18 @@ impl<'a> SyncResolve for RoutersZidBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Router).then_some(())) - .and_then(|_| s.get_zid().ok()) + .and_then(|_| s.get_zid().map(Into::into).ok()) }), ) } } -impl<'a> AsyncResolve for RoutersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for RoutersZenohIdBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -111,26 +116,26 @@ impl<'a> AsyncResolve for RoutersZidBuilder<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let zid = session.info().zid().res().await; -/// let mut peers_zid = session.info().peers_zid().res().await; +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let zid = session.info().zid().await; +/// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct PeersZidBuilder<'a> { +pub struct PeersZenohIdBuilder<'a> { pub(crate) session: SessionRef<'a>, } -impl<'a> Resolvable for PeersZidBuilder<'a> { +impl<'a> Resolvable for PeersZenohIdBuilder<'a> { type To = Box + Send + Sync>; } -impl<'a> SyncResolve for PeersZidBuilder<'a> { - fn res_sync(self) -> ::To { +impl<'a> Wait for PeersZenohIdBuilder<'a> { + fn wait(self) -> ::To { Box::new( zenoh_runtime::ZRuntime::Application .block_in_place(self.session.runtime.manager().get_transports_unicast()) @@ -139,32 +144,33 @@ impl<'a> SyncResolve for PeersZidBuilder<'a> { s.get_whatami() .ok() .and_then(|what| (what == WhatAmI::Peer).then_some(())) - .and_then(|_| s.get_zid().ok()) + .and_then(|_| s.get_zid().map(Into::into).ok()) }), ) } } -impl<'a> AsyncResolve for PeersZidBuilder<'a> { - type Future = Ready; +impl<'a> IntoFuture for PeersZenohIdBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } -/// Struct returned by [`Session::info()`](crate::SessionDeclarations::info) which allows +/// Struct returned by [`Session::info()`](crate::session::SessionDeclarations::info) which allows /// to access information about the current zenoh [`Session`](crate::Session). 
/// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let info = session.info(); -/// let zid = info.zid().res().await; +/// let zid = info.zid().await; /// # } /// ``` pub struct SessionInfo<'a> { @@ -178,14 +184,14 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let zid = session.info().zid().res().await; + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let zid = session.info().zid().await; /// # } /// ``` - pub fn zid(&self) -> ZidBuilder<'_> { - ZidBuilder { + pub fn zid(&self) -> ZenohIdBuilder<'_> { + ZenohIdBuilder { session: self.session.clone(), } } @@ -197,15 +203,15 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut routers_zid = session.info().routers_zid().res().await; + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let mut routers_zid = session.info().routers_zid().await; /// while let Some(router_zid) = routers_zid.next() {} /// # } /// ``` - pub fn routers_zid(&self) -> RoutersZidBuilder<'_> { - RoutersZidBuilder { + pub fn routers_zid(&self) -> RoutersZenohIdBuilder<'_> { + RoutersZenohIdBuilder { session: self.session.clone(), } } @@ -216,15 +222,15 @@ impl SessionInfo<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut peers_zid = session.info().peers_zid().res().await; + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let mut peers_zid = session.info().peers_zid().await; /// while let Some(peer_zid) = peers_zid.next() {} /// # } /// ``` - pub fn peers_zid(&self) -> PeersZidBuilder<'_> { - PeersZidBuilder { + pub fn peers_zid(&self) -> PeersZenohIdBuilder<'_> { + PeersZenohIdBuilder { session: self.session.clone(), } } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/api/key_expr.rs similarity index 83% rename from zenoh/src/key_expr.rs rename to zenoh/src/api/key_expr.rs index 99f5aa6187..fc472e0db3 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/api/key_expr.rs @@ -11,53 +11,22 @@ // Contributors: // ZettaScale Zenoh Team, // - -//! [Key expression](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space. -//! -//! In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE). -//! KEs are a small language that express sets of keys through a glob-like language. -//! -//! These semantics can be a bit difficult to implement, so this module provides the following facilities: -//! -//! # Storing Key Expressions -//! This module provides 3 flavours to store strings that have been validated to respect the KE syntax: -//! - [`keyexpr`] is the equivalent of a [`str`], -//! - [`OwnedKeyExpr`] works like an [`std::sync::Arc`], -//! 
- [`KeyExpr`] works like a [`std::borrow::Cow`], but also stores some additional context internal to Zenoh to optimize -//! routing and network usage. -//! -//! All of these types [`Deref`](core::ops::Deref) to [`keyexpr`], which notably has methods to check whether a given [`keyexpr::intersects`] with another, -//! or even if a [`keyexpr::includes`] another. -//! -//! # Tying values to Key Expressions -//! When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect -//! the Key Expression semantics with high performance. -//! -//! Enter [KeTrees](keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs. -//! -//! # Building and parsing Key Expressions -//! A common issue in REST API is the association of meaning to sections of the URL, and respecting that API in a convenient manner. -//! The same issue arises naturally when designing a KE space, and [`KeFormat`](format::KeFormat) was designed to help you with this, -//! both in constructing and in parsing KEs that fit the formats you've defined. -//! -//! [`kedefine`] also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats, -//! as the [`keformat`] and [`kewrite`] macros will be able to tell you if you're attempting to set fields of the format that do not exist. - use std::{ convert::{TryFrom, TryInto}, - future::Ready, + future::{IntoFuture, Ready}, str::FromStr, }; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -pub use zenoh_keyexpr::*; -pub use zenoh_macros::{kedefine, keformat, kewrite}; + +use zenoh_core::{Resolvable, Wait}; +use zenoh_keyexpr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::{ - core::{key_expr::canon::Canonizable, ExprId, WireExpr}, + core::{key_expr::canon::Canonize, ExprId, WireExpr}, network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; -use crate::{net::primitives::Primitives, prelude::Selector, Session, Undeclarable}; +use super::session::{Session, UndeclarableSealed}; +use crate::net::primitives::Primitives; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> { @@ -112,10 +81,10 @@ impl KeyExpr<'static> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. - pub unsafe fn from_boxed_string_unchecked(s: Box) -> Self { - Self(KeyExprInner::Owned( - OwnedKeyExpr::from_boxed_string_unchecked(s), - )) + pub unsafe fn from_boxed_str_unchecked(s: Box) -> Self { + Self(KeyExprInner::Owned(OwnedKeyExpr::from_boxed_str_unchecked( + s, + ))) } } impl<'a> KeyExpr<'a> { @@ -175,7 +144,7 @@ impl<'a> KeyExpr<'a> { pub fn autocanonize(mut t: T) -> Result where Self: TryFrom, - T: Canonizable, + T: Canonize, { t.canonize(); Self::new(t) @@ -185,7 +154,7 @@ impl<'a> KeyExpr<'a> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. /// Messages addressed with invalid key expressions will be dropped. 
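+    /// A hedged sketch (the string must already be a valid key expression;
+    /// `demo/example` is):
+    /// ```
+    /// use zenoh::key_expr::KeyExpr;
+    ///
+    /// let ke = unsafe { KeyExpr::from_str_unchecked("demo/example") };
+    /// assert_eq!(ke.to_string(), "demo/example");
+    /// ```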
- pub unsafe fn from_str_uncheckend(s: &'a str) -> Self { + pub unsafe fn from_str_unchecked(s: &'a str) -> Self { keyexpr::from_str_unchecked(s).into() } @@ -235,7 +204,7 @@ impl<'a> KeyExpr<'a> { /// This is notably useful for workspaces: /// ```rust /// # use std::convert::TryFrom; - /// # use zenoh::prelude::KeyExpr; + /// # use zenoh::key_expr::KeyExpr; /// # let get_workspace = || KeyExpr::try_from("some/workspace").unwrap(); /// let workspace: KeyExpr = get_workspace(); /// let topic = workspace.join("some/topic").unwrap(); @@ -301,20 +270,6 @@ impl<'a> KeyExpr<'a> { Ok(r.into()) } } - - pub fn with_parameters(self, selector: &'a str) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } - - pub fn with_owned_parameters(self, selector: String) -> Selector<'a> { - Selector { - key_expr: self, - parameters: selector.into(), - } - } } impl FromStr for KeyExpr<'static> { @@ -323,8 +278,8 @@ impl FromStr for KeyExpr<'static> { Ok(Self(KeyExprInner::Owned(s.parse()?))) } } -impl<'a> From> for OwnedKeyExpr { - fn from(val: super::KeyExpr<'a>) -> Self { +impl<'a> From> for OwnedKeyExpr { + fn from(val: KeyExpr<'a>) -> Self { match val.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. } => { key_expr.into() @@ -402,6 +357,7 @@ impl<'a> From> for String { } } } + impl<'a> TryFrom for KeyExpr<'a> { type Error = zenoh_result::Error; fn try_from(value: String) -> Result { @@ -553,7 +509,7 @@ impl<'a> KeyExpr<'a> { _ => false, } } - pub(crate) fn to_wire(&'a self, session: &crate::Session) -> WireExpr<'a> { + pub(crate) fn to_wire(&'a self, session: &Session) -> WireExpr<'a> { match &self.0 { KeyExprInner::Wire { key_expr, @@ -593,7 +549,7 @@ impl<'a> KeyExpr<'a> { } } -impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { +impl<'a> UndeclarableSealed<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { fn undeclare_inner(self, session: &'a Session) -> KeyExprUndeclaration<'a> { KeyExprUndeclaration { session, @@ -608,11 +564,11 @@ impl<'a> Undeclarable<&'a Session, KeyExprUndeclaration<'a>> for KeyExpr<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); -/// session.undeclare(key_expr).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); +/// session.undeclare(key_expr).await.unwrap(); /// # } /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] @@ -625,8 +581,8 @@ impl Resolvable for KeyExprUndeclaration<'_> { type To = ZResult<()>; } -impl SyncResolve for KeyExprUndeclaration<'_> { - fn res_sync(self) -> ::To { +impl Wait for KeyExprUndeclaration<'_> { + fn wait(self) -> ::To { let KeyExprUndeclaration { session, expr } = self; let expr_id = match &expr.0 { KeyExprInner::Wire { @@ -664,9 +620,10 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - ext_qos: declare::ext::QoSType::declare_default(), + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: 
declare::ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr { id: expr_id }), }); @@ -674,11 +631,12 @@ impl SyncResolve for KeyExprUndeclaration<'_> { } } -impl AsyncResolve for KeyExprUndeclaration<'_> { - type Future = Ready; +impl IntoFuture for KeyExprUndeclaration<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/liveliness.rs b/zenoh/src/api/liveliness.rs similarity index 65% rename from zenoh/src/liveliness.rs rename to zenoh/src/api/liveliness.rs index 9cf3b9c362..64f87c6de5 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/api/liveliness.rs @@ -12,40 +12,25 @@ // ZettaScale Zenoh Team, // -//! Liveliness primitives. -//! -//! see [`Liveliness`] - -use crate::query::Reply; - -#[zenoh_macros::unstable] -use { - crate::{ - handlers::locked, - handlers::DefaultHandler, - prelude::*, - subscriber::{Subscriber, SubscriberInner}, - SessionRef, Undeclarable, - }, - std::convert::TryInto, - std::future::Ready, - std::sync::Arc, - std::time::Duration, - zenoh_config::unwrap_or_default, - zenoh_core::AsyncResolve, - zenoh_core::Resolvable, - zenoh_core::Result as ZResult, - zenoh_core::SyncResolve, - zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo, +use std::{ + convert::TryInto, + future::{IntoFuture, Ready}, + sync::Arc, + time::Duration, }; -#[zenoh_macros::unstable] -pub(crate) static PREFIX_LIVELINESS: &str = crate::net::routing::PREFIX_LIVELINESS; - -#[zenoh_macros::unstable] -lazy_static::lazy_static!( - pub(crate) static ref KE_PREFIX_LIVELINESS: &'static keyexpr = unsafe { keyexpr::from_str_unchecked(PREFIX_LIVELINESS) }; -); +use zenoh_config::unwrap_or_default; +use zenoh_core::{Resolvable, Resolve, Result as ZResult, Wait}; + +use super::{ + handlers::{locked, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + query::Reply, + sample::{Locality, Sample}, + session::{Session, SessionRef, UndeclarableSealed}, + subscriber::{Subscriber, SubscriberInner}, + Id, +}; /// A structure with functions to declare a /// [`LivelinessToken`](LivelinessToken), query @@ -55,28 +40,58 @@ lazy_static::lazy_static!( /// A [`LivelinessToken`](LivelinessToken) is a token which liveliness is tied /// to the Zenoh [`Session`](Session) and can be monitored by remote applications. /// -/// A [`LivelinessToken`](LivelinessToken) with key `key/expression` can be -/// queried or subscribed to on key `@/liveliness/key/expression`. -/// /// The `Liveliness` structure can be obtained with the /// [`Session::liveliness()`](Session::liveliness) function /// of the [`Session`] struct. 
/// /// # Examples +/// ### Declaring a token /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } /// ``` +/// +/// ### Querying tokens +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } +/// # } +/// ``` +/// +/// ### Subscribing to liveliness changes +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, sample::SampleKind}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } +/// # } +/// ``` #[zenoh_macros::unstable] pub struct Liveliness<'a> { pub(crate) session: SessionRef<'a>, @@ -94,13 +109,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -130,14 +144,14 @@ impl<'a> Liveliness<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::{prelude::*, sample::SampleKind}; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let subscriber = session.liveliness().declare_subscriber("key/expression").await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// match sample.kind { - /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr), - /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr), + /// match sample.kind() { + /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), + /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), /// } /// } /// # } @@ -154,7 +168,7 @@ impl<'a> Liveliness<'a> { LivelinessSubscriberBuilder { session: self.session.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -168,13 +182,13 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = 
session.liveliness().get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let replies = session.liveliness().get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// if let Ok(sample) = reply.sample { - /// println!(">> Liveliness token {}", sample.key_expr); + /// if let Ok(sample) = reply.result() { + /// println!(">> Liveliness token {}", sample.key_expr()); /// } /// } /// # } @@ -197,7 +211,7 @@ impl<'a> Liveliness<'a> { session: &self.session, key_expr, timeout, - handler: DefaultHandler, + handler: DefaultHandler::default(), } } } @@ -208,13 +222,12 @@ impl<'a> Liveliness<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -233,9 +246,9 @@ impl<'a> Resolvable for LivelinessTokenBuilder<'a, '_> { } #[zenoh_macros::unstable] -impl SyncResolve for LivelinessTokenBuilder<'_, '_> { +impl Wait for LivelinessTokenBuilder<'_, '_> { #[inline] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { let session = self.session; let key_expr = self.key_expr?.into_owned(); session @@ -243,17 +256,18 @@ impl SyncResolve for LivelinessTokenBuilder<'_, '_> { .map(|tok_state| LivelinessToken { session, state: tok_state, - alive: true, + undeclare_on_drop: true, }) } } #[zenoh_macros::unstable] -impl AsyncResolve for LivelinessTokenBuilder<'_, '_> { - type Future = Ready; +impl IntoFuture for LivelinessTokenBuilder<'_, '_> { + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -267,9 +281,6 @@ pub(crate) struct LivelinessTokenState { /// A token whose liveliness is tied to the Zenoh [`Session`](Session) /// and can be monitored by remote applications. /// -/// A `LivelinessToken` with key `key/expression` can be queried or subscribed -/// to on key `@/liveliness/key/expression`. -/// /// A declared liveliness token will be seen as alive by any other Zenoh /// application in the system that monitors it while the liveliness token /// is not undeclared or dropped, while the Zenoh application that declared @@ -283,13 +294,12 @@ pub(crate) struct LivelinessTokenState { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") -/// .res() /// .await /// .unwrap(); /// # } @@ -299,7 +309,7 @@ pub(crate) struct LivelinessTokenState { pub struct LivelinessToken<'a> { pub(crate) session: SessionRef<'a>, pub(crate) state: Arc, - pub(crate) alive: bool, + undeclare_on_drop: bool, } /// A [`Resolvable`] returned when undeclaring a [`LivelinessToken`](LivelinessToken). 
@@ -308,17 +318,16 @@ pub struct LivelinessToken<'a> {
 /// ```
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
+/// use zenoh::prelude::*;
 ///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
 /// let liveliness = session
 ///     .liveliness()
 ///     .declare_token("key/expression")
-///     .res()
 ///     .await
 ///     .unwrap();
 ///
-/// liveliness.undeclare().res().await.unwrap();
+/// liveliness.undeclare().await.unwrap();
 /// # }
 /// ```
 #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
@@ -333,19 +342,21 @@ impl Resolvable for LivelinessTokenUndeclaration<'_> {
 }
 
 #[zenoh_macros::unstable]
-impl SyncResolve for LivelinessTokenUndeclaration<'_> {
-    fn res_sync(mut self) -> <Self as Resolvable>::To {
-        self.token.alive = false;
+impl Wait for LivelinessTokenUndeclaration<'_> {
+    fn wait(mut self) -> <Self as Resolvable>::To {
+        // set the flag first to avoid a double panic if this function panics
+        self.token.undeclare_on_drop = false;
         self.token.session.undeclare_liveliness(self.token.state.id)
     }
 }
 
 #[zenoh_macros::unstable]
-impl<'a> AsyncResolve for LivelinessTokenUndeclaration<'a> {
-    type Future = Ready<Self::To>;
+impl<'a> IntoFuture for LivelinessTokenUndeclaration<'a> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
 
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
     }
 }
 
@@ -361,27 +372,36 @@ impl<'a> LivelinessToken<'a> {
     /// ```
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
+    /// use zenoh::prelude::*;
     ///
-    /// let session = zenoh::open(config::peer()).res().await.unwrap();
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
     /// let liveliness = session
     ///     .liveliness()
     ///     .declare_token("key/expression")
-    ///     .res()
     ///     .await
     ///     .unwrap();
     ///
-    /// liveliness.undeclare().res().await.unwrap();
+    /// liveliness.undeclare().await.unwrap();
     /// # }
     /// ```
     #[inline]
     pub fn undeclare(self) -> impl Resolve<ZResult<()>> + 'a {
-        Undeclarable::undeclare_inner(self, ())
+        UndeclarableSealed::undeclare_inner(self, ())
+    }
+
+    /// Keep this liveliness token in the background until the session is closed.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn background(mut self) {
+        // It's not necessary to undeclare this resource when the session closes,
+        // as other sessions will clean all resources related to the closed one.
+        // So we can just never undeclare it.
+        self.undeclare_on_drop = false;
     }
 }
 
 #[zenoh_macros::unstable]
-impl<'a> Undeclarable<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> {
+impl<'a> UndeclarableSealed<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> {
     fn undeclare_inner(self, _: ()) -> LivelinessTokenUndeclaration<'a> {
         LivelinessTokenUndeclaration { token: self }
     }
@@ -390,26 +410,24 @@ impl<'a> Undeclarable<(), LivelinessTokenUndeclaration<'a>> for LivelinessToken<'a> {
 #[zenoh_macros::unstable]
 impl Drop for LivelinessToken<'_> {
     fn drop(&mut self) {
-        if self.alive {
+        if self.undeclare_on_drop {
             let _ = self.session.undeclare_liveliness(self.state.id);
         }
     }
 }
 
-/// A builder for initializing a [`FlumeSubscriber`](FlumeSubscriber).
+/// A builder for initializing a liveliness [`FlumeSubscriber`](FlumeSubscriber).
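The new `background()` mode added above ships without a doc example. A hypothetical usage sketch, assembled only from calls this diff itself introduces (`declare_token(...).await` and `background()`; liveliness is feature-gated as unstable), untested against the published crate:

```rust
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    // With `background()`, dropping the returned handle no longer undeclares
    // the token, so it stays alive until the session itself is closed.
    session
        .liveliness()
        .declare_token("key/expression")
        .await
        .unwrap()
        .background();
}
```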
/// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session +/// .liveliness() /// .declare_subscriber("key/expression") -/// .best_effort() -/// .pull_mode() -/// .res() /// .await /// .unwrap(); /// # } @@ -425,19 +443,19 @@ pub struct LivelinessSubscriberBuilder<'a, 'b, Handler> { #[zenoh_macros::unstable] impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { - /// Receive the samples for this subscription with a callback. + /// Receive the samples for this liveliness subscription with a callback. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) - /// .res() + /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) /// .await /// .unwrap(); /// # } @@ -463,23 +481,23 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the samples for this subscription with a mutable callback. + /// Receive the samples for this liveliness subscription with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. - /// If your callback is also accepted by the [`callback`](SubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// If your callback is also accepted by the [`callback`](LivelinessSubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut` /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .callback_mut(move |_sample| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -496,23 +514,23 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the samples for this liveliness subscription with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let subscriber = session + /// .liveliness() /// .declare_subscriber("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {}", sample.key_expr, sample.value); + /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); /// } /// # } /// ``` @@ -520,7 +538,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, + Handler: crate::handlers::IntoHandler<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -538,53 +556,51 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Wait for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { + fn wait(self) -> ::To { + use super::subscriber::SubscriberKind; + let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_cb_receiver_pair(); + let (callback, handler) = self.handler.into_handler(); session - .declare_subscriber_inner( - &key_expr, - &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - Locality::default(), - callback, - &SubscriberInfo::default(), - ) + .declare_liveliness_subscriber_inner(&key_expr, Locality::default(), callback) .map(|sub_state| Subscriber { subscriber: SubscriberInner { session, state: sub_state, - alive: true, + kind: SubscriberKind::LivelinessSubscriber, + undeclare_on_drop: true, }, - receiver, + handler, }) } } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> IntoFuture for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Sample> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Sample> + Send, + Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -595,20 +611,18 @@ where /// # #[tokio::main] /// # async fn main() { /// # use std::convert::TryFrom; -/// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let tokens = session 
/// .liveliness() /// .get("key/expression") -/// .res() /// .await /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { -/// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str(),), -/// Err(err) => println!("Received (ERROR: '{}')", String::try_from(&err).unwrap()), +/// match token.result() { +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), +/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload()), /// } /// } /// # } @@ -629,14 +643,13 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let queryable = session /// .liveliness() /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) - /// .res() + /// .callback(|reply| { println!("Received {:?}", reply.result()); }) /// .await /// .unwrap(); /// # } @@ -660,7 +673,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { } } - /// Receive the replies for this query with a mutable callback. + /// Receive the replies for this liveliness query with a mutable callback. /// /// Using this guarantees that your callback will never be called concurrently. /// If your callback is also accepted by the [`callback`](LivelinessGetBuilder::callback) method, we suggest you use it instead of `callback_mut` @@ -669,15 +682,14 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let mut n = 0; /// let queryable = session /// .liveliness() /// .get("key/expression") /// .callback_mut(move |reply| {n += 1;}) - /// .res() /// .await /// .unwrap(); /// # } @@ -693,31 +705,30 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the replies for this liveliness query with a [`Handler`](crate::handlers::IntoHandler). 
/// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let replies = session /// .liveliness() /// .get("key/expression") /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); + /// println!("Received {:?}", reply.result()); /// } /// # } /// ``` #[inline] pub fn with(self, handler: Handler) -> LivelinessGetBuilder<'a, 'b, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply>, + Handler: IntoHandler<'static, Reply>, { let LivelinessGetBuilder { session, @@ -745,45 +756,34 @@ impl<'a, 'b, Handler> LivelinessGetBuilder<'a, 'b, Handler> { impl Resolvable for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type To = ZResult; + type To = ZResult; } -impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl Wait for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); - + fn wait(self) -> ::To { + let (callback, receiver) = self.handler.into_handler(); self.session - .query( - &self.key_expr?.into(), - &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - QueryTarget::default(), - QueryConsolidation::default(), - Locality::default(), - self.timeout, - None, - #[cfg(feature = "unstable")] - None, - callback, - ) + .liveliness_query(&self.key_expr?, self.timeout, callback) .map(|_| receiver) } } -impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> +impl IntoFuture for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoCallbackReceiverPair<'static, Reply> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } diff --git a/zenoh/src/plugins/loader.rs b/zenoh/src/api/loader.rs similarity index 97% rename from zenoh/src/plugins/loader.rs rename to zenoh/src/api/loader.rs index c037ba4c4a..175e0c6816 100644 --- a/zenoh/src/plugins/loader.rs +++ b/zenoh/src/api/loader.rs @@ -11,11 +11,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::sealed::{PluginsManager, PLUGIN_PREFIX}; -use crate::runtime::Runtime; use zenoh_config::{Config, PluginLoad}; use zenoh_result::ZResult; +use super::plugins::{PluginsManager, PLUGIN_PREFIX}; +use crate::net::runtime::Runtime; + pub(crate) fn load_plugin( plugin_mgr: &mut PluginsManager, name: &str, diff --git a/zenoh/src/api/mod.rs b/zenoh/src/api/mod.rs new file mode 100644 index 0000000000..d3053cb3c9 --- /dev/null +++ b/zenoh/src/api/mod.rs @@ -0,0 +1,38 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or 
the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+pub(crate) type Id = u32;
+
+pub(crate) mod admin;
+pub(crate) mod builders;
+pub(crate) mod bytes;
+pub(crate) mod encoding;
+pub(crate) mod handlers;
+pub(crate) mod info;
+pub(crate) mod key_expr;
+#[cfg(feature = "unstable")]
+pub(crate) mod liveliness;
+#[cfg(feature = "plugins")]
+pub(crate) mod loader;
+#[cfg(feature = "plugins")]
+pub(crate) mod plugins;
+pub(crate) mod publisher;
+pub(crate) mod query;
+pub(crate) mod queryable;
+pub(crate) mod sample;
+pub(crate) mod scouting;
+pub(crate) mod selector;
+pub(crate) mod session;
+pub(crate) mod subscriber;
+pub(crate) mod value;
diff --git a/zenoh/src/plugins/sealed.rs b/zenoh/src/api/plugins.rs
similarity index 90%
rename from zenoh/src/plugins/sealed.rs
rename to zenoh/src/api/plugins.rs
index 1c6d752abf..2623ce2c6f 100644
--- a/zenoh/src/plugins/sealed.rs
+++ b/zenoh/src/api/plugins.rs
@@ -14,15 +14,16 @@
 
 //! `zenohd`'s plugin system. For more details, consult the [detailed documentation](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Plugins/Zenoh%20Plugins.md).
 
-use crate::{prelude::Selector, runtime::Runtime};
 use zenoh_core::zconfigurable;
-
 use zenoh_plugin_trait::{
     Plugin, PluginControl, PluginInstance, PluginReport, PluginStatusRec, StructVersion,
 };
 use zenoh_protocol::core::key_expr::keyexpr;
 use zenoh_result::ZResult;
 
+use super::key_expr::KeyExpr;
+use crate::net::runtime::Runtime;
+
 zconfigurable! {
     pub static ref PLUGIN_PREFIX: String = "zenoh_plugin_".to_string();
 }
@@ -92,14 +93,13 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl {
     /// Function called on any query on admin space that matches this plugin's sub-part of the admin space.
     /// Thus the plugin can reply its contribution to the global admin space of this zenohd.
     /// Parameters:
-    /// * `selector`: the full selector of the query (usually only key_expr part is used). This selector is
-    ///   exactly the same as it was requested by user, for example "@/router/ROUTER_ID/plugins/PLUGIN_NAME/some/plugin/info" or "@/router/*/plugins/*/foo/bar".
-    ///   But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the selector matches the `plugin_status_key`
-    /// * `plugin_status_key`: the actual path to plugin's status in the admin space. For example "@/router/ROUTER_ID/plugins/PLUGIN_NAME"
-    ///
-    /// Returns value:
+    /// * `key_expr`: the key_expr selector of the query. This key_expr is
+    ///   exactly the same as requested by the user, for example "@/ROUTER_ID/router/plugins/PLUGIN_NAME/some/plugin/info" or "@/*/router/plugins/*/foo/bar".
+    ///   But the plugin's [RunningPluginTrait::adminspace_getter] is called only if the key_expr matches the `plugin_status_key`.
+    /// * `plugin_status_key`: the actual path to the plugin's status in the admin space. For example "@/ROUTER_ID/router/plugins/PLUGIN_NAME"
+    /// Return value:
     /// * `Ok(Vec<Response>)`: the list of responses to the query. For example if a plugin can return information on subkeys "foo", "bar", "foo/buzz" and "bar/buzz"
-    ///   and it's requested with the query "@/router/ROUTER_ID/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz"
+    ///   and it's requested with the query "@/ROUTER_ID/router/plugins/PLUGIN_NAME/*", it should return only information on "foo" and "bar" subkeys, but not on "foo/buzz" and "bar/buzz"
     ///   as they don't match the query.
     /// * `Err(ZError)`: A problem occurred when processing the query.
     ///
@@ -113,7 +113,7 @@ pub trait RunningPluginTrait: Send + Sync + PluginControl {
     ///
     fn adminspace_getter<'a>(
         &'a self,
-        _selector: &'a Selector<'a>,
+        _key_expr: &'a KeyExpr<'a>,
         _plugin_status_key: &str,
     ) -> ZResult<Vec<Response>> {
         Ok(Vec::new())
diff --git a/zenoh/src/publication.rs b/zenoh/src/api/publisher.rs
similarity index 52%
rename from zenoh/src/publication.rs
rename to zenoh/src/api/publisher.rs
index 9373fa021d..c8e0ace03e 100644
--- a/zenoh/src/publication.rs
+++ b/zenoh/src/api/publisher.rs
@@ -12,174 +12,68 @@
 // ZettaScale Zenoh Team,
 //
 
-//! Publishing primitives.
-#[zenoh_macros::unstable]
-use crate::handlers::Callback;
-#[zenoh_macros::unstable]
-use crate::handlers::DefaultHandler;
-use crate::net::primitives::Primitives;
-use crate::prelude::*;
-#[zenoh_macros::unstable]
-use crate::sample::Attachment;
-use crate::sample::DataInfo;
-use crate::sample::QoS;
-use crate::Encoding;
-use crate::SessionRef;
-use crate::Undeclarable;
-use std::future::Ready;
-use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve};
-use zenoh_protocol::network::push::ext;
-use zenoh_protocol::network::Mapping;
-use zenoh_protocol::network::Push;
-use zenoh_protocol::zenoh::Del;
-use zenoh_protocol::zenoh::PushBody;
-use zenoh_protocol::zenoh::Put;
-use zenoh_result::ZResult;
-
-/// The kind of congestion control.
-pub use zenoh_protocol::core::CongestionControl;
-
-/// A builder for initializing a [`delete`](crate::Session::delete) operation.
-///
-/// # Examples
-/// ```
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::publication::CongestionControl;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// session
-///     .delete("key/expression")
-///     .res()
-///     .await
-///     .unwrap();
-/// # }
-/// ```
-pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>;
+use std::{
+    convert::TryFrom,
+    fmt,
+    future::{IntoFuture, Ready},
+    pin::Pin,
+    task::{Context, Poll},
+};
 
-/// A builder for initializing a [`put`](crate::Session::put) operation.
-///
-/// # Examples
-/// ```
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::publication::CongestionControl;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// session
-///     .put("key/expression", "value")
-///     .encoding(KnownEncoding::TextPlain)
-///     .congestion_control(CongestionControl::Block)
-///     .res()
-///     .await
-///     .unwrap();
-/// # }
-/// ```
-#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
-#[derive(Debug, Clone)]
-pub struct PutBuilder<'a, 'b> {
-    pub(crate) publisher: PublisherBuilder<'a, 'b>,
-    pub(crate) value: Value,
-    pub(crate) kind: SampleKind,
-    #[cfg(feature = "unstable")]
-    pub(crate) attachment: Option<Attachment>,
-}
-
-impl PutBuilder<'_, '_> {
-    /// Change the encoding of the written data.
- #[inline] - pub fn encoding(mut self, encoding: IntoEncoding) -> Self - where - IntoEncoding: Into, - { - self.value.encoding = encoding.into(); - self - } - /// Change the `congestion_control` to apply when routing the data. - #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.publisher = self.publisher.congestion_control(congestion_control); - self - } - - /// Change the priority of the written data. - #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.publisher = self.publisher.priority(priority); - self - } - - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.publisher = self.publisher.allowed_destination(destination); - self - } - - pub fn kind(mut self, kind: SampleKind) -> Self { - self.kind = kind; - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } -} - -impl Resolvable for PutBuilder<'_, '_> { - type To = ZResult<()>; -} - -impl SyncResolve for PutBuilder<'_, '_> { - #[inline] - fn res_sync(self) -> ::To { - let PublisherBuilder { - session, - key_expr, - congestion_control, - priority, - destination, - } = self.publisher; - - let publisher = Publisher { - session, - key_expr: key_expr?, - congestion_control, - priority, - destination, - }; - - resolve_put( - &publisher, - self.value, - self.kind, - #[cfg(feature = "unstable")] - self.attachment, - ) - } +use futures::Sink; +use zenoh_core::{zread, Resolvable, Resolve, Wait}; +use zenoh_protocol::{ + core::CongestionControl, + network::{push::ext, Push}, + zenoh::{Del, PushBody, Put}, +}; +use zenoh_result::{Error, ZResult}; +#[cfg(feature = "unstable")] +use { + crate::api::{ + handlers::{Callback, DefaultHandler, IntoHandler}, + sample::SourceInfo, + }, + std::{ + collections::HashSet, + sync::{Arc, Mutex}, + }, + zenoh_config::wrappers::EntityGlobalId, + zenoh_protocol::core::EntityGlobalIdProto, +}; + +use super::{ + builders::publisher::{ + PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, + PublisherDeleteBuilder, PublisherPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + key_expr::KeyExpr, + sample::{DataInfo, Locality, QoS, Sample, SampleFields, SampleKind}, + session::{SessionRef, UndeclarableSealed}, +}; +use crate::{ + api::{subscriber::SubscriberKind, Id}, + net::primitives::Primitives, +}; + +pub(crate) struct PublisherState { + pub(crate) id: Id, + pub(crate) remote_id: Id, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) destination: Locality, } -impl AsyncResolve for PutBuilder<'_, '_> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) +impl fmt::Debug for PublisherState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Publisher") + .field("id", &self.id) + .field("key_expr", &self.key_expr) + .finish() } } -use futures::Sink; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::pin::Pin; -use std::task::{Context, Poll}; -use zenoh_result::Error; - #[zenoh_macros::unstable] #[derive(Clone)] pub enum PublisherRef<'a> { @@ -217,11 +111,11 @@ impl std::fmt::Debug for PublisherRef<'_> { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use 
zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// publisher.put("value").res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// publisher.put("value").await.unwrap(); /// # } /// ``` /// @@ -232,49 +126,75 @@ impl std::fmt::Debug for PublisherRef<'_> { /// # #[tokio::main] /// # async fn main() { /// use futures::StreamExt; -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let mut subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); -/// let publisher = session.declare_publisher("another/key/expression").res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); +/// let mut subscriber = session.declare_subscriber("key/expression").await.unwrap(); +/// let publisher = session.declare_publisher("another/key/expression").await.unwrap(); /// subscriber.stream().map(Ok).forward(publisher).await.unwrap(); /// # } /// ``` #[derive(Debug, Clone)] pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, + pub(crate) id: Id, pub(crate) key_expr: KeyExpr<'a>, + pub(crate) encoding: Encoding, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, + pub(crate) is_express: bool, pub(crate) destination: Locality, + #[cfg(feature = "unstable")] + pub(crate) matching_listeners: Arc>>, + pub(crate) undeclare_on_drop: bool, } impl<'a> Publisher<'a> { + /// Returns the [`EntityGlobalId`] of this Publisher. + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression") + /// .await + /// .unwrap(); + /// let publisher_id = publisher.id(); + /// # } + /// ``` + #[zenoh_macros::unstable] + pub fn id(&self) -> EntityGlobalId { + EntityGlobalIdProto { + zid: self.session.zid().into(), + eid: self.id, + } + .into() + } + + #[inline] pub fn key_expr(&self) -> &KeyExpr<'a> { &self.key_expr } - /// Change the `congestion_control` to apply when routing the data. + /// Get the [`Encoding`] used when publishing data. #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.congestion_control = congestion_control; - self + pub fn encoding(&self) -> &Encoding { + &self.encoding } - /// Change the priority of the written data. + /// Get the `congestion_control` applied when routing the data. #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.priority = priority; - self + pub fn congestion_control(&self) -> CongestionControl { + self.congestion_control } - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] + /// Get the priority of the written data. 
#[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self + pub fn priority(&self) -> Priority { + self.priority } /// Consumes the given `Publisher`, returning a thread-safe reference-counting @@ -291,11 +211,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -313,35 +233,35 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } - fn _write(&self, kind: SampleKind, value: Value) -> Publication { - Publication { - publisher: self, - value, - kind, - #[cfg(feature = "unstable")] - attachment: None, - } - } - /// Put data. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` #[inline] - pub fn put(&self, value: IntoValue) -> Publication + pub fn put(&self, payload: IntoZBytes) -> PublisherPutBuilder<'_> where - IntoValue: Into, + IntoZBytes: Into, { - self._write(SampleKind::Put, value.into()) + PublicationBuilder { + publisher: self, + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: self.encoding.clone(), + }, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + attachment: None, + } } /// Delete data. @@ -350,15 +270,22 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.delete().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.delete().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> Publication { - self._write(SampleKind::Delete, Value::empty()) + pub fn delete(&self) -> PublisherDeleteBuilder<'_> { + PublicationBuilder { + publisher: self, + kind: PublicationBuilderDelete, + timestamp: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + attachment: None, + } } /// Return the [`MatchingStatus`] of the publisher. 
@@ -370,13 +297,12 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// .matching_subscribers(); @@ -399,11 +325,11 @@ impl<'a> Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -417,7 +343,7 @@ impl<'a> Publisher<'a> { pub fn matching_listener(&self) -> MatchingListenerBuilder<'_, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Borrow(self), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } @@ -427,53 +353,24 @@ impl<'a> Publisher<'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.undeclare().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// publisher.undeclare().await.unwrap(); /// # } /// ``` pub fn undeclare(self) -> impl Resolve> + 'a { - Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } -} - -/// Internal function for sending data with specified [`kind`](SampleKind) -pub trait HasWriteWithSampleKind { - type WriteOutput<'a> - where - Self: 'a; - fn write>( - &self, - kind: SampleKind, - value: IntoValue, - ) -> Self::WriteOutput<'_>; -} -impl<'a> HasWriteWithSampleKind for Publisher<'a> { - type WriteOutput<'b> = Publication<'b> - where - 'a: 'b; - /// Send data with [`kind`](SampleKind) (Put or Delete). 
- /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// use zenoh::publication::HasWriteWithSampleKind; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// publisher.write(SampleKind::Put, "value").res().await.unwrap(); - /// # } - /// ``` - fn write(&self, kind: SampleKind, value: IntoValue) -> Self::WriteOutput<'_> - where - IntoValue: Into, - { - self._write(kind, value.into()) + #[cfg(feature = "unstable")] + fn undeclare_matching_listeners(&self) -> ZResult<()> { + let ids: Vec = zlock!(self.matching_listeners).drain().collect(); + for id in ids { + self.session.undeclare_matches_listener_inner(id)? + } + Ok(()) } } @@ -489,11 +386,11 @@ impl<'a> HasWriteWithSampleKind for Publisher<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -512,11 +409,11 @@ pub trait PublisherDeclarations { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -539,11 +436,11 @@ impl PublisherDeclarations for std::sync::Arc> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap().into_arc(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap().into_arc(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); /// /// tokio::task::spawn(async move { /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -560,12 +457,12 @@ impl PublisherDeclarations for std::sync::Arc> { fn matching_listener(&self) -> MatchingListenerBuilder<'static, DefaultHandler> { MatchingListenerBuilder { publisher: PublisherRef::Shared(self.clone()), - handler: DefaultHandler, + 
handler: DefaultHandler::default(),
         }
     }
 }
 
-impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> {
+impl<'a> UndeclarableSealed<(), PublisherUndeclaration<'a>> for Publisher<'a> {
     fn undeclare_inner(self, _: ()) -> PublisherUndeclaration<'a> {
         PublisherUndeclaration { publisher: self }
     }
@@ -577,11 +474,11 @@ impl<'a> Undeclarable<(), PublisherUndeclaration<'a>> for Publisher<'a> {
 /// ```
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
+/// use zenoh::prelude::*;
 ///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let publisher = session.declare_publisher("key/expression").res().await.unwrap();
-/// publisher.undeclare().res().await.unwrap();
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let publisher = session.declare_publisher("key/expression").await.unwrap();
+/// publisher.undeclare().await.unwrap();
 /// # }
 /// ```
 #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
@@ -593,85 +490,38 @@ impl Resolvable for PublisherUndeclaration<'_> {
     type To = ZResult<()>;
 }
 
-impl SyncResolve for PublisherUndeclaration<'_> {
-    fn res_sync(mut self) -> <Self as Resolvable>::To {
-        let Publisher {
-            session, key_expr, ..
-        } = &self.publisher;
-        session
-            .undeclare_publication_intent(key_expr.clone())
-            .res_sync()?;
-        self.publisher.key_expr = unsafe { keyexpr::from_str_unchecked("") }.into();
-        Ok(())
+impl Wait for PublisherUndeclaration<'_> {
+    fn wait(mut self) -> <Self as Resolvable>::To {
+        // set the flag first to avoid a double panic if this function panics
+        self.publisher.undeclare_on_drop = false;
+        #[cfg(feature = "unstable")]
+        self.publisher.undeclare_matching_listeners()?;
+        self.publisher
+            .session
+            .undeclare_publisher_inner(self.publisher.id)
     }
 }
 
-impl AsyncResolve for PublisherUndeclaration<'_> {
-    type Future = Ready<Self::To>;
+impl IntoFuture for PublisherUndeclaration<'_> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
 
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
     }
 }
 
 impl Drop for Publisher<'_> {
     fn drop(&mut self) {
-        if !self.key_expr.is_empty() {
-            let _ = self
-                .session
-                .undeclare_publication_intent(self.key_expr.clone())
-                .res_sync();
-        }
-    }
-}
-
-/// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put),
-/// [`Publisher::delete()`](Publisher::delete) and [`Publisher::write()`](Publisher::write).
-#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct Publication<'a> { - publisher: &'a Publisher<'a>, - value: Value, - kind: SampleKind, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl<'a> Publication<'a> { - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } -} - -impl Resolvable for Publication<'_> { - type To = ZResult<()>; -} - -impl SyncResolve for Publication<'_> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - self.value, - self.kind, + if self.undeclare_on_drop { #[cfg(feature = "unstable")] - self.attachment, - ) - } -} - -impl AsyncResolve for Publication<'_> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + let _ = self.undeclare_matching_listeners(); + let _ = self.session.undeclare_publisher_inner(self.id); + } } } -impl<'a, IntoValue> Sink for Publisher<'a> -where - IntoValue: Into, -{ +impl<'a> Sink for Publisher<'a> { type Error = Error; #[inline] @@ -680,8 +530,23 @@ where } #[inline] - fn start_send(self: Pin<&mut Self>, item: IntoValue) -> Result<(), Self::Error> { - self.put(item.into()).res_sync() + fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { + let SampleFields { + payload, + kind, + encoding, + attachment, + .. + } = item.into(); + self.resolve_put( + payload, + kind, + encoding, + None, + #[cfg(feature = "unstable")] + SourceInfo::empty(), + attachment, + ) } #[inline] @@ -695,222 +560,88 @@ where } } -/// A builder for initializing a [`Publisher`]. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::publication::CongestionControl; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session -/// .declare_publisher("key/expression") -/// .congestion_control(CongestionControl::Block) -/// .res() -/// .await -/// .unwrap(); -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct PublisherBuilder<'a, 'b: 'a> { - pub(crate) session: SessionRef<'a>, - pub(crate) key_expr: ZResult>, - pub(crate) congestion_control: CongestionControl, - pub(crate) priority: Priority, - pub(crate) destination: Locality, -} - -impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { - fn clone(&self) -> Self { - Self { - session: self.session.clone(), - key_expr: match &self.key_expr { - Ok(k) => Ok(k.clone()), - Err(e) => Err(zerror!("Cloned KE Error: {}", e).into()), - }, - congestion_control: self.congestion_control, - priority: self.priority, - destination: self.destination, - } - } -} - -impl<'a, 'b> PublisherBuilder<'a, 'b> { - /// Change the `congestion_control` to apply when routing the data. - #[inline] - pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { - self.congestion_control = congestion_control; - self - } - - /// Change the priority of the written data. - #[inline] - pub fn priority(mut self, priority: Priority) -> Self { - self.priority = priority; - self - } - - /// Restrict the matching subscribers that will receive the published data - /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
- #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self - } -} - -impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { - type To = ZResult>; -} - -impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { - fn res_sync(self) -> ::To { - let mut key_expr = self.key_expr?; - if !key_expr.is_fully_optimized(&self.session) { - let session_id = self.session.id; - let expr_id = self.session.declare_prefix(key_expr.as_str()).res_sync(); - let prefix_len = key_expr - .len() - .try_into() - .expect("How did you get a key expression with a length over 2^32!?"); - key_expr = match key_expr.0 { - crate::key_expr::KeyExprInner::Borrowed(key_expr) - | crate::key_expr::KeyExprInner::BorrowedWire { key_expr, .. } => { - KeyExpr(crate::key_expr::KeyExprInner::BorrowedWire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - crate::key_expr::KeyExprInner::Owned(key_expr) - | crate::key_expr::KeyExprInner::Wire { key_expr, .. } => { - KeyExpr(crate::key_expr::KeyExprInner::Wire { - key_expr, - expr_id, - mapping: Mapping::Sender, - prefix_len, - session_id, - }) - } - } - } - self.session - .declare_publication_intent(key_expr.clone()) - .res_sync()?; - let publisher = Publisher { - session: self.session, - key_expr, - congestion_control: self.congestion_control, - priority: self.priority, - destination: self.destination, +impl Publisher<'_> { + pub(crate) fn resolve_put( + &self, + payload: ZBytes, + kind: SampleKind, + encoding: Encoding, + timestamp: Option, + #[cfg(feature = "unstable")] source_info: SourceInfo, + attachment: Option, + ) -> ZResult<()> { + tracing::trace!("write({:?}, [...])", &self.key_expr); + let primitives = zread!(self.session.state) + .primitives + .as_ref() + .unwrap() + .clone(); + let timestamp = if timestamp.is_none() { + self.session.runtime.new_timestamp() + } else { + timestamp }; - tracing::trace!("publish({:?})", publisher.key_expr); - Ok(publisher) - } -} - -impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -fn resolve_put( - publisher: &Publisher<'_>, - value: Value, - kind: SampleKind, - #[cfg(feature = "unstable")] attachment: Option, -) -> ZResult<()> { - tracing::trace!("write({:?}, [...])", &publisher.key_expr); - let primitives = zread!(publisher.session.state) - .primitives - .as_ref() - .unwrap() - .clone(); - let timestamp = publisher.session.runtime.new_timestamp(); - - if publisher.destination != Locality::SessionLocal { - primitives.send_push(Push { - wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), - ext_qos: ext::QoSType::new( - publisher.priority.into(), - publisher.congestion_control, - false, - ), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - payload: match kind { - SampleKind::Put => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } - PushBody::Put(Put { + if self.destination != Locality::SessionLocal { + primitives.send_push(Push { + wire_expr: self.key_expr.to_wire(&self.session).to_owned(), + ext_qos: ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + ), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + payload: match kind { + SampleKind::Put => PushBody::Put(Put { 
timestamp, - encoding: value.encoding.clone(), + encoding: encoding.clone().into(), + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, - ext_attachment, + ext_attachment: attachment.clone().map(|a| a.into()), ext_unknown: vec![], - payload: value.payload.clone(), - }) - } - SampleKind::Delete => { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } - PushBody::Del(Del { + payload: payload.clone().into(), + }), + SampleKind::Delete => PushBody::Del(Del { timestamp, + #[cfg(feature = "unstable")] + ext_sinfo: source_info.into(), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, - ext_attachment, + ext_attachment: attachment.clone().map(|a| a.into()), ext_unknown: vec![], - }) - } - }, - }); - } - if publisher.destination != Locality::Remote { - let data_info = DataInfo { - kind, - encoding: Some(value.encoding), - timestamp, - source_id: None, - source_sn: None, - qos: QoS::from(ext::QoSType::new( - publisher.priority.into(), - publisher.congestion_control, - false, - )), - }; - - publisher.session.handle_data( - true, - &publisher.key_expr.to_wire(&publisher.session), - Some(data_info), - value.payload, - #[cfg(feature = "unstable")] - attachment, - ); + }), + }, + }); + } + if self.destination != Locality::Remote { + let data_info = DataInfo { + kind, + encoding: Some(encoding), + timestamp, + source_id: None, + source_sn: None, + qos: QoS::from(ext::QoSType::new( + self.priority.into(), + self.congestion_control, + self.is_express, + )), + }; + + self.session.execute_subscriber_callbacks( + true, + &self.key_expr.to_wire(&self.session), + Some(data_info), + payload.into(), + SubscriberKind::Subscriber, + attachment, + ); + } + Ok(()) } - Ok(()) } /// The Priority of zenoh messages. @@ -928,12 +659,19 @@ pub enum Priority { } impl Priority { + /// Default + pub const DEFAULT: Self = Self::Data; /// The lowest Priority - pub const MIN: Self = Self::Background; + #[zenoh_macros::internal] + pub const MIN: Self = Self::MIN_; + const MIN_: Self = Self::Background; /// The highest Priority - pub const MAX: Self = Self::RealTime; + #[zenoh_macros::internal] + pub const MAX: Self = Self::MAX_; + const MAX_: Self = Self::RealTime; /// The number of available priorities - pub const NUM: usize = 1 + Self::MIN as usize - Self::MAX as usize; + #[zenoh_macros::internal] + pub const NUM: usize = 1 + Self::MIN_ as usize - Self::MAX_ as usize; } impl TryFrom for Priority { @@ -959,8 +697,8 @@ impl TryFrom for Priority { unknown => bail!( "{} is not a valid priority value. 
Admitted values are: [{}-{}].", unknown, - Self::MAX as u8, - Self::MIN as u8 + Self::MAX_ as u8, + Self::MIN_ as u8 ), } } @@ -1003,11 +741,11 @@ impl TryFrom for Priority { /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_status = publisher.matching_status().res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_status = publisher.matching_status().await.unwrap(); /// # } /// ``` #[zenoh_macros::unstable] @@ -1024,13 +762,12 @@ impl MatchingStatus { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_subscribers: bool = publisher /// .matching_status() - /// .res() /// .await /// .unwrap() /// .matching_subscribers(); @@ -1057,10 +794,10 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback(|matching_status| { @@ -1070,7 +807,6 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// println!("Publisher has NO MORE matching subscribers."); /// } /// }) - /// .res() /// .await /// .unwrap(); /// # } @@ -1097,15 +833,14 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// /// let mut n = 0; - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .callback_mut(move |_matching_status| { n += 1; }) - /// .res() /// .await /// .unwrap(); /// # } @@ -1119,23 +854,22 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { where CallbackMut: FnMut(MatchingStatus) + Send + Sync + 'static, { - self.callback(crate::handlers::locked(callback)) + self.callback(crate::api::handlers::locked(callback)) } - /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). + /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). 
/// /// # Examples /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); /// let matching_listener = publisher /// .matching_listener() /// .with(flume::bounded(32)) - /// .res() /// .await /// .unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { @@ -1151,7 +885,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoCallbackReceiverPair<'static, MatchingStatus>, + Handler: IntoHandler<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, @@ -1164,46 +898,49 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] -impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> Wait for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { #[zenoh_macros::unstable] - fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_cb_receiver_pair(); - self.publisher + fn wait(self) -> ::To { + let (callback, receiver) = self.handler.into_handler(); + let state = self + .publisher .session - .declare_matches_listener_inner(&self.publisher, callback) - .map(|listener_state| MatchingListener { - listener: MatchingListenerInner { - publisher: self.publisher, - state: listener_state, - alive: true, - }, - receiver, - }) + .declare_matches_listener_inner(&self.publisher, callback)?; + zlock!(self.publisher.matching_listeners).insert(state.id); + Ok(MatchingListener { + listener: MatchingListenerInner { + publisher: self.publisher, + state, + undeclare_on_drop: true, + }, + receiver, + }) } } #[zenoh_macros::unstable] -impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> +impl<'a, Handler> IntoFuture for MatchingListenerBuilder<'a, Handler> where - Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, - Handler::Receiver: Send, + Handler: IntoHandler<'static, MatchingStatus> + Send, + Handler::Handler: Send, { - type Future = Ready; + type Output = ::To; + type IntoFuture = Ready<::To>; #[zenoh_macros::unstable] - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) } } @@ -1230,19 +967,19 @@ impl std::fmt::Debug for MatchingListenerState { pub(crate) struct MatchingListenerInner<'a> { pub(crate) publisher: PublisherRef<'a>, pub(crate) state: std::sync::Arc, - pub(crate) alive: bool, + undeclare_on_drop: bool, } #[zenoh_macros::unstable] impl<'a> MatchingListenerInner<'a> { #[inline] pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { - 
Undeclarable::undeclare_inner(self, ()) + UndeclarableSealed::undeclare_inner(self, ()) } } #[zenoh_macros::unstable] -impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { +impl<'a> UndeclarableSealed<(), MatchingListenerUndeclaration<'a>> for MatchingListenerInner<'a> { fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> { MatchingListenerUndeclaration { subscriber: self } } @@ -1255,11 +992,11 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); -/// let matching_listener = publisher.matching_listener().res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let publisher = session.declare_publisher("key/expression").await.unwrap(); +/// let matching_listener = publisher.matching_listener().await.unwrap(); /// while let Ok(matching_status) = matching_listener.recv_async().await { /// if matching_status.matching_subscribers() { /// println!("Publisher has matching subscribers."); @@ -1272,7 +1009,7 @@ impl<'a> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListene #[zenoh_macros::unstable] pub struct MatchingListener<'a, Receiver> { pub(crate) listener: MatchingListenerInner<'a>, - pub receiver: Receiver, + pub(crate) receiver: Receiver, } #[zenoh_macros::unstable] @@ -1286,24 +1023,32 @@ impl<'a, Receiver> MatchingListener<'a, Receiver> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); - /// let matching_listener = publisher.matching_listener().res().await.unwrap(); - /// matching_listener.undeclare().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let publisher = session.declare_publisher("key/expression").await.unwrap(); + /// let matching_listener = publisher.matching_listener().await.unwrap(); + /// matching_listener.undeclare().await.unwrap(); /// # } /// ``` #[inline] pub fn undeclare(self) -> MatchingListenerUndeclaration<'a> { self.listener.undeclare() } + + /// Make the matching listener run in background, until the publisher is undeclared. + #[inline] + #[zenoh_macros::unstable] + pub fn background(mut self) { + // The matching listener will be undeclared as part of publisher undeclaration. 
+        self.listener.undeclare_on_drop = false;
+    }
 }

 #[zenoh_macros::unstable]
-impl<'a, T> Undeclarable<(), MatchingListenerUndeclaration<'a>> for MatchingListener<'a, T> {
+impl<'a, T> UndeclarableSealed<(), MatchingListenerUndeclaration<'a>> for MatchingListener<'a, T> {
     fn undeclare_inner(self, _: ()) -> MatchingListenerUndeclaration<'a> {
-        Undeclarable::undeclare_inner(self.listener, ())
+        UndeclarableSealed::undeclare_inner(self.listener, ())
     }
 }

@@ -1333,9 +1078,11 @@ impl Resolvable for MatchingListenerUndeclaration<'_> {
 }

 #[zenoh_macros::unstable]
-impl SyncResolve for MatchingListenerUndeclaration<'_> {
-    fn res_sync(mut self) -> <Self as Resolvable>::To {
-        self.subscriber.alive = false;
+impl Wait for MatchingListenerUndeclaration<'_> {
+    fn wait(mut self) -> <Self as Resolvable>::To {
+        // set the flag first to avoid a double panic if this function panics
+        self.subscriber.undeclare_on_drop = false;
+        zlock!(self.subscriber.publisher.matching_listeners).remove(&self.subscriber.state.id);
         self.subscriber
             .publisher
             .session
@@ -1344,18 +1091,20 @@ impl SyncResolve for MatchingListenerUndeclaration<'_> {
 }

 #[zenoh_macros::unstable]
-impl AsyncResolve for MatchingListenerUndeclaration<'_> {
-    type Future = Ready<Self::To>;
+impl IntoFuture for MatchingListenerUndeclaration<'_> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;

-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
     }
 }

 #[zenoh_macros::unstable]
 impl Drop for MatchingListenerInner<'_> {
     fn drop(&mut self) {
-        if self.alive {
+        if self.undeclare_on_drop {
+            zlock!(self.publisher.matching_listeners).remove(&self.state.id);
             let _ = self
                 .publisher
                 .session
@@ -1366,12 +1115,20 @@ impl Drop for MatchingListenerInner<'_> {

 #[cfg(test)]
 mod tests {
+    use zenoh_config::Config;
+    use zenoh_core::Wait;
+
+    use crate::api::{sample::SampleKind, session::SessionDeclarations};
+
+    #[cfg(feature = "internal")]
     #[test]
     fn priority_from() {
-        use super::Priority as APrio;
         use std::convert::TryInto;
+
+        use zenoh_protocol::core::Priority as TPrio;
+
+        use super::Priority as APrio;
+
         for i in APrio::MAX as u8..=APrio::MIN as u8 {
             let p: APrio = i.try_into().unwrap();
@@ -1392,22 +1149,26 @@ mod tests {

     #[test]
     fn sample_kind_integrity_in_publication() {
-        use crate::publication::HasWriteWithSampleKind;
-        use crate::{open, prelude::sync::*};
-        use zenoh_protocol::core::SampleKind;
+        use crate::api::session::open;

         const KEY_EXPR: &str = "test/sample_kind_integrity/publication";
         const VALUE: &str = "zenoh";

         fn sample_kind_integrity_in_publication_with(kind: SampleKind) {
-            let session = open(Config::default()).res().unwrap();
-            let sub = session.declare_subscriber(KEY_EXPR).res().unwrap();
-            let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap();
-            pub_.write(kind, VALUE).res().unwrap();
+            let session = open(Config::default()).wait().unwrap();
+            let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap();
+            let pub_ = session.declare_publisher(KEY_EXPR).wait().unwrap();
+
+            match kind {
+                SampleKind::Put => pub_.put(VALUE).wait().unwrap(),
+                SampleKind::Delete => pub_.delete().wait().unwrap(),
+            }

             let sample = sub.recv().unwrap();
             assert_eq!(sample.kind, kind);
-            assert_eq!(sample.value.to_string(), VALUE);
+            if let SampleKind::Put = kind {
+                assert_eq!(sample.payload.deserialize::<String>().unwrap(), VALUE);
+            }
         }

         sample_kind_integrity_in_publication_with(SampleKind::Put);
@@ -1416,25 +1177,24 @@
     #[test]
     fn sample_kind_integrity_in_put_builder() {
-        use crate::{open, prelude::sync::*};
-        use zenoh_protocol::core::SampleKind;
+        use crate::api::session::open;

         const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder";
         const VALUE: &str = "zenoh";

         fn sample_kind_integrity_in_put_builder_with(kind: SampleKind) {
-            let session = open(Config::default()).res().unwrap();
-            let sub = session.declare_subscriber(KEY_EXPR).res().unwrap();
+            let session = open(Config::default()).wait().unwrap();
+            let sub = session.declare_subscriber(KEY_EXPR).wait().unwrap();

             match kind {
-                SampleKind::Put => session.put(KEY_EXPR, VALUE).res().unwrap(),
-                SampleKind::Delete => session.delete(KEY_EXPR).res().unwrap(),
+                SampleKind::Put => session.put(KEY_EXPR, VALUE).wait().unwrap(),
+                SampleKind::Delete => session.delete(KEY_EXPR).wait().unwrap(),
             }

             let sample = sub.recv().unwrap();
             assert_eq!(sample.kind, kind);
             if let SampleKind::Put = kind {
-                assert_eq!(sample.value.to_string(), VALUE);
+                assert_eq!(sample.payload.deserialize::<String>().unwrap(), VALUE);
             }
         }
diff --git a/zenoh/src/api/query.rs b/zenoh/src/api/query.rs
new file mode 100644
index 0000000000..2a1016db5f
--- /dev/null
+++ b/zenoh/src/api/query.rs
@@ -0,0 +1,521 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{
+    collections::HashMap,
+    future::{IntoFuture, Ready},
+    time::Duration,
+};
+
+#[cfg(feature = "unstable")]
+use zenoh_config::ZenohId;
+use zenoh_core::{Resolvable, Wait};
+use zenoh_keyexpr::OwnedKeyExpr;
+#[cfg(feature = "unstable")]
+use zenoh_protocol::core::ZenohIdProto;
+use zenoh_protocol::core::{CongestionControl, Parameters};
+use zenoh_result::ZResult;
+
+use super::{
+    builders::sample::{EncodingBuilderTrait, QoSBuilderTrait},
+    bytes::ZBytes,
+    encoding::Encoding,
+    handlers::{locked, Callback, DefaultHandler, IntoHandler},
+    key_expr::KeyExpr,
+    publisher::Priority,
+    sample::{Locality, QoSBuilder, Sample},
+    selector::Selector,
+    session::Session,
+    value::Value,
+};
+#[cfg(feature = "unstable")]
+use super::{sample::SourceInfo, selector::ZenohParameters};
+use crate::{bytes::OptionZBytes, sample::SampleBuilderTrait};
+
+/// The [`Queryable`](crate::query::Queryable)s that should be the target of a [`get`](Session::get).
+pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType;
+
+/// The kind of consolidation.
+pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation;
+
+/// The replies consolidation strategy to apply on replies to a [`get`](Session::get).
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct QueryConsolidation {
+    pub(crate) mode: ConsolidationMode,
+}
+
+impl QueryConsolidation {
+    pub const DEFAULT: Self = Self::AUTO;
+    /// Automatic query consolidation strategy selection.
+    pub const AUTO: Self = Self {
+        mode: ConsolidationMode::Auto,
+    };
+
+    pub(crate) const fn from_mode(mode: ConsolidationMode) -> Self {
+        Self { mode }
+    }
+
+    /// Returns the requested [`ConsolidationMode`].
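+    ///
+    /// A minimal doc sketch (the `zenoh::query` re-export path is an assumption):
+    /// ```
+    /// use zenoh::query::{ConsolidationMode, QueryConsolidation};
+    ///
+    /// let consolidation: QueryConsolidation = ConsolidationMode::None.into();
+    /// assert_eq!(consolidation.mode(), ConsolidationMode::None);
+    /// ```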
+    pub fn mode(&self) -> ConsolidationMode {
+        self.mode
+    }
+}
+
+impl From<ConsolidationMode> for QueryConsolidation {
+    fn from(mode: ConsolidationMode) -> Self {
+        Self::from_mode(mode)
+    }
+}
+
+impl Default for QueryConsolidation {
+    fn default() -> Self {
+        Self::DEFAULT
+    }
+}
+
+/// Error returned by a [`get`](Session::get).
+#[derive(Clone, Debug, PartialEq, Eq, Default)]
+pub struct ReplyError {
+    pub(crate) payload: ZBytes,
+    pub(crate) encoding: Encoding,
+}
+
+impl ReplyError {
+    /// Gets the payload of this ReplyError.
+    #[inline]
+    pub fn payload(&self) -> &ZBytes {
+        &self.payload
+    }
+
+    /// Gets the encoding of this ReplyError.
+    #[inline]
+    pub fn encoding(&self) -> &Encoding {
+        &self.encoding
+    }
+}
+
+impl From<Value> for ReplyError {
+    fn from(value: Value) -> Self {
+        Self {
+            payload: value.payload,
+            encoding: value.encoding,
+        }
+    }
+}
+
+/// Struct returned by a [`get`](Session::get).
+#[non_exhaustive]
+#[derive(Clone, Debug)]
+pub struct Reply {
+    pub(crate) result: Result<Sample, ReplyError>,
+    #[cfg(feature = "unstable")]
+    pub(crate) replier_id: Option<ZenohIdProto>,
+}
+
+impl Reply {
+    /// Gets a borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result.
+    pub fn result(&self) -> Result<&Sample, &ReplyError> {
+        self.result.as_ref()
+    }
+
+    /// Gets a mutable borrowed result of this `Reply`. Use [`Reply::into_result`] to take ownership of the result.
+    pub fn result_mut(&mut self) -> Result<&mut Sample, &mut ReplyError> {
+        self.result.as_mut()
+    }
+
+    /// Converts this `Reply` into its result. Use [`Reply::result`] if you don't want to take ownership.
+    pub fn into_result(self) -> Result<Sample, ReplyError> {
+        self.result
+    }
+
+    #[zenoh_macros::unstable]
+    /// Gets the id of the zenoh instance that answered this Reply.
+    pub fn replier_id(&self) -> Option<ZenohId> {
+        self.replier_id.map(Into::into)
+    }
+}
+
+impl From<Reply> for Result<Sample, ReplyError> {
+    fn from(value: Reply) -> Self {
+        value.into_result()
+    }
+}
+
+#[cfg(feature = "unstable")]
+pub(crate) struct LivelinessQueryState {
+    pub(crate) callback: Callback<'static, Reply>,
+}
+
+pub(crate) struct QueryState {
+    pub(crate) nb_final: usize,
+    pub(crate) key_expr: KeyExpr<'static>,
+    pub(crate) parameters: Parameters<'static>,
+    pub(crate) reception_mode: ConsolidationMode,
+    pub(crate) replies: Option<HashMap<OwnedKeyExpr, Reply>>,
+    pub(crate) callback: Callback<'static, Reply>,
+}
+
+impl QueryState {
+    pub(crate) fn selector(&self) -> Selector {
+        Selector::borrowed(&self.key_expr, &self.parameters)
+    }
+}
+
+/// A builder for initializing a `query`.
+/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::{prelude::*, query::{ConsolidationMode, QueryTarget}}; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session +/// .get("key/expression?value>1") +/// .target(QueryTarget::All) +/// .consolidation(ConsolidationMode::None) +/// .await +/// .unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// println!("Received {:?}", reply.result()) +/// } +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct SessionGetBuilder<'a, 'b, Handler> { + pub(crate) session: &'a Session, + pub(crate) selector: ZResult>, + pub(crate) target: QueryTarget, + pub(crate) consolidation: QueryConsolidation, + pub(crate) qos: QoSBuilder, + pub(crate) destination: Locality, + pub(crate) timeout: Duration, + pub(crate) handler: Handler, + pub(crate) value: Option, + pub(crate) attachment: Option, + #[cfg(feature = "unstable")] + pub(crate) source_info: SourceInfo, +} + +impl SampleBuilderTrait for SessionGetBuilder<'_, '_, Handler> { + #[zenoh_macros::unstable] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } + + fn attachment>(self, attachment: T) -> Self { + let attachment: OptionZBytes = attachment.into(); + Self { + attachment: attachment.into(), + ..self + } + } +} + +impl QoSBuilderTrait for SessionGetBuilder<'_, '_, DefaultHandler> { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let qos = self.qos.congestion_control(congestion_control); + Self { qos, ..self } + } + + fn priority(self, priority: Priority) -> Self { + let qos = self.qos.priority(priority); + Self { qos, ..self } + } + + fn express(self, is_express: bool) -> Self { + let qos = self.qos.express(is_express); + Self { qos, ..self } + } +} + +impl EncodingBuilderTrait for SessionGetBuilder<'_, '_, Handler> { + fn encoding>(self, encoding: T) -> Self { + let mut value = self.value.unwrap_or_default(); + value.encoding = encoding.into(); + Self { + value: Some(value), + ..self + } + } +} + +impl<'a, 'b> SessionGetBuilder<'a, 'b, DefaultHandler> { + /// Receive the replies for this query with a callback. + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let queryable = session + /// .get("key/expression") + /// .callback(|reply| {println!("Received {:?}", reply.result());}) + /// .await + /// .unwrap(); + /// # } + /// ``` + #[inline] + pub fn callback(self, callback: Callback) -> SessionGetBuilder<'a, 'b, Callback> + where + Callback: Fn(Reply) + Send + Sync + 'static, + { + let SessionGetBuilder { + session, + selector, + target, + consolidation, + qos, + destination, + timeout, + value, + attachment, + #[cfg(feature = "unstable")] + source_info, + handler: _, + } = self; + SessionGetBuilder { + session, + selector, + target, + consolidation, + qos, + destination, + timeout, + value, + attachment, + #[cfg(feature = "unstable")] + source_info, + handler: callback, + } + } + + /// Receive the replies for this query with a mutable callback. + /// + /// Using this guarantees that your callback will never be called concurrently. 
+ /// If your callback is also accepted by the [`callback`](crate::session::SessionGetBuilder::callback) method, we suggest you use it instead of `callback_mut` + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let mut n = 0; + /// let queryable = session + /// .get("key/expression") + /// .callback_mut(move |reply| {n += 1;}) + /// .await + /// .unwrap(); + /// # } + /// ``` + #[inline] + pub fn callback_mut( + self, + callback: CallbackMut, + ) -> SessionGetBuilder<'a, 'b, impl Fn(Reply) + Send + Sync + 'static> + where + CallbackMut: FnMut(Reply) + Send + Sync + 'static, + { + self.callback(locked(callback)) + } + + /// Receive the replies for this query with a [`Handler`](crate::handlers::IntoHandler). + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let replies = session + /// .get("key/expression") + /// .with(flume::bounded(32)) + /// .await + /// .unwrap(); + /// while let Ok(reply) = replies.recv_async().await { + /// println!("Received {:?}", reply.result()); + /// } + /// # } + /// ``` + #[inline] + pub fn with(self, handler: Handler) -> SessionGetBuilder<'a, 'b, Handler> + where + Handler: IntoHandler<'static, Reply>, + { + let SessionGetBuilder { + session, + selector, + target, + consolidation, + qos, + destination, + timeout, + value, + attachment, + #[cfg(feature = "unstable")] + source_info, + handler: _, + } = self; + SessionGetBuilder { + session, + selector, + target, + consolidation, + qos, + destination, + timeout, + value, + attachment, + #[cfg(feature = "unstable")] + source_info, + handler, + } + } +} +impl<'a, 'b, Handler> SessionGetBuilder<'a, 'b, Handler> { + #[inline] + pub fn payload(mut self, payload: IntoZBytes) -> Self + where + IntoZBytes: Into, + { + let mut value = self.value.unwrap_or_default(); + value.payload = payload.into(); + self.value = Some(value); + self + } + + /// Change the target of the query. + #[inline] + pub fn target(self, target: QueryTarget) -> Self { + Self { target, ..self } + } + + /// Change the consolidation mode of the query. + #[inline] + pub fn consolidation>(self, consolidation: QC) -> Self { + Self { + consolidation: consolidation.into(), + ..self + } + } + + /// Restrict the matching queryables that will receive the query + /// to the ones that have the given [`Locality`](crate::prelude::Locality). + #[zenoh_macros::unstable] + #[inline] + pub fn allowed_destination(self, destination: Locality) -> Self { + Self { + destination, + ..self + } + } + + /// Set query timeout. + #[inline] + pub fn timeout(self, timeout: Duration) -> Self { + Self { timeout, ..self } + } + + /// By default, `get` guarantees that it will only receive replies whose key expressions intersect + /// with the queried key expression. + /// + /// If allowed to through `accept_replies(ReplyKeyExpr::Any)`, queryables may also reply on key + /// expressions that don't intersect with the query's. 
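+    ///
+    /// A minimal sketch (the `ReplyKeyExpr` import path is an assumption, and this method is gated behind the `unstable` feature):
+    /// ```no_run
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    /// use zenoh::query::ReplyKeyExpr;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// // Also accept replies on key expressions disjoint from "key/expression".
+    /// let replies = session
+    ///     .get("key/expression")
+    ///     .accept_replies(ReplyKeyExpr::Any)
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```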
+ #[zenoh_macros::unstable] + pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { + if accept == ReplyKeyExpr::Any { + if let Ok(Selector { + key_expr, + mut parameters, + }) = self.selector + { + parameters.to_mut().set_reply_key_expr_any(); + let selector = Ok(Selector { + key_expr, + parameters, + }); + return Self { selector, ..self }; + } + } + self + } +} + +#[zenoh_macros::unstable] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +pub enum ReplyKeyExpr { + Any, + #[default] + MatchingQuery, +} + +impl Resolvable for SessionGetBuilder<'_, '_, Handler> +where + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, +{ + type To = ZResult; +} + +impl Wait for SessionGetBuilder<'_, '_, Handler> +where + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, +{ + fn wait(self) -> ::To { + let (callback, receiver) = self.handler.into_handler(); + let Selector { + key_expr, + parameters, + } = self.selector?; + self.session + .query( + &key_expr, + ¶meters, + self.target, + self.consolidation, + self.qos.into(), + self.destination, + self.timeout, + self.value, + self.attachment, + #[cfg(feature = "unstable")] + self.source_info, + callback, + ) + .map(|_| receiver) + } +} + +impl IntoFuture for SessionGetBuilder<'_, '_, Handler> +where + Handler: IntoHandler<'static, Reply> + Send, + Handler::Handler: Send, +{ + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} diff --git a/zenoh/src/api/queryable.rs b/zenoh/src/api/queryable.rs new file mode 100644 index 0000000000..61ae0093ea --- /dev/null +++ b/zenoh/src/api/queryable.rs @@ -0,0 +1,930 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+use std::{
+    fmt,
+    future::{IntoFuture, Ready},
+    ops::{Deref, DerefMut},
+    sync::Arc,
+};
+
+use uhlc::Timestamp;
+use zenoh_core::{Resolvable, Resolve, Wait};
+use zenoh_protocol::{
+    core::{CongestionControl, EntityId, Parameters, WireExpr, ZenohIdProto},
+    network::{response, Mapping, RequestId, Response, ResponseFinal},
+    zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody},
+};
+use zenoh_result::ZResult;
+#[zenoh_macros::unstable]
+use {
+    super::{query::ReplyKeyExpr, sample::SourceInfo},
+    zenoh_config::wrappers::EntityGlobalId,
+    zenoh_protocol::core::EntityGlobalIdProto,
+};
+
+#[zenoh_macros::unstable]
+use super::selector::ZenohParameters;
+use super::{
+    builders::sample::{
+        EncodingBuilderTrait, QoSBuilderTrait, SampleBuilder, SampleBuilderTrait,
+        TimestampBuilderTrait,
+    },
+    bytes::{OptionZBytes, ZBytes},
+    encoding::Encoding,
+    handlers::{locked, DefaultHandler, IntoHandler},
+    key_expr::KeyExpr,
+    publisher::Priority,
+    sample::{Locality, QoSBuilder, Sample, SampleKind},
+    selector::Selector,
+    session::{SessionRef, UndeclarableSealed},
+    value::Value,
+    Id,
+};
+use crate::net::primitives::Primitives;
+
+pub(crate) struct QueryInner {
+    pub(crate) key_expr: KeyExpr<'static>,
+    pub(crate) parameters: Parameters<'static>,
+    pub(crate) qid: RequestId,
+    pub(crate) zid: ZenohIdProto,
+    pub(crate) primitives: Arc<dyn Primitives>,
+}
+
+impl Drop for QueryInner {
+    fn drop(&mut self) {
+        self.primitives.send_response_final(ResponseFinal {
+            rid: self.qid,
+            ext_qos: response::ext::QoSType::RESPONSE_FINAL,
+            ext_tstamp: None,
+        });
+    }
+}
+
+/// A query received by a [`Queryable`].
+#[derive(Clone)]
+pub struct Query {
+    pub(crate) inner: Arc<QueryInner>,
+    pub(crate) eid: EntityId,
+    pub(crate) value: Option<Value>,
+    pub(crate) attachment: Option<ZBytes>,
+}
+
+impl Query {
+    /// The full [`Selector`] of this Query.
+    #[inline(always)]
+    pub fn selector(&self) -> Selector<'_> {
+        Selector::borrowed(&self.inner.key_expr, &self.inner.parameters)
+    }
+
+    /// The key selector part of this Query.
+    #[inline(always)]
+    pub fn key_expr(&self) -> &KeyExpr<'static> {
+        &self.inner.key_expr
+    }
+
+    /// This Query's selector parameters.
+    #[inline(always)]
+    pub fn parameters(&self) -> &Parameters<'static> {
+        &self.inner.parameters
+    }
+
+    /// This Query's payload.
+    #[inline(always)]
+    pub fn payload(&self) -> Option<&ZBytes> {
+        self.value.as_ref().map(|v| &v.payload)
+    }
+
+    /// A mutable reference to this Query's payload.
+    #[inline(always)]
+    pub fn payload_mut(&mut self) -> Option<&mut ZBytes> {
+        self.value.as_mut().map(|v| &mut v.payload)
+    }
+
+    /// This Query's encoding.
+    #[inline(always)]
+    pub fn encoding(&self) -> Option<&Encoding> {
+        self.value.as_ref().map(|v| &v.encoding)
+    }
+
+    /// This Query's attachment.
+    pub fn attachment(&self) -> Option<&ZBytes> {
+        self.attachment.as_ref()
+    }
+
+    /// A mutable reference to this Query's attachment.
+    pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> {
+        self.attachment.as_mut()
+    }
+
+    /// Sends a reply in the form of [`Sample`] to this Query.
+    ///
+    /// By default, queries only accept replies whose key expression intersects with the query's.
+    /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]),
+    /// replying on a disjoint key expression will result in an error when resolving the reply.
+    /// This API is for internal use only.
+    #[inline(always)]
+    #[zenoh_macros::internal]
+    pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> {
+        ReplySample {
+            query: self,
+            sample,
+        }
+    }
+
+    /// Sends a reply to this Query.
+    ///
+    /// By default, queries only accept replies whose key expression intersects with the query's.
+    /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]),
+    /// replying on a disjoint key expression will result in an error when resolving the reply.
+    #[inline(always)]
+    pub fn reply<'b, TryIntoKeyExpr, IntoZBytes>(
+        &self,
+        key_expr: TryIntoKeyExpr,
+        payload: IntoZBytes,
+    ) -> ReplyBuilder<'_, 'b, ReplyBuilderPut>
+    where
+        TryIntoKeyExpr: TryInto<KeyExpr<'b>>,
+        <TryIntoKeyExpr as TryInto<KeyExpr<'b>>>::Error: Into<zenoh_result::Error>,
+        IntoZBytes: Into<ZBytes>,
+    {
+        ReplyBuilder {
+            query: self,
+            key_expr: key_expr.try_into().map_err(Into::into),
+            qos: response::ext::QoSType::RESPONSE.into(),
+            kind: ReplyBuilderPut {
+                payload: payload.into(),
+                encoding: Encoding::default(),
+            },
+            timestamp: None,
+            #[cfg(feature = "unstable")]
+            source_info: SourceInfo::empty(),
+            attachment: None,
+        }
+    }
+
+    /// Sends an error reply to this Query.
+    ///
+    #[inline(always)]
+    pub fn reply_err<IntoZBytes>(&self, payload: IntoZBytes) -> ReplyErrBuilder<'_>
+    where
+        IntoZBytes: Into<ZBytes>,
+    {
+        ReplyErrBuilder {
+            query: self,
+            value: Value::new(payload, Encoding::default()),
+        }
+    }
+
+    /// Sends a delete reply to this Query.
+    ///
+    /// By default, queries only accept replies whose key expression intersects with the query's.
+    /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]),
+    /// replying on a disjoint key expression will result in an error when resolving the reply.
+    #[inline(always)]
+    pub fn reply_del<'b, TryIntoKeyExpr>(
+        &self,
+        key_expr: TryIntoKeyExpr,
+    ) -> ReplyBuilder<'_, 'b, ReplyBuilderDelete>
+    where
+        TryIntoKeyExpr: TryInto<KeyExpr<'b>>,
+        <TryIntoKeyExpr as TryInto<KeyExpr<'b>>>::Error: Into<zenoh_result::Error>,
+    {
+        ReplyBuilder {
+            query: self,
+            key_expr: key_expr.try_into().map_err(Into::into),
+            qos: response::ext::QoSType::RESPONSE.into(),
+            kind: ReplyBuilderDelete,
+            timestamp: None,
+            #[cfg(feature = "unstable")]
+            source_info: SourceInfo::empty(),
+            attachment: None,
+        }
+    }
+
+    /// Queries may or may not accept replies on key expressions that do not intersect with their own key expression.
+    /// This getter allows you to check whether or not a specific query does.
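+    ///
+    /// A minimal sketch inside a queryable callback (the `Query` and `ReplyKeyExpr` paths are assumptions):
+    /// ```no_run
+    /// # use zenoh::query::ReplyKeyExpr;
+    /// # fn handle(query: &zenoh::queryable::Query) {
+    /// if query.accepts_replies().unwrap_or(ReplyKeyExpr::MatchingQuery) == ReplyKeyExpr::Any {
+    ///     // Replying on a key expression disjoint from the query's will not be rejected.
+    /// }
+    /// # }
+    /// ```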
+ #[zenoh_macros::unstable] + pub fn accepts_replies(&self) -> ZResult { + self._accepts_any_replies().map(|any| { + if any { + ReplyKeyExpr::Any + } else { + ReplyKeyExpr::MatchingQuery + } + }) + } + #[cfg(feature = "unstable")] + fn _accepts_any_replies(&self) -> ZResult { + Ok(self.parameters().reply_key_expr_any()) + } +} + +impl fmt::Debug for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Query") + .field("key_selector", &self.inner.key_expr) + .field("parameters", &self.inner.parameters) + .finish() + } +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Query") + .field( + "selector", + &format!("{}{}", &self.inner.key_expr, &self.inner.parameters), + ) + .finish() + } +} + +#[zenoh_macros::internal] +pub struct ReplySample<'a> { + query: &'a Query, + sample: Sample, +} + +#[zenoh_macros::internal] +impl Resolvable for ReplySample<'_> { + type To = ZResult<()>; +} + +#[zenoh_macros::internal] +impl Wait for ReplySample<'_> { + fn wait(self) -> ::To { + self.query._reply_sample(self.sample) + } +} + +#[zenoh_macros::internal] +impl IntoFuture for ReplySample<'_> { + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} + +#[derive(Debug)] +pub struct ReplyBuilderPut { + payload: ZBytes, + encoding: Encoding, +} +#[derive(Debug)] +pub struct ReplyBuilderDelete; + +/// A builder returned by [`Query::reply()`](Query::reply) and [`Query::reply_del()`](Query::reply_del) +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyBuilder<'a, 'b, T> { + query: &'a Query, + key_expr: ZResult>, + kind: T, + timestamp: Option, + qos: QoSBuilder, + #[cfg(feature = "unstable")] + source_info: SourceInfo, + attachment: Option, +} + +impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { + fn timestamp>>(self, timestamp: U) -> Self { + Self { + timestamp: timestamp.into(), + ..self + } + } +} + +impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { + fn attachment>(self, attachment: U) -> Self { + let attachment: OptionZBytes = attachment.into(); + Self { + attachment: attachment.into(), + ..self + } + } + + #[cfg(feature = "unstable")] + fn source_info(self, source_info: SourceInfo) -> Self { + Self { + source_info, + ..self + } + } +} + +impl QoSBuilderTrait for ReplyBuilder<'_, '_, T> { + fn congestion_control(self, congestion_control: CongestionControl) -> Self { + let qos = self.qos.congestion_control(congestion_control); + Self { qos, ..self } + } + + fn priority(self, priority: Priority) -> Self { + let qos = self.qos.priority(priority); + Self { qos, ..self } + } + + fn express(self, is_express: bool) -> Self { + let qos = self.qos.express(is_express); + Self { qos, ..self } + } +} + +impl EncodingBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn encoding>(self, encoding: T) -> Self { + Self { + kind: ReplyBuilderPut { + encoding: encoding.into(), + ..self.kind + }, + ..self + } + } +} + +impl Resolvable for ReplyBuilder<'_, '_, T> { + type To = ZResult<()>; +} + +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderPut> { + fn wait(self) -> ::To { + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::put(key_expr, self.kind.payload) + .encoding(self.kind.encoding) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = 
sample.source_info(self.source_info); + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl Wait for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + fn wait(self) -> ::To { + let key_expr = self.key_expr?.into_owned(); + let sample = SampleBuilder::delete(key_expr) + .timestamp(self.timestamp) + .qos(self.qos.into()); + #[cfg(feature = "unstable")] + let sample = sample.source_info(self.source_info); + let sample = sample.attachment(self.attachment); + self.query._reply_sample(sample.into()) + } +} + +impl Query { + fn _reply_sample(&self, sample: Sample) -> ZResult<()> { + let c = zcondfeat!( + "unstable", + !self._accepts_any_replies().unwrap_or(false), + true + ); + if c && !self.key_expr().intersects(&sample.key_expr) { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) + } + #[cfg(not(feature = "unstable"))] + let ext_sinfo = None; + #[cfg(feature = "unstable")] + let ext_sinfo = sample.source_info.into(); + self.inner.primitives.send_response(Response { + rid: self.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(sample.key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + consolidation: zenoh::Consolidation::DEFAULT, + ext_unknown: vec![], + payload: match sample.kind { + SampleKind::Put => ReplyBody::Put(Put { + timestamp: sample.timestamp, + encoding: sample.encoding.into(), + ext_sinfo, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: sample.attachment.map(|a| a.into()), + ext_unknown: vec![], + payload: sample.payload.into(), + }), + SampleKind::Delete => ReplyBody::Del(Del { + timestamp: sample.timestamp, + ext_sinfo, + ext_attachment: sample.attachment.map(|a| a.into()), + ext_unknown: vec![], + }), + }, + }), + ext_qos: sample.qos.into(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.inner.zid, + eid: self.eid, + }), + }); + Ok(()) + } +} + +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderPut> { + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} + +impl IntoFuture for ReplyBuilder<'_, '_, ReplyBuilderDelete> { + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} + +/// A builder returned by [`Query::reply_err()`](Query::reply_err). 
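+///
+/// # Examples
+/// A minimal sketch, mirroring the other examples in this module:
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let queryable = session.declare_queryable("key/expression").await.unwrap();
+/// while let Ok(query) = queryable.recv_async().await {
+///     query.reply_err("unsupported query").await.unwrap();
+/// }
+/// # }
+/// ```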
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[derive(Debug)] +pub struct ReplyErrBuilder<'a> { + query: &'a Query, + value: Value, +} + +impl EncodingBuilderTrait for ReplyErrBuilder<'_> { + fn encoding>(self, encoding: T) -> Self { + let mut value = self.value.clone(); + value.encoding = encoding.into(); + Self { value, ..self } + } +} + +impl<'a> Resolvable for ReplyErrBuilder<'a> { + type To = ZResult<()>; +} + +impl Wait for ReplyErrBuilder<'_> { + fn wait(self) -> ::To { + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + encoding: self.value.encoding.into(), + ext_sinfo: None, + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + payload: self.value.payload.into(), + }), + ext_qos: response::ext::QoSType::RESPONSE, + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: self.query.eid, + }), + }); + Ok(()) + } +} + +impl<'a> IntoFuture for ReplyErrBuilder<'a> { + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} + +pub(crate) struct QueryableState { + pub(crate) id: Id, + pub(crate) key_expr: WireExpr<'static>, + pub(crate) complete: bool, + pub(crate) origin: Locality, + pub(crate) callback: Arc, +} + +impl fmt::Debug for QueryableState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Queryable") + .field("id", &self.id) + .field("key_expr", &self.key_expr) + .field("complete", &self.complete) + .finish() + } +} + +/// An entity able to reply to queries through a callback. +/// +/// CallbackQueryables can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_queryable`](crate::Session::declare_queryable) function +/// and the [`callback`](QueryableBuilder::callback) function +/// of the resulting builder. +/// +/// Queryables are automatically undeclared when dropped. +/// +/// # Examples +/// ```no_run +/// # #[tokio::main] +/// # async fn main() { +/// use futures::prelude::*; +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let queryable = session.declare_queryable("key/expression").await.unwrap(); +/// while let Ok(query) = queryable.recv_async().await { +/// println!(">> Handling query '{}'", query.selector()); +/// query.reply("key/expression", "value") +/// .await +/// .unwrap(); +/// } +/// # } +/// ``` +#[derive(Debug)] +pub(crate) struct CallbackQueryable<'a> { + pub(crate) session: SessionRef<'a>, + pub(crate) state: Arc, + undeclare_on_drop: bool, +} + +impl<'a> UndeclarableSealed<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> { + fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> { + QueryableUndeclaration { queryable: self } + } +} + +/// A [`Resolvable`] returned when undeclaring a queryable. 
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let queryable = session.declare_queryable("key/expression").await.unwrap();
+/// queryable.undeclare().await.unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+pub struct QueryableUndeclaration<'a> {
+    queryable: CallbackQueryable<'a>,
+}
+
+impl Resolvable for QueryableUndeclaration<'_> {
+    type To = ZResult<()>;
+}
+
+impl Wait for QueryableUndeclaration<'_> {
+    fn wait(mut self) -> <Self as Resolvable>::To {
+        // set the flag first to avoid a double panic if this function panics
+        self.queryable.undeclare_on_drop = false;
+        self.queryable
+            .session
+            .close_queryable(self.queryable.state.id)
+    }
+}
+
+impl<'a> IntoFuture for QueryableUndeclaration<'a> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+impl Drop for CallbackQueryable<'_> {
+    fn drop(&mut self) {
+        if self.undeclare_on_drop {
+            let _ = self.session.close_queryable(self.state.id);
+        }
+    }
+}
+
+/// A builder for initializing a [`Queryable`].
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let queryable = session.declare_queryable("key/expression").await.unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+#[derive(Debug)]
+pub struct QueryableBuilder<'a, 'b, Handler> {
+    pub(crate) session: SessionRef<'a>,
+    pub(crate) key_expr: ZResult<KeyExpr<'b>>,
+    pub(crate) complete: bool,
+    pub(crate) origin: Locality,
+    pub(crate) handler: Handler,
+}
+
+impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> {
+    /// Receive the queries for this Queryable with a callback.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let queryable = session
+    ///     .declare_queryable("key/expression")
+    ///     .callback(|query| {println!(">> Handling query '{}'", query.selector());})
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn callback<Callback>(self, callback: Callback) -> QueryableBuilder<'a, 'b, Callback>
+    where
+        Callback: Fn(Query) + Send + Sync + 'static,
+    {
+        let QueryableBuilder {
+            session,
+            key_expr,
+            complete,
+            origin,
+            handler: _,
+        } = self;
+        QueryableBuilder {
+            session,
+            key_expr,
+            complete,
+            origin,
+            handler: callback,
+        }
+    }
+
+    /// Receive the queries for this Queryable with a mutable callback.
+    ///
+    /// Using this guarantees that your callback will never be called concurrently.
+    /// If your callback is also accepted by the [`callback`](QueryableBuilder::callback) method, we suggest you use it instead of `callback_mut`.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let mut n = 0;
+    /// let queryable = session
+    ///     .declare_queryable("key/expression")
+    ///     .callback_mut(move |query| {n += 1;})
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn callback_mut<CallbackMut>(
+        self,
+        callback: CallbackMut,
+    ) -> QueryableBuilder<'a, 'b, impl Fn(Query) + Send + Sync + 'static>
+    where
+        CallbackMut: FnMut(Query) + Send + Sync + 'static,
+    {
+        self.callback(locked(callback))
+    }
+
+    /// Receive the queries for this Queryable with a [`Handler`](crate::handlers::IntoHandler).
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let queryable = session
+    ///     .declare_queryable("key/expression")
+    ///     .with(flume::bounded(32))
+    ///     .await
+    ///     .unwrap();
+    /// while let Ok(query) = queryable.recv_async().await {
+    ///     println!(">> Handling query '{}'", query.selector());
+    /// }
+    /// # }
+    /// ```
+    #[inline]
+    pub fn with<Handler>(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler>
+    where
+        Handler: IntoHandler<'static, Query>,
+    {
+        let QueryableBuilder {
+            session,
+            key_expr,
+            complete,
+            origin,
+            handler: _,
+        } = self;
+        QueryableBuilder {
+            session,
+            key_expr,
+            complete,
+            origin,
+            handler,
+        }
+    }
+
+    /// Restrict the matching queries that will be received by this [`Queryable`]
+    /// to the ones that have the given [`Locality`](crate::prelude::Locality).
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn allowed_origin(mut self, origin: Locality) -> Self {
+        self.origin = origin;
+        self
+    }
+}
+impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> {
+    /// Change queryable completeness.
+    #[inline]
+    pub fn complete(mut self, complete: bool) -> Self {
+        self.complete = complete;
+        self
+    }
+}
+
+/// A queryable that provides data through a [`Handler`](crate::handlers::IntoHandler).
+///
+/// Queryables can be created from a zenoh [`Session`](crate::Session)
+/// with the [`declare_queryable`](crate::session::SessionDeclarations::declare_queryable) function
+/// and the [`with`](QueryableBuilder::with) function
+/// of the resulting builder.
+///
+/// Queryables are automatically undeclared when dropped.
+///
+/// # Examples
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let queryable = session
+///     .declare_queryable("key/expression")
+///     .with(flume::bounded(32))
+///     .await
+///     .unwrap();
+/// while let Ok(query) = queryable.recv_async().await {
+///     println!(">> Handling query '{}'", query.selector());
+///     query.reply("key/expression", "value")
+///         .await
+///         .unwrap();
+/// }
+/// # }
+/// ```
+#[non_exhaustive]
+#[derive(Debug)]
+pub struct Queryable<'a, Handler> {
+    pub(crate) queryable: CallbackQueryable<'a>,
+    pub(crate) handler: Handler,
+}
+
+impl<'a, Handler> Queryable<'a, Handler> {
+    /// Returns the [`EntityGlobalId`] of this Queryable.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let queryable = session.declare_queryable("key/expression")
+    ///     .await
+    ///     .unwrap();
+    /// let queryable_id = queryable.id();
+    /// # }
+    /// ```
+    #[zenoh_macros::unstable]
+    pub fn id(&self) -> EntityGlobalId {
+        EntityGlobalIdProto {
+            zid: self.queryable.session.zid().into(),
+            eid: self.queryable.state.id,
+        }
+        .into()
+    }
+
+    /// Returns a reference to this queryable's handler.
+    /// A handler is anything that implements [`IntoHandler`].
+    /// The default handler is [`DefaultHandler`].
+    pub fn handler(&self) -> &Handler {
+        &self.handler
+    }
+
+    /// Returns a mutable reference to this queryable's handler.
+    /// A handler is anything that implements [`IntoHandler`].
+    /// The default handler is [`DefaultHandler`].
+    pub fn handler_mut(&mut self) -> &mut Handler {
+        &mut self.handler
+    }
+
+    #[inline]
+    pub fn undeclare(self) -> impl Resolve<ZResult<()>> + 'a {
+        UndeclarableSealed::undeclare_inner(self, ())
+    }
+
+    /// Make the queryable run in background, until the session is closed.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn background(mut self) {
+        // It's not necessary to undeclare this resource when the session closes, as other sessions
+        // will clean all resources related to the closed one.
+        // So we can just never undeclare it.
+        self.queryable.undeclare_on_drop = false;
+    }
+}
+
+impl<'a, T> UndeclarableSealed<(), QueryableUndeclaration<'a>> for Queryable<'a, T> {
+    fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> {
+        UndeclarableSealed::undeclare_inner(self.queryable, ())
+    }
+}
+
+impl<Handler> Deref for Queryable<'_, Handler> {
+    type Target = Handler;
+
+    fn deref(&self) -> &Self::Target {
+        self.handler()
+    }
+}
+
+impl<Handler> DerefMut for Queryable<'_, Handler> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.handler_mut()
+    }
+}
+
+impl<'a, Handler> Resolvable for QueryableBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Query> + Send,
+    Handler::Handler: Send,
+{
+    type To = ZResult<Queryable<'a, Handler::Handler>>;
+}
+
+impl<'a, Handler> Wait for QueryableBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Query> + Send,
+    Handler::Handler: Send,
+{
+    fn wait(self) -> <Self as Resolvable>::To {
+        let session = self.session;
+        let (callback, receiver) = self.handler.into_handler();
+        session
+            .declare_queryable_inner(
+                &self.key_expr?.to_wire(&session),
+                self.complete,
+                self.origin,
+                callback,
+            )
+            .map(|qable_state| Queryable {
+                queryable: CallbackQueryable {
+                    session,
+                    state: qable_state,
+                    undeclare_on_drop: true,
+                },
+                handler: receiver,
+            })
+    }
+}
+
+impl<'a, Handler> IntoFuture for QueryableBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Query> + Send,
+    Handler::Handler: Send,
+{
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
diff --git a/zenoh/src/api/sample.rs b/zenoh/src/api/sample.rs
new file mode 100644
index 0000000000..220785c668
--- /dev/null
+++ b/zenoh/src/api/sample.rs
@@ -0,0 +1,453 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+//! Sample primitives
+use std::{convert::TryFrom, fmt};
+
+#[cfg(feature = "unstable")]
+use serde::Serialize;
+use zenoh_config::wrappers::EntityGlobalId;
+use zenoh_protocol::{
+    core::{CongestionControl, Timestamp},
+    network::declare::ext::QoSType,
+};
+
+use super::{
+    builders::sample::QoSBuilderTrait, bytes::ZBytes, encoding::Encoding, key_expr::KeyExpr,
+    publisher::Priority, value::Value,
+};
+
+pub type SourceSn = u64;
+
+/// The locality of samples to be received by subscribers or targeted by publishers.
+#[zenoh_macros::unstable]
+#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)]
+pub enum Locality {
+    SessionLocal,
+    Remote,
+    #[default]
+    Any,
+}
+#[cfg(not(feature = "unstable"))]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+pub(crate) enum Locality {
+    SessionLocal,
+    Remote,
+    #[default]
+    Any,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub(crate) struct DataInfo {
+    pub kind: SampleKind,
+    pub encoding: Option<Encoding>,
+    pub timestamp: Option<Timestamp>,
+    pub source_id: Option<EntityGlobalId>,
+    pub source_sn: Option<SourceSn>,
+    pub qos: QoS,
+}
+
+pub(crate) trait DataInfoIntoSample {
+    fn into_sample<IntoKeyExpr, IntoZBytes>(
+        self,
+        key_expr: IntoKeyExpr,
+        payload: IntoZBytes,
+        attachment: Option<ZBytes>,
+    ) -> Sample
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+        IntoZBytes: Into<ZBytes>;
+}
+
+impl DataInfoIntoSample for DataInfo {
+    // This function is for internal use only.
+    // Technically it may create an invalid sample (e.g. a delete sample with a payload and encoding).
+    // The test for it is intentionally not added to avoid inserting an extra "if" into the hot path.
+    // The correctness of the data should be ensured by the caller.
+    #[inline]
+    fn into_sample<IntoKeyExpr, IntoZBytes>(
+        self,
+        key_expr: IntoKeyExpr,
+        payload: IntoZBytes,
+        attachment: Option<ZBytes>,
+    ) -> Sample
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+        IntoZBytes: Into<ZBytes>,
+    {
+        Sample {
+            key_expr: key_expr.into(),
+            payload: payload.into(),
+            kind: self.kind,
+            encoding: self.encoding.unwrap_or_default(),
+            timestamp: self.timestamp,
+            qos: self.qos,
+            #[cfg(feature = "unstable")]
+            source_info: SourceInfo {
+                source_id: self.source_id,
+                source_sn: self.source_sn,
+            },
+            attachment,
+        }
+    }
+}
+
+impl DataInfoIntoSample for Option<DataInfo> {
+    #[inline]
+    fn into_sample<IntoKeyExpr, IntoZBytes>(
+        self,
+        key_expr: IntoKeyExpr,
+        payload: IntoZBytes,
+        attachment: Option<ZBytes>,
+    ) -> Sample
+    where
+        IntoKeyExpr: Into<KeyExpr<'static>>,
+        IntoZBytes: Into<ZBytes>,
+    {
+        if let Some(data_info) = self {
+            data_info.into_sample(key_expr, payload, attachment)
+        } else {
+            Sample {
+                key_expr: key_expr.into(),
+                payload: payload.into(),
+                kind: SampleKind::Put,
+                encoding: Encoding::default(),
+                timestamp: None,
+                qos: QoS::default(),
+                #[cfg(feature = "unstable")]
+                source_info: SourceInfo::empty(),
+                attachment,
+            }
+        }
+    }
+}
+
+/// Information on the source of a zenoh [`Sample`].
+#[zenoh_macros::unstable]
+#[derive(Debug, Clone)]
+pub struct SourceInfo {
+    /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`].
+    pub source_id: Option<EntityGlobalId>,
+    /// The sequence number of the [`Sample`] from the source.
+    pub source_sn: Option<SourceSn>,
+}
+
+#[test]
+#[cfg(feature = "unstable")]
+fn source_info_stack_size() {
+    use zenoh_protocol::core::ZenohIdProto;
+
+    use crate::api::sample::{SourceInfo, SourceSn};
+
+    assert_eq!(std::mem::size_of::<ZenohIdProto>(), 16);
+    assert_eq!(std::mem::size_of::<Option<ZenohIdProto>>(), 17);
+    assert_eq!(std::mem::size_of::<Option<SourceSn>>(), 16);
+    assert_eq!(std::mem::size_of::<SourceInfo>(), 17 + 16 + 7);
+}
+
+#[zenoh_macros::unstable]
+impl SourceInfo {
+    pub(crate) fn empty() -> Self {
+        SourceInfo {
+            source_id: None,
+            source_sn: None,
+        }
+    }
+    pub(crate) fn is_empty(&self) -> bool {
+        self.source_id.is_none() && self.source_sn.is_none()
+    }
+}
+
+#[zenoh_macros::unstable]
+impl From<SourceInfo> for Option<zenoh_protocol::zenoh::put::ext::SourceInfoType> {
+    fn from(source_info: SourceInfo) -> Option<zenoh_protocol::zenoh::put::ext::SourceInfoType> {
+        if source_info.is_empty() {
+            None
+        } else {
+            Some(zenoh_protocol::zenoh::put::ext::SourceInfoType {
+                id: source_info.source_id.unwrap_or_default().into(),
+                sn: source_info.source_sn.unwrap_or_default() as u32,
+            })
+        }
+    }
+}
+
+#[zenoh_macros::unstable]
+impl From<DataInfo> for SourceInfo {
+    fn from(data_info: DataInfo) -> Self {
+        SourceInfo {
+            source_id: data_info.source_id,
+            source_sn: data_info.source_sn,
+        }
+    }
+}
+
+#[zenoh_macros::unstable]
+impl From<Option<DataInfo>> for SourceInfo {
+    fn from(data_info: Option<DataInfo>) -> Self {
+        match data_info {
+            Some(data_info) => data_info.into(),
+            None => SourceInfo::empty(),
+        }
+    }
+}
+
+#[zenoh_macros::unstable]
+impl Default for SourceInfo {
+    fn default() -> Self {
+        Self::empty()
+    }
+}
+
+/// The kind of a `Sample`.
+#[repr(u8)]
+#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
+pub enum SampleKind {
+    /// if the `Sample` was issued by a `put` operation.
+    #[default]
+    Put = 0,
+    /// if the `Sample` was issued by a `delete` operation.
+    Delete = 1,
+}
+
+impl fmt::Display for SampleKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            SampleKind::Put => write!(f, "PUT"),
+            SampleKind::Delete => write!(f, "DELETE"),
+        }
+    }
+}
+
+impl TryFrom<u64> for SampleKind {
+    type Error = u64;
+    fn try_from(kind: u64) -> Result<Self, Self::Error> {
+        match kind {
+            0 => Ok(SampleKind::Put),
+            1 => Ok(SampleKind::Delete),
+            _ => Err(kind),
+        }
+    }
+}
+
+/// A structure with public fields exposing a sample's contents, convenient when a [`Sample`] needs to be decomposed into its fields.
+pub struct SampleFields {
+    pub key_expr: KeyExpr<'static>,
+    pub payload: ZBytes,
+    pub kind: SampleKind,
+    pub encoding: Encoding,
+    pub timestamp: Option<Timestamp>,
+    pub express: bool,
+    pub priority: Priority,
+    pub congestion_control: CongestionControl,
+    #[cfg(feature = "unstable")]
+    pub source_info: SourceInfo,
+    pub attachment: Option<ZBytes>,
+}
+
+impl From<Sample> for SampleFields {
+    fn from(sample: Sample) -> Self {
+        SampleFields {
+            key_expr: sample.key_expr,
+            payload: sample.payload,
+            kind: sample.kind,
+            encoding: sample.encoding,
+            timestamp: sample.timestamp,
+            express: sample.qos.express(),
+            priority: sample.qos.priority(),
+            congestion_control: sample.qos.congestion_control(),
+            #[cfg(feature = "unstable")]
+            source_info: sample.source_info,
+            attachment: sample.attachment,
+        }
+    }
+}
+
+/// A zenoh sample.
+#[non_exhaustive]
+#[derive(Clone, Debug)]
+pub struct Sample {
+    pub(crate) key_expr: KeyExpr<'static>,
+    pub(crate) payload: ZBytes,
+    pub(crate) kind: SampleKind,
+    pub(crate) encoding: Encoding,
+    pub(crate) timestamp: Option<Timestamp>,
+    pub(crate) qos: QoS,
+    #[cfg(feature = "unstable")]
+    pub(crate) source_info: SourceInfo,
+    pub(crate) attachment: Option<ZBytes>,
+}
+
+impl Sample {
+    /// Gets the key expression on which this Sample was published.
+    #[inline]
+    pub fn key_expr(&self) -> &KeyExpr<'static> {
+        &self.key_expr
+    }
+
+    /// Gets the payload of this Sample.
+    #[inline]
+    pub fn payload(&self) -> &ZBytes {
+        &self.payload
+    }
+
+    /// Gets a mutable reference to the payload of this Sample.
+    #[inline]
+    pub fn payload_mut(&mut self) -> &mut ZBytes {
+        &mut self.payload
+    }
+
+    /// Gets the kind of this Sample.
+    #[inline]
+    pub fn kind(&self) -> SampleKind {
+        self.kind
+    }
+
+    /// Gets the encoding of this Sample.
+    #[inline]
+    pub fn encoding(&self) -> &Encoding {
+        &self.encoding
+    }
+
+    /// Gets the timestamp of this Sample.
+    #[inline]
+    pub fn timestamp(&self) -> Option<&Timestamp> {
+        self.timestamp.as_ref()
+    }
+
+    /// Gets the congestion control of this Sample.
+    pub fn congestion_control(&self) -> CongestionControl {
+        self.qos.congestion_control()
+    }
+
+    /// Gets the priority of this Sample.
+    pub fn priority(&self) -> Priority {
+        self.qos.priority()
+    }
+
+    /// Gets the express flag value. If `true`, the message is not batched during transmission, in order to reduce latency.
+    pub fn express(&self) -> bool {
+        self.qos.express()
+    }
+
+    /// Gets info on the source of this Sample.
+    #[zenoh_macros::unstable]
+    #[inline]
+    pub fn source_info(&self) -> &SourceInfo {
+        &self.source_info
+    }
+
+    /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices.
+    #[inline]
+    pub fn attachment(&self) -> Option<&ZBytes> {
+        self.attachment.as_ref()
+    }
+
+    /// Gets a mutable reference to the sample attachment.
+    #[inline]
+    pub fn attachment_mut(&mut self) -> Option<&mut ZBytes> {
+        self.attachment.as_mut()
+    }
+}
+
+impl From<Sample> for Value {
+    fn from(sample: Sample) -> Self {
+        Value::new(sample.payload, sample.encoding)
+    }
+}
+
+/// Structure containing quality of service data
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct QoS {
+    inner: QoSType,
+}
+
+#[derive(Debug)]
+pub(crate) struct QoSBuilder(QoS);
+
+impl From<QoS> for QoSBuilder {
+    fn from(qos: QoS) -> Self {
+        QoSBuilder(qos)
+    }
+}
+
+impl From<QoSType> for QoSBuilder {
+    fn from(qos: QoSType) -> Self {
+        QoSBuilder(QoS { inner: qos })
+    }
+}
+
+impl From<QoSBuilder> for QoS {
+    fn from(builder: QoSBuilder) -> Self {
+        builder.0
+    }
+}
+
+impl QoSBuilderTrait for QoSBuilder {
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_congestion_control(congestion_control);
+        Self(QoS { inner })
+    }
+
+    fn priority(self, priority: Priority) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_priority(priority.into());
+        Self(QoS { inner })
+    }
+
+    fn express(self, is_express: bool) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_is_express(is_express);
+        Self(QoS { inner })
+    }
+}
+
+impl QoS {
+    /// Gets the priority of the message.
+    pub fn priority(&self) -> Priority {
+        match Priority::try_from(self.inner.get_priority()) {
+            Ok(p) => p,
+            Err(e) => {
+                tracing::trace!(
+                    "Failed to convert priority: {}; replacing with default value",
+                    e.to_string()
+                );
+                Priority::default()
+            }
+        }
+    }
+
+    /// Gets the congestion control of the message.
+    pub fn congestion_control(&self) -> CongestionControl {
+        self.inner.get_congestion_control()
+    }
+
+    /// Gets the express flag value. If `true`, the message is not batched during transmission, in order to reduce latency.
+
+impl From<Sample> for Value {
+    fn from(sample: Sample) -> Self {
+        Value::new(sample.payload, sample.encoding)
+    }
+}
+
+/// Structure containing quality of service data
+#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
+pub(crate) struct QoS {
+    inner: QoSType,
+}
+
+#[derive(Debug)]
+pub(crate) struct QoSBuilder(QoS);
+
+impl From<QoS> for QoSBuilder {
+    fn from(qos: QoS) -> Self {
+        QoSBuilder(qos)
+    }
+}
+
+impl From<QoSType> for QoSBuilder {
+    fn from(qos: QoSType) -> Self {
+        QoSBuilder(QoS { inner: qos })
+    }
+}
+
+impl From<QoSBuilder> for QoS {
+    fn from(builder: QoSBuilder) -> Self {
+        builder.0
+    }
+}
+
+impl QoSBuilderTrait for QoSBuilder {
+    fn congestion_control(self, congestion_control: CongestionControl) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_congestion_control(congestion_control);
+        Self(QoS { inner })
+    }
+
+    fn priority(self, priority: Priority) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_priority(priority.into());
+        Self(QoS { inner })
+    }
+
+    fn express(self, is_express: bool) -> Self {
+        let mut inner = self.0.inner;
+        inner.set_is_express(is_express);
+        Self(QoS { inner })
+    }
+}
+
+impl QoS {
+    /// Gets the priority of the message.
+    pub fn priority(&self) -> Priority {
+        match Priority::try_from(self.inner.get_priority()) {
+            Ok(p) => p,
+            Err(e) => {
+                tracing::trace!(
+                    "Failed to convert priority: {}; replacing with default value",
+                    e.to_string()
+                );
+                Priority::default()
+            }
+        }
+    }
+
+    /// Gets the congestion control of the message.
+    pub fn congestion_control(&self) -> CongestionControl {
+        self.inner.get_congestion_control()
+    }
+
+    /// Gets the express flag value. If `true`, the message is not batched during transmission, in order to reduce latency.
+    pub fn express(&self) -> bool {
+        self.inner.is_express()
+    }
+}
+
+impl From<QoSType> for QoS {
+    fn from(qos: QoSType) -> Self {
+        QoS { inner: qos }
+    }
+}
+
+impl From<QoS> for QoSType {
+    fn from(qos: QoS) -> Self {
+        qos.inner
+    }
+}
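As an illustration (not in the diff) of how the builder above folds settings into the wire-level `QoSType`: each `QoSBuilderTrait` method rewrites one bitfield and returns the builder by value, so calls chain freely. A crate-internal sketch, assuming `QoSBuilderTrait` is in scope:

fn qos_round_trip() {
    let qos: QoS = QoSBuilder::from(QoS::default())
        .priority(Priority::DataHigh)
        .congestion_control(CongestionControl::Block)
        .express(true)
        .into();
    assert_eq!(qos.priority(), Priority::DataHigh);
    assert_eq!(qos.congestion_control(), CongestionControl::Block);
    assert!(qos.express());
}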
diff --git a/zenoh/src/scouting.rs b/zenoh/src/api/scouting.rs
similarity index 71%
rename from zenoh/src/scouting.rs
rename to zenoh/src/api/scouting.rs
index 5324bcc320..4f08530533 100644
--- a/zenoh/src/scouting.rs
+++ b/zenoh/src/api/scouting.rs
@@ -11,22 +11,25 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use crate::handlers::{locked, Callback, DefaultHandler};
-use crate::net::runtime::{orchestrator::Loop, Runtime};
+use std::{
+    fmt,
+    future::{IntoFuture, Ready},
+    net::SocketAddr,
+    ops::Deref,
+    time::Duration,
+};
 
-use std::time::Duration;
-use std::{fmt, future::Ready, net::SocketAddr, ops::Deref};
 use tokio::net::UdpSocket;
-use zenoh_core::{AsyncResolve, Resolvable, SyncResolve};
+use zenoh_config::wrappers::Hello;
+use zenoh_core::{Resolvable, Wait};
 use zenoh_protocol::core::WhatAmIMatcher;
 use zenoh_result::ZResult;
 use zenoh_task::TerminatableTask;
 
-/// Constants and helpers for zenoh `whatami` flags.
-pub use zenoh_protocol::core::WhatAmI;
-
-/// A zenoh Hello message.
-pub use zenoh_protocol::scouting::Hello;
+use crate::{
+    api::handlers::{locked, Callback, DefaultHandler, IntoHandler},
+    net::runtime::{orchestrator::Loop, Runtime},
+};
 
 /// A builder for initializing a [`Scout`].
 ///
@@ -34,11 +37,9 @@ pub use zenoh_protocol::scouting::Hello;
 /// ```no_run
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::scouting::WhatAmI;
+/// use zenoh::{config::WhatAmI, prelude::*};
 ///
-/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
-///     .res()
+/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
 ///     .await
 ///     .unwrap();
 /// while let Ok(hello) = receiver.recv_async().await {
@@ -61,12 +62,10 @@ impl ScoutBuilder<DefaultHandler> {
     /// ```
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    /// use zenoh::scouting::WhatAmI;
+    /// use zenoh::{config::WhatAmI, prelude::*};
     ///
-    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
     ///     .callback(|hello| { println!("{}", hello); })
-    ///     .res()
     ///     .await
     ///     .unwrap();
     /// # }
     /// ```
@@ -97,13 +96,11 @@ impl ScoutBuilder<DefaultHandler> {
     /// ```
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    /// use zenoh::scouting::WhatAmI;
+    /// use zenoh::{config::WhatAmI, prelude::*};
     ///
     /// let mut n = 0;
-    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
     ///     .callback_mut(move |_hello| { n += 1; })
-    ///     .res()
    ///     .await
     ///     .unwrap();
     /// # }
     /// ```
@@ -119,18 +116,16 @@ impl ScoutBuilder<DefaultHandler> {
         self.callback(locked(callback))
     }
 
-    /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoCallbackReceiverPair).
+    /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::handlers::IntoHandler).
     ///
     /// # Examples
     /// ```no_run
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    /// use zenoh::scouting::WhatAmI;
+    /// use zenoh::{config::WhatAmI, prelude::*};
     ///
-    /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+    /// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
     ///     .with(flume::bounded(32))
-    ///     .res()
     ///     .await
     ///     .unwrap();
     /// while let Ok(hello) = receiver.recv_async().await {
@@ -141,7 +136,7 @@ impl ScoutBuilder<DefaultHandler> {
     #[inline]
     pub fn with<Handler>(self, handler: Handler) -> ScoutBuilder<Handler>
     where
-        Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello>,
+        Handler: IntoHandler<'static, Hello>,
     {
         let ScoutBuilder {
             what,
             config,
@@ -158,32 +153,33 @@ impl ScoutBuilder<DefaultHandler> {
 
 impl<Handler> Resolvable for ScoutBuilder<Handler>
 where
-    Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send,
-    Handler::Receiver: Send,
+    Handler: IntoHandler<'static, Hello> + Send,
+    Handler::Handler: Send,
 {
-    type To = ZResult<Scout<Handler::Receiver>>;
+    type To = ZResult<Scout<Handler::Handler>>;
 }
 
-impl<Handler> SyncResolve for ScoutBuilder<Handler>
+impl<Handler> Wait for ScoutBuilder<Handler>
 where
-    Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send,
-    Handler::Receiver: Send,
+    Handler: IntoHandler<'static, Hello> + Send,
+    Handler::Handler: Send,
 {
-    fn res_sync(self) -> <Self as Resolvable>::To {
-        let (callback, receiver) = self.handler.into_cb_receiver_pair();
-        scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver })
+    fn wait(self) -> <Self as Resolvable>::To {
+        let (callback, receiver) = self.handler.into_handler();
+        _scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver })
     }
 }
 
-impl<Handler> AsyncResolve for ScoutBuilder<Handler>
+impl<Handler> IntoFuture for ScoutBuilder<Handler>
 where
-    Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send,
-    Handler::Receiver: Send,
+    Handler: IntoHandler<'static, Hello> + Send,
+    Handler::Handler: Send,
 {
-    type Future = Ready<Self::To>;
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
 
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
     }
 }
 
@@ -193,12 +189,10 @@ where
 /// ```
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::scouting::WhatAmI;
+/// use zenoh::{config::WhatAmI, prelude::*};
 ///
-/// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+/// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
 ///     .callback(|hello| { println!("{}", hello); })
-///     .res()
 ///     .await
 ///     .unwrap();
 /// # }
 /// ```
@@ -215,12 +209,10 @@ impl ScoutInner {
     /// ```
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    /// use zenoh::scouting::WhatAmI;
+    /// use zenoh::{config::WhatAmI, prelude::*};
     ///
-    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+    /// let scout = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
     ///     .callback(|hello| { println!("{}", hello); })
-    ///     .res()
     ///     .await
     ///     .unwrap();
     /// scout.stop();
@@ -246,18 +238,16 @@ impl fmt::Debug for ScoutInner {
     }
 }
 
-/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoCallbackReceiverPair).
+/// A scout that returns [`Hello`] messages through a [`Handler`](crate::handlers::IntoHandler).
 ///
 /// # Examples
 /// ```no_run
 /// # #[tokio::main]
 /// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::scouting::WhatAmI;
+/// use zenoh::{config::WhatAmI, prelude::*};
 ///
-/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
+/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
 ///     .with(flume::bounded(32))
-///     .res()
 ///     .await
 ///     .unwrap();
 /// while let Ok(hello) = receiver.recv_async().await {
@@ -269,7 +259,7 @@ impl fmt::Debug for ScoutInner {
 #[derive(Debug)]
 pub struct Scout<Receiver> {
     pub(crate) scout: ScoutInner,
-    pub receiver: Receiver,
+    pub(crate) receiver: Receiver,
 }
 
 impl<Receiver> Deref for Scout<Receiver> {
@@ -287,12 +277,10 @@ impl<Receiver> Scout<Receiver> {
     /// ```no_run
     /// # #[tokio::main]
     /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    /// use zenoh::scouting::WhatAmI;
+    /// use zenoh::{config::WhatAmI, prelude::*};
     ///
-    /// let scout = zenoh::scout(WhatAmI::Router, config::default())
+    /// let scout = zenoh::scout(WhatAmI::Router, zenoh::config::default())
     ///     .with(flume::bounded(32))
-    ///     .res()
     ///     .await
     ///     .unwrap();
     /// let _router = scout.recv_async().await;
@@ -304,7 +292,7 @@ impl<Receiver> Scout<Receiver> {
     }
 }
 
-fn scout(
+fn _scout(
     what: WhatAmIMatcher,
     config: zenoh_config::Config,
     callback: Callback<'static, Hello>,
@@ -337,7 +325,7 @@ fn scout(
     let scout = Runtime::scout(&sockets, what, &addr, move |hello| {
         let callback = callback.clone();
         async move {
-            callback(hello);
+            callback(hello.into());
             Loop::Continue
         }
     });
@@ -355,3 +343,44 @@ fn scout(
     }
     Ok(ScoutInner { scout_task: None })
 }
+
+/// Scout for routers and/or peers.
+///
+/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies.
+///
+/// Drop the returned [`Scout`] to stop the scouting task.
+///
+/// # Arguments
+///
+/// * `what` - The kind of zenoh process to scout for
+/// * `config` - The configuration [`crate::Config`] to use for scouting
+///
+/// # Examples
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::{config::WhatAmI, prelude::*};
+///
+/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
+///     .await
+///     .unwrap();
+/// while let Ok(hello) = receiver.recv_async().await {
+///     println!("{}", hello);
+/// }
+/// # }
+/// ```
+pub fn scout<I: Into<WhatAmIMatcher>, TryIntoConfig>(
+    what: I,
+    config: TryIntoConfig,
+) -> ScoutBuilder<DefaultHandler>
+where
+    TryIntoConfig: std::convert::TryInto<crate::config::Config> + Send + 'static,
+    <TryIntoConfig as std::convert::TryInto<crate::config::Config>>::Error:
+        Into<zenoh_result::Error>,
+{
+    ScoutBuilder {
+        what: what.into(),
+        config: config.try_into().map_err(|e| e.into()),
+        handler: DefaultHandler::default(),
+    }
+}
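The same builder therefore resolves two ways, which is worth spelling out (this example is not part of the diff): `.await` goes through the `IntoFuture` impl above, while blocking code can use the `Wait` impl directly. A sketch, assuming the prelude re-exports `Wait`:

use zenoh::{config::WhatAmI, prelude::*};

fn scout_blocking() {
    // No async runtime needed: the `Wait` impl resolves the builder synchronously.
    let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, zenoh::config::default())
        .with(flume::bounded(32))
        .wait()
        .unwrap();
    while let Ok(hello) = receiver.recv() {
        println!("{hello}");
    }
}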
diff --git a/zenoh/src/api/selector.rs b/zenoh/src/api/selector.rs
new file mode 100644
index 0000000000..d7b7466be2
--- /dev/null
+++ b/zenoh/src/api/selector.rs
@@ -0,0 +1,371 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+//! [Selector](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors) to issue queries
+use std::{borrow::Cow, convert::TryFrom, str::FromStr};
+
+use zenoh_protocol::core::{
+    key_expr::{keyexpr, OwnedKeyExpr},
+    Parameters,
+};
+#[cfg(feature = "unstable")]
+use ::{zenoh_result::ZResult, zenoh_util::time_range::TimeRange};
+
+use super::{key_expr::KeyExpr, queryable::Query};
+
+/// A selector is the combination of a [Key Expression](crate::key_expr::KeyExpr), which defines the
+/// set of keys that are relevant to an operation, and a set of parameters
+/// with a few intended uses:
+/// - specifying arguments to a queryable, allowing the passing of Remote Procedure Call parameters
+/// - filtering by value,
+/// - filtering by metadata, such as the timestamp of a value,
+/// - specifying arguments to zenoh when using the REST API.
+///
+/// When in string form, selectors look a lot like a URI, with similar semantics:
+/// - the `key_expr` before the first `?` must be a valid key expression.
+/// - the `parameters` after the first `?` should be encoded like the query section of a URL:
+///     - parameters are separated by `&`,
+///     - the parameter name and value are separated by the first `=`,
+///     - in the absence of `=`, the parameter value is considered to be the empty string,
+///     - both name and value should use percent-encoding to escape characters,
+///     - defining a value for the same parameter name twice is considered undefined behavior,
+///       with the encouraged behaviour being to reject operations when a duplicate parameter is detected.
+///
+/// Zenoh intends to standardize the usage of a set of parameter names. To avoid conflicting with RPC parameters,
+/// the Zenoh team has settled on reserving the set of parameter names that start with non-alphanumeric characters.
+///
+/// The full specification for selectors is available [here](https://github.com/eclipse-zenoh/roadmap/tree/main/rfcs/ALL/Selectors);
+/// it includes the standardized parameters.
+///
+/// Queryable implementers are encouraged to prefer these standardized parameter names when implementing their
+/// associated features, and to prefix their own parameter names to avoid conflicts with other queryables.
+///
+/// Here are the currently standardized parameters for Zenoh (check the specification page for the exhaustive list):
+/// - **`[unstable]`** `_time`: used to express interest in only values dated within a certain time range, values for
+///   this parameter must be readable by the [Zenoh Time DSL](zenoh_util::time_range::TimeRange) for the value to be considered valid.
+/// - **`[unstable]`** `_anyke`: used in queries to express interest in replies coming from any key expression. By default, only replies
+///   whose key expression match query's key expression are accepted. `_anyke` disables the query-reply key expression matching check.
+#[non_exhaustive]
+#[derive(Clone, PartialEq, Eq)]
+pub struct Selector<'a> {
+    /// The part of this selector identifying which keys should be part of the selection.
+    pub key_expr: Cow<'a, KeyExpr<'a>>,
+    /// The part of this selector identifying which values should be part of the selection.
+    pub parameters: Cow<'a, Parameters<'a>>,
+}
+
+impl<'a> Selector<'a> {
+    /// Builds a new selector which owns its key expression and parameters.
+    pub fn owned<K, P>(key_expr: K, parameters: P) -> Self
+    where
+        K: Into<KeyExpr<'a>>,
+        P: Into<Parameters<'a>>,
+    {
+        Self {
+            key_expr: Cow::Owned(key_expr.into()),
+            parameters: Cow::Owned(parameters.into()),
+        }
+    }
+    /// Builds a new selector holding references to the key expression and parameters.
+    /// Useful for printing pairs of key expression and parameters in URL-like format.
+    pub fn borrowed(key_expr: &'a KeyExpr<'a>, parameters: &'a Parameters<'a>) -> Self {
+        Self {
+            key_expr: Cow::Borrowed(key_expr),
+            parameters: Cow::Borrowed(parameters),
+        }
+    }
+
+    /// Converts this selector into an owned one.
+    pub fn into_owned(self) -> Selector<'static> {
+        Selector::owned(
+            self.key_expr.into_owned().into_owned(),
+            self.parameters.into_owned().into_owned(),
+        )
+    }
+}
+
+impl<'a, K, P> From<(K, P)> for Selector<'a>
+where
+    K: Into<KeyExpr<'a>>,
+    P: Into<Parameters<'a>>,
+{
+    fn from((key_expr, parameters): (K, P)) -> Self {
+        Self::owned(key_expr, parameters)
+    }
+}
+
+impl<'a> From<Selector<'a>> for (KeyExpr<'a>, Parameters<'a>) {
+    fn from(selector: Selector<'a>) -> Self {
+        (
+            selector.key_expr.into_owned(),
+            selector.parameters.into_owned(),
+        )
+    }
+}
+
+impl<'a> From<&'a Selector<'a>> for (&'a KeyExpr<'a>, &'a Parameters<'a>) {
+    fn from(selector: &'a Selector<'a>) -> Self {
+        (selector.key_expr.as_ref(), selector.parameters.as_ref())
+    }
+}
+
+#[zenoh_macros::unstable]
+/// The trait that allows setting/reading the parameters processed by the zenoh library itself.
+pub trait ZenohParameters {
+    /// These parameter names are not part of the public API. They are exposed just to document the current
+    /// zenoh-specific parameter naming, allowing users to avoid conflicts with custom parameters. It is also possible
+    /// that some of these zenoh-specific parameters, which are now stored as key-value pairs, will later be
+    /// passed in some other way, while keeping the same get/set interface functions.
+    const REPLY_KEY_EXPR_ANY_SEL_PARAM: &'static str = "_anyke";
+    const TIME_RANGE_KEY: &'static str = "_time";
+    /// Sets the time range targeted by the selector parameters.
+    fn set_time_range<T: Into<Option<TimeRange>>>(&mut self, time_range: T);
+    /// Sets the parameter allowing to receive replies from queryables not matching
+    /// the requested key expression. This may happen in this scenario:
+    /// - we are requesting keyexpr `a/b`.
+    /// - queryable is declared to handle `a/*` queries and contains data for `a/b` and `a/c`.
+    /// - queryable receives our request and sends two replies with data for `a/b` and `a/c`
+    ///
+    /// Normally only the `a/b` reply would be accepted, but with the `_anyke` parameter set, both replies are accepted.
+    /// NOTE: `_anyke` indicates that ANY key expression is allowed. I.e., if the `_anyke` parameter is set, a reply
+    /// on `x/y/z` is valid even if the queryable is declared on `a/*`.
+    fn set_reply_key_expr_any(&mut self);
+    /// Extracts the standardized `_time` argument from the selector parameters.
+    /// Returns `None` if the `_time` argument is not present or `Some` with the result of parsing the `_time` argument
+    /// if it is present.
+    fn time_range(&self) -> Option<ZResult<TimeRange>>;
+    /// Returns true if the `_anyke` parameter is present in the selector parameters.
+    fn reply_key_expr_any(&self) -> bool;
+}
+
+#[cfg(feature = "unstable")]
+impl ZenohParameters for Parameters<'_> {
+    /// Sets the time range targeted by the selector parameters.
+    fn set_time_range<T: Into<Option<TimeRange>>>(&mut self, time_range: T) {
+        let mut time_range: Option<TimeRange> = time_range.into();
+        match time_range.take() {
+            Some(tr) => self.insert(Self::TIME_RANGE_KEY, format!("{}", tr)),
+            None => self.remove(Self::TIME_RANGE_KEY),
+        };
+    }
+
+    /// Sets the parameter allowing a queryable to reply to this request even
+    /// if the requested key expression does not match the reply key expression.
+    fn set_reply_key_expr_any(&mut self) {
+        self.insert(Self::REPLY_KEY_EXPR_ANY_SEL_PARAM, "");
+    }
+
+    /// Extracts the standardized `_time` argument from the selector parameters.
+    ///
+    /// The default implementation still causes a complete pass through the selector parameters to ensure that there are no duplicates of the `_time` key.
+    fn time_range(&self) -> Option<ZResult<TimeRange>> {
+        self.get(Self::TIME_RANGE_KEY)
+            .map(|tr| tr.parse().map_err(Into::into))
+    }
+
+    /// Returns true if the `_anyke` parameter is present in the selector parameters.
+    fn reply_key_expr_any(&self) -> bool {
+        self.contains_key(Self::REPLY_KEY_EXPR_ANY_SEL_PARAM)
+    }
+}
+
+impl std::fmt::Debug for Selector<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "sel\"{self}\"")
+    }
+}
+
+impl std::fmt::Display for Selector<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(f, "{}", self.key_expr)?;
+        if !self.parameters.is_empty() {
+            write!(f, "?{}", self.parameters.as_str())?;
+        }
+        Ok(())
+    }
+}
+
+impl<'a> From<&Selector<'a>> for Selector<'a> {
+    fn from(s: &Selector<'a>) -> Self {
+        s.clone()
+    }
+}
+
+impl TryFrom<String> for Selector<'_> {
+    type Error = zenoh_result::Error;
+    fn try_from(mut s: String) -> Result<Self, Self::Error> {
+        match s.find('?') {
+            Some(qmark_position) => {
+                let parameters = s[qmark_position + 1..].to_owned();
+                s.truncate(qmark_position);
+                Ok(Selector::owned(KeyExpr::try_from(s)?, parameters))
+            }
+            None => Ok(KeyExpr::try_from(s)?.into()),
+        }
+    }
+}
+
+impl<'a> TryFrom<&'a str> for Selector<'a> {
+    type Error = zenoh_result::Error;
+    fn try_from(s: &'a str) -> Result<Self, Self::Error> {
+        match s.find('?') {
+            Some(qmark_position) => {
+                let params = &s[qmark_position + 1..];
+                Ok(Selector::owned(
+                    KeyExpr::try_from(&s[..qmark_position])?,
+                    params,
+                ))
+            }
+            None => Ok(KeyExpr::try_from(s)?.into()),
+        }
+    }
+}
+
+impl FromStr for Selector<'static> {
+    type Err = zenoh_result::Error;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        s.to_owned().try_into()
+    }
+}
+
+impl<'a> TryFrom<&'a String> for Selector<'a> {
+    type Error = zenoh_result::Error;
+    fn try_from(s: &'a String) -> Result<Self, Self::Error> {
+        Self::try_from(s.as_str())
+    }
+}
+
+impl<'a> From<&'a Query> for Selector<'a> {
+    fn from(q: &'a Query) -> Self {
+        Self {
+            key_expr: Cow::Borrowed(&q.inner.key_expr),
+            parameters: Cow::Borrowed(&q.inner.parameters),
+        }
+    }
+}
+
+impl<'a> From<&'a KeyExpr<'a>> for Selector<'a> {
+    fn from(key_selector: &'a KeyExpr<'a>) -> Self {
+        Self {
+            key_expr: Cow::Borrowed(key_selector),
+            parameters: Cow::Owned("".into()),
+        }
+    }
+}
+
+impl<'a> From<&'a keyexpr> for Selector<'a> {
+    fn from(key_selector: &'a keyexpr) -> Self {
+        Self {
+            key_expr: Cow::Owned(key_selector.into()),
+            parameters: Cow::Owned("".into()),
+        }
+    }
+}
+
+impl<'a> From<&'a OwnedKeyExpr> for Selector<'a> {
+    fn from(key_selector: &'a OwnedKeyExpr) -> Self {
+        Self {
+            key_expr: Cow::Owned(key_selector.into()),
+            parameters: Cow::Owned("".into()),
+        }
+    }
+}
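To make the `?`-splitting in the `TryFrom` impls above concrete (this example is not part of the diff): everything before the first `?` must parse as a key expression, and the remainder becomes the parameters verbatim. A sketch, using the `;`-separated parameter form that the `selector_accessors` test below also uses:

fn parse_selector() {
    let selector = Selector::try_from("robot/pose?_time=[now(-5m)..];level=debug").unwrap();
    assert_eq!(selector.key_expr.as_str(), "robot/pose");
    assert_eq!(selector.parameters.get("_time"), Some("[now(-5m)..]"));
    assert_eq!(selector.parameters.get("level"), Some("debug"));
}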
+
+impl From<OwnedKeyExpr> for Selector<'static> {
+    fn from(key_selector: OwnedKeyExpr) -> Self {
+        Self {
+            key_expr: Cow::Owned(key_selector.into()),
+            parameters: Cow::Owned("".into()),
+        }
+    }
+}
+
+impl<'a> From<KeyExpr<'a>> for Selector<'a> {
+    fn from(key_selector: KeyExpr<'a>) -> Self {
+        Self {
+            key_expr: Cow::Owned(key_selector),
+            parameters: Cow::Owned("".into()),
+        }
+    }
+}
+
+#[cfg(feature = "unstable")]
+#[test]
+fn selector_accessors() {
+    use std::collections::HashMap;
+
+    for s in [
+        "hello/there?_timetrick",
+        "hello/there?_timetrick;_time",
+        "hello/there?_timetrick;_time;_filter",
+        "hello/there?_timetrick;_time=[..]",
+        "hello/there?_timetrick;_time=[..];_filter",
+    ] {
+        let Selector {
+            key_expr,
+            parameters,
+        } = s.try_into().unwrap();
+        assert_eq!(key_expr.as_str(), "hello/there");
+        let mut parameters = parameters.into_owned();
+
+        println!("Parameters start: {}", parameters);
+        for i in parameters.iter() {
+            println!("\t{:?}", i);
+        }
+
+        assert_eq!(parameters.get("_timetrick").unwrap(), "");
+
+        const TIME_RANGE_KEY: &str = Parameters::TIME_RANGE_KEY;
+        const ANYKE: &str = Parameters::REPLY_KEY_EXPR_ANY_SEL_PARAM;
+
+        let time_range = "[now(-2s)..now(2s)]";
+        zcondfeat!(
+            "unstable",
+            {
+                let time_range = time_range.parse().unwrap();
+                parameters.set_time_range(time_range);
+                assert_eq!(parameters.time_range().unwrap().unwrap(), time_range);
+            },
+            {
+                parameters.insert(TIME_RANGE_KEY, time_range);
+            }
+        );
+        assert_eq!(parameters.get(TIME_RANGE_KEY).unwrap(), time_range);
+
+        let hm: HashMap<&str, &str> = HashMap::from(&parameters);
+        assert!(hm.contains_key(TIME_RANGE_KEY));
+
+        parameters.insert("_filter", "");
+        assert_eq!(parameters.get("_filter").unwrap(), "");
+
+        let hm: HashMap<String, String> = HashMap::from(&parameters);
+        assert!(hm.contains_key(TIME_RANGE_KEY));
+
+        parameters.extend_from_iter(hm.iter());
+        assert_eq!(parameters.get("_filter").unwrap(), "");
+
+        parameters.insert(ANYKE, "");
+
+        println!("Parameters end: {}", parameters);
+        for i in parameters.iter() {
+            println!("\t{:?}", i);
+        }
+
+        assert_eq!(
+            HashMap::<String, String>::from(&parameters),
+            HashMap::<String, String>::from(Parameters::from(
+                "_anyke;_filter;_time=[now(-2s)..now(2s)];_timetrick"
+            ))
+        );
+    }
+}
diff --git a/zenoh/src/session.rs b/zenoh/src/api/session.rs
similarity index 58%
rename from zenoh/src/session.rs
rename to zenoh/src/api/session.rs
index 95366c9216..451c1340ad 100644
--- a/zenoh/src/session.rs
+++ b/zenoh/src/api/session.rs
@@ -11,129 +11,165 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
+use std::{
+    collections::HashMap,
+    convert::TryInto,
+    fmt,
+    future::{IntoFuture, Ready},
+    ops::Deref,
+    sync::{
+        atomic::{AtomicU16, Ordering},
+        Arc, RwLock,
+    },
+    time::{Duration, SystemTime, UNIX_EPOCH},
+};
 
-use crate::admin;
-use crate::config::Config;
-use crate::config::Notifier;
-use crate::handlers::{Callback, DefaultHandler};
-use crate::info::*;
-use crate::key_expr::KeyExprInner;
-#[zenoh_macros::unstable]
-use crate::liveliness::{Liveliness, LivelinessTokenState};
-use crate::net::primitives::Primitives;
-use crate::net::routing::dispatcher::face::Face;
-use crate::net::runtime::Runtime;
-use crate::prelude::Locality;
-use crate::prelude::{KeyExpr, Parameters};
-use crate::publication::*;
-use crate::query::*;
-use crate::queryable::*;
-use crate::runtime::RuntimeBuilder;
-#[cfg(feature = "unstable")]
-use crate::sample::Attachment;
-use crate::sample::DataInfo;
-use crate::sample::QoS;
-use crate::selector::TIME_RANGE_KEY;
-use crate::subscriber::*;
-use crate::Id;
-use crate::Priority;
-use crate::Sample;
-use crate::SampleKind;
-use crate::Selector;
-use crate::Value;
-use std::collections::HashMap;
-use std::convert::TryFrom;
-use std::convert::TryInto;
-use std::fmt;
-use std::ops::Deref;
-use
std::sync::atomic::{AtomicU16, AtomicUsize, Ordering}; -use std::sync::Arc; -use std::sync::RwLock; -use std::time::Duration; use tracing::{error, trace, warn}; -use uhlc::HLC; +use uhlc::{Timestamp, HLC}; use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; -use zenoh_config::unwrap_or_default; -use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; -use zenoh_protocol::network::AtomicRequestId; -use zenoh_protocol::network::RequestId; +use zenoh_config::{unwrap_or_default, wrappers::ZenohId, Config, Notifier}; +use zenoh_core::{zconfigurable, zread, Resolvable, Resolve, ResolveClosure, ResolveFuture, Wait}; +#[cfg(feature = "unstable")] +use zenoh_protocol::network::{ + declare::{DeclareToken, SubscriberId, TokenId, UndeclareToken}, + ext, + interest::InterestId, +}; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, - AtomicExprId, CongestionControl, ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, + AtomicExprId, CongestionControl, EntityId, ExprId, Parameters, WireExpr, EMPTY_EXPR_ID, }, network::{ + self, declare::{ - self, common::ext::WireExprType, queryable::ext::QueryableInfo, + self, common::ext::WireExprType, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, - ext, - request::{self, ext::TargetType, Request}, - Mapping, Push, Response, ResponseFinal, + interest::{InterestMode, InterestOptions}, + request::{self, ext::TargetType}, + AtomicRequestId, DeclareFinal, Interest, Mapping, Push, Request, RequestId, Response, + ResponseFinal, }, zenoh::{ - query::{ - self, - ext::{ConsolidationType, QueryBodyType}, - }, - Pull, PushBody, RequestBody, ResponseBody, + query::{self, ext::QueryBodyType, Consolidation}, + reply::ReplyBody, + Del, PushBody, Put, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; +#[cfg(feature = "shared-memory")] +use zenoh_shm::api::client_storage::ShmClientStorage; use zenoh_task::TaskController; -use zenoh_util::core::AsyncResolve; + +use super::{ + admin, + builders::publisher::{ + PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder, SessionDeleteBuilder, + SessionPutBuilder, + }, + bytes::ZBytes, + encoding::Encoding, + handlers::{Callback, DefaultHandler}, + info::SessionInfo, + key_expr::{KeyExpr, KeyExprInner}, + publisher::{Priority, PublisherState}, + query::{ + ConsolidationMode, QueryConsolidation, QueryState, QueryTarget, Reply, SessionGetBuilder, + }, + queryable::{Query, QueryInner, QueryableBuilder, QueryableState}, + sample::{DataInfo, DataInfoIntoSample, Locality, QoS, Sample, SampleKind}, + selector::Selector, + subscriber::{SubscriberBuilder, SubscriberKind, SubscriberState}, + value::Value, + Id, +}; +#[cfg(feature = "unstable")] +use super::{ + liveliness::{Liveliness, LivelinessTokenState}, + publisher::Publisher, + publisher::{MatchingListenerState, MatchingStatus}, + query::LivelinessQueryState, + sample::SourceInfo, +}; +#[cfg(feature = "unstable")] +use crate::api::selector::ZenohParameters; +use crate::net::{ + primitives::Primitives, + routing::dispatcher::face::Face, + runtime::{Runtime, RuntimeBuilder}, +}; +#[cfg(feature = "unstable")] +use crate::pubsub::Reliability; zconfigurable! 
 {
     pub(crate) static ref API_DATA_RECEPTION_CHANNEL_SIZE: usize = 256;
     pub(crate) static ref API_QUERY_RECEPTION_CHANNEL_SIZE: usize = 256;
     pub(crate) static ref API_REPLY_EMISSION_CHANNEL_SIZE: usize = 256;
     pub(crate) static ref API_REPLY_RECEPTION_CHANNEL_SIZE: usize = 256;
-    pub(crate) static ref API_OPEN_SESSION_DELAY: u64 = 500;
 }
 
 pub(crate) struct SessionState {
     pub(crate) primitives: Option<Arc<Face>>, // @TODO replace with MaybeUninit ??
     pub(crate) expr_id_counter: AtomicExprId, // @TODO: manage rollover and uniqueness
     pub(crate) qid_counter: AtomicRequestId,
-    pub(crate) decl_id_counter: AtomicUsize,
+    #[cfg(feature = "unstable")]
+    pub(crate) liveliness_qid_counter: AtomicRequestId,
     pub(crate) local_resources: HashMap<ExprId, Resource>,
     pub(crate) remote_resources: HashMap<ExprId, Resource>,
+    #[cfg(feature = "unstable")]
+    pub(crate) remote_subscribers: HashMap<SubscriberId, KeyExpr<'static>>,
+    pub(crate) publishers: HashMap<Id, PublisherState>,
+    #[cfg(feature = "unstable")]
+    pub(crate) remote_tokens: HashMap<TokenId, KeyExpr<'static>>,
     //pub(crate) publications: Vec<OwnedKeyExpr>,
     pub(crate) subscribers: HashMap<Id, Arc<SubscriberState>>,
+    pub(crate) liveliness_subscribers: HashMap<Id, Arc<SubscriberState>>,
     pub(crate) queryables: HashMap<Id, Arc<QueryableState>>,
     #[cfg(feature = "unstable")]
     pub(crate) tokens: HashMap<Id, Arc<LivelinessTokenState>>,
     #[cfg(feature = "unstable")]
     pub(crate) matching_listeners: HashMap<Id, Arc<MatchingListenerState>>,
     pub(crate) queries: HashMap<RequestId, QueryState>,
+    #[cfg(feature = "unstable")]
+    pub(crate) liveliness_queries: HashMap<InterestId, LivelinessQueryState>,
     pub(crate) aggregated_subscribers: Vec<OwnedKeyExpr>,
-    //pub(crate) aggregated_publishers: Vec<OwnedKeyExpr>,
+    pub(crate) aggregated_publishers: Vec<OwnedKeyExpr>,
 }
 
 impl SessionState {
     pub(crate) fn new(
         aggregated_subscribers: Vec<OwnedKeyExpr>,
-        _aggregated_publishers: Vec<OwnedKeyExpr>,
+        aggregated_publishers: Vec<OwnedKeyExpr>,
     ) -> SessionState {
         SessionState {
             primitives: None,
             expr_id_counter: AtomicExprId::new(1), // Note: start at 1 because 0 is reserved for NO_RESOURCE
             qid_counter: AtomicRequestId::new(0),
-            decl_id_counter: AtomicUsize::new(0),
+            #[cfg(feature = "unstable")]
+            liveliness_qid_counter: AtomicRequestId::new(0),
             local_resources: HashMap::new(),
             remote_resources: HashMap::new(),
+            #[cfg(feature = "unstable")]
+            remote_subscribers: HashMap::new(),
+            publishers: HashMap::new(),
+            #[cfg(feature = "unstable")]
+            remote_tokens: HashMap::new(),
             //publications: Vec::new(),
             subscribers: HashMap::new(),
+            liveliness_subscribers: HashMap::new(),
             queryables: HashMap::new(),
             #[cfg(feature = "unstable")]
             tokens: HashMap::new(),
             #[cfg(feature = "unstable")]
             matching_listeners: HashMap::new(),
             queries: HashMap::new(),
+            #[cfg(feature = "unstable")]
+            liveliness_queries: HashMap::new(),
             aggregated_subscribers,
-            //aggregated_publishers,
+            aggregated_publishers,
         }
     }
 }
@@ -231,14 +267,32 @@ impl SessionState {
             self.remote_key_to_expr(key_expr)
         }
     }
+
+    pub(crate) fn subscribers(&self, kind: SubscriberKind) -> &HashMap<Id, Arc<SubscriberState>> {
+        match kind {
+            SubscriberKind::Subscriber => &self.subscribers,
+            SubscriberKind::LivelinessSubscriber => &self.liveliness_subscribers,
+        }
+    }
+
+    pub(crate) fn subscribers_mut(
+        &mut self,
+        kind: SubscriberKind,
+    ) -> &mut HashMap<Id, Arc<SubscriberState>> {
+        match kind {
+            SubscriberKind::Subscriber => &mut self.subscribers,
+            SubscriberKind::LivelinessSubscriber => &mut self.liveliness_subscribers,
+        }
+    }
 }
 
 impl fmt::Debug for SessionState {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(
             f,
-            "SessionState{{ subscribers: {} }}",
-            self.subscribers.len()
+            "SessionState{{ subscribers: {}, liveliness_subscribers: {} }}",
+            self.subscribers.len(),
+            self.liveliness_subscribers.len()
        )
     }
 }
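A small usage sketch (not part of the diff) of the accessors just added: callers pick the map by `SubscriberKind`, so liveliness subscribers get their own storage while sharing every code path that iterates over subscribers:

fn subscriber_counts(state: &SessionState) -> (usize, usize) {
    (
        state.subscribers(SubscriberKind::Subscriber).len(),
        state.subscribers(SubscriberKind::LivelinessSubscriber).len(),
    )
}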
@@ -246,7 +300,36 @@ impl fmt::Debug for SessionState {
 pub(crate) struct ResourceNode {
     pub(crate) key_expr: OwnedKeyExpr,
     pub(crate) subscribers: Vec<Arc<SubscriberState>>,
+    pub(crate) liveliness_subscribers: Vec<Arc<SubscriberState>>,
+}
+
+impl ResourceNode {
+    pub(crate) fn new(key_expr: OwnedKeyExpr) -> Self {
+        Self {
+            key_expr,
+            subscribers: Vec::new(),
+            liveliness_subscribers: Vec::new(),
+        }
+    }
+
+    pub(crate) fn subscribers(&self, kind: SubscriberKind) -> &Vec<Arc<SubscriberState>> {
+        match kind {
+            SubscriberKind::Subscriber => &self.subscribers,
+            SubscriberKind::LivelinessSubscriber => &self.liveliness_subscribers,
+        }
+    }
+
+    pub(crate) fn subscribers_mut(
+        &mut self,
+        kind: SubscriberKind,
+    ) -> &mut Vec<Arc<SubscriberState>> {
+        match kind {
+            SubscriberKind::Subscriber => &mut self.subscribers,
+            SubscriberKind::LivelinessSubscriber => &mut self.liveliness_subscribers,
+        }
+    }
 }
+
 pub(crate) enum Resource {
     Prefix { prefix: Box<str> },
     Node(ResourceNode),
@@ -255,16 +338,13 @@ pub(crate) enum Resource {
 impl Resource {
     pub(crate) fn new(name: Box<str>) -> Self {
         if keyexpr::new(name.as_ref()).is_ok() {
-            Self::for_keyexpr(unsafe { OwnedKeyExpr::from_boxed_string_unchecked(name) })
+            Self::for_keyexpr(unsafe { OwnedKeyExpr::from_boxed_str_unchecked(name) })
         } else {
             Self::Prefix { prefix: name }
         }
     }
     pub(crate) fn for_keyexpr(key_expr: OwnedKeyExpr) -> Self {
-        Self::Node(ResourceNode {
-            key_expr,
-            subscribers: Vec::new(),
-        })
+        Self::Node(ResourceNode::new(key_expr))
     }
     pub(crate) fn name(&self) -> &str {
         match self {
@@ -290,7 +370,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> {
     fn declare_subscriber<'b, TryIntoKeyExpr>(
         &'s self,
         key_expr: TryIntoKeyExpr,
-    ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler>
+    ) -> SubscriberBuilder<'a, 'b, DefaultHandler>
     where
         TryIntoKeyExpr: TryInto<KeyExpr<'b>>,
         <TryIntoKeyExpr as TryInto<KeyExpr<'b>>>::Error: Into<zenoh_result::Error>,
     {
         SubscriberBuilder {
             session: self.clone(),
             key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into),
-            reliability: Reliability::default(),
-            mode: PushMode,
+            #[cfg(feature = "unstable")]
+            reliability: Reliability::DEFAULT,
             origin: Locality::default(),
-            handler: DefaultHandler,
+            handler: DefaultHandler::default(),
         }
     }
     fn declare_queryable<'b, TryIntoKeyExpr>(
@@ -317,7 +397,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> {
             key_expr: key_expr.try_into().map_err(Into::into),
             complete: false,
             origin: Locality::default(),
-            handler: DefaultHandler,
+            handler: DefaultHandler::default(),
         }
     }
     fn declare_publisher<'b, TryIntoKeyExpr>(
@@ -331,8 +411,10 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> {
         PublisherBuilder {
             session: self.clone(),
             key_expr: key_expr.try_into().map_err(Into::into),
-            congestion_control: CongestionControl::default(),
-            priority: Priority::default(),
+            encoding: Encoding::default(),
+            congestion_control: CongestionControl::DEFAULT,
+            priority: Priority::DEFAULT,
+            is_express: false,
             destination: Locality::default(),
         }
     }
@@ -369,31 +451,47 @@ impl fmt::Debug for SessionRef<'_> {
     }
 }
 
-/// A trait implemented by types that can be undeclared.
-pub trait Undeclarable<S, O, T = ()>
+pub(crate) trait UndeclarableSealed<S, O, T = ()>
 where
     O: Resolve<T> + Send,
 {
     fn undeclare_inner(self, session: S) -> O;
 }
 
-impl<'a, O, T, G> Undeclarable<&'a Session, O, T> for G
+impl<'a, O, T, G> UndeclarableSealed<&'a Session, O, T> for G
 where
     O: Resolve<T> + Send,
-    G: Undeclarable<(), O, T>,
+    G: UndeclarableSealed<(), O, T>,
 {
     fn undeclare_inner(self, _: &'a Session) -> O {
         self.undeclare_inner(())
     }
 }
 
+// NOTE: `UndeclarableSealed` is only pub(crate) to hide the `undeclare_inner` method. So we don't
+// care about the `private_bounds` lint in this particular case.
+#[allow(private_bounds)] +/// A trait implemented by types that can be undeclared. +pub trait Undeclarable: UndeclarableSealed +where + O: Resolve + Send, +{ +} + +impl Undeclarable for U +where + O: Resolve + Send, + U: UndeclarableSealed, +{ +} + /// A zenoh session. /// pub struct Session { pub(crate) runtime: Runtime, pub(crate) state: Arc>, pub(crate) id: u16, - pub(crate) alive: bool, + close_on_drop: bool, owns_runtime: bool, task_controller: TaskController, } @@ -415,7 +513,7 @@ impl Session { runtime: runtime.clone(), state: state.clone(), id: SESSION_ID_COUNTER.fetch_add(1, Ordering::SeqCst), - alive: true, + close_on_drop: true, owns_runtime: false, task_controller: TaskController::default(), }; @@ -435,8 +533,8 @@ impl Session { /// pointer to it (`Arc`). This is equivalent to `Arc::new(session)`. /// /// This is useful to share ownership of the `Session` between several threads - /// and tasks. It also allows to create [`Subscriber`](Subscriber) and - /// [`Queryable`](Queryable) with static lifetime that can be moved to several + /// and tasks. It also allows to create [`Subscriber`](crate::pubsub::Subscriber) and + /// [`Queryable`](crate::query::Queryable) with static lifetime that can be moved to several /// threads and tasks /// /// Note: the given zenoh `Session` will be closed when the last reference to @@ -446,11 +544,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -469,7 +566,7 @@ impl Session { /// the program's life. Dropping the returned reference will cause a memory /// leak. /// - /// This is useful to move entities (like [`Subscriber`](Subscriber)) which + /// This is useful to move entities (like [`Subscriber`](crate::pubsub::Subscriber)) which /// lifetimes are bound to the session lifetime in several threads or tasks. /// /// Note: the given zenoh `Session` cannot be closed any more. At process @@ -480,11 +577,10 @@ impl Session { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// use zenoh::Session; + /// use zenoh::prelude::*; /// - /// let session = Session::leak(zenoh::open(config::peer()).res().await.unwrap()); - /// let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); + /// let session = zenoh::Session::leak(zenoh::open(zenoh::config::peer()).await.unwrap()); + /// let subscriber = session.declare_subscriber("key/expression").await.unwrap(); /// tokio::task::spawn(async move { /// while let Ok(sample) = subscriber.recv_async().await { /// println!("Received: {:?}", sample); @@ -499,7 +595,7 @@ impl Session { /// Returns the identifier of the current session. `zid()` is a convenient shortcut. /// See [`Session::info()`](`Session::info()`) and [`SessionInfo::zid()`](`SessionInfo::zid()`) for more details. 
pub fn zid(&self) -> ZenohId { - self.info().zid().res_sync() + self.info().zid().wait() } pub fn hlc(&self) -> Option<&HLC> { @@ -515,15 +611,17 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.close().res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// session.close().await.unwrap(); /// # } /// ``` pub fn close(mut self) -> impl Resolve> { ResolveFuture::new(async move { trace!("close()"); + // set the flag first to avoid double panic if this function panic + self.close_on_drop = false; self.task_controller.terminate_all(Duration::from_secs(10)); if self.owns_runtime { self.runtime.close().await?; @@ -534,7 +632,6 @@ impl Session { state.queryables.clear(); drop(state); primitives.as_ref().unwrap().send_close(); - self.alive = false; Ok(()) }) } @@ -544,7 +641,7 @@ impl Session { O: Resolve>, T: Undeclarable<&'a Self, O, ZResult<()>>, { - Undeclarable::undeclare_inner(decl, self) + UndeclarableSealed::undeclare_inner(decl, self) } /// Get the current configuration of the zenoh [`Session`](Session). @@ -559,9 +656,9 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let peers = session.config().get("connect/endpoints").unwrap(); /// # } /// ``` @@ -570,15 +667,42 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let _ = session.config().insert_json5("connect/endpoints", r#"["tcp/127.0.0.1/7447"]"#); /// # } /// ``` pub fn config(&self) -> &Notifier { self.runtime.config() } + + /// Get a new Timestamp from a Zenoh session [`Session`](Session). 
+ /// + /// The returned timestamp has the current time, with the Session's runtime ZenohID + /// + /// # Examples + /// ### Read current zenoh configuration + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::*; + /// + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let timestamp = session.new_timestamp(); + /// # } + /// ``` + pub fn new_timestamp(&self) -> Timestamp { + match self.hlc() { + Some(hlc) => hlc.new_timestamp(), + None => { + // Called in the case that the runtime is not initialized with an hlc + // UNIX_EPOCH is Returns a Timespec::zero(), Unwrap Should be permissable here + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().into(); + Timestamp::new(now, self.runtime.zid().into()) + } + } + } } impl<'a> SessionDeclarations<'a, 'a> for Session { @@ -588,7 +712,7 @@ impl<'a> SessionDeclarations<'a, 'a> for Session { fn declare_subscriber<'b, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -631,10 +755,10 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let key_expr = session.declare_keyexpr("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let key_expr = session.declare_keyexpr("key/expression").await.unwrap(); /// # } /// ``` pub fn declare_keyexpr<'a, 'b: 'a, TryIntoKeyExpr>( @@ -657,7 +781,7 @@ impl Session { ResolveClosure::new(move || { let key_expr: KeyExpr = key_expr?; let prefix_len = key_expr.len() as u32; - let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + let expr_id = self.declare_prefix(key_expr.as_str()).wait(); let key_expr = match key_expr.0 { KeyExprInner::Borrowed(key_expr) | KeyExprInner::BorrowedWire { key_expr, .. 
} => { KeyExpr(KeyExprInner::BorrowedWire { @@ -687,40 +811,43 @@ impl Session { /// # Arguments /// /// * `key_expr` - Key expression matching the resources to put - /// * `value` - The value to put + /// * `payload` - The payload to put /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::{bytes::Encoding, prelude::*}; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// session - /// .put("key/expression", "value") - /// .encoding(KnownEncoding::TextPlain) - /// .res() + /// .put("key/expression", "payload") + /// .encoding(Encoding::TEXT_PLAIN) /// .await /// .unwrap(); /// # } /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoValue>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoZBytes>( &'a self, key_expr: TryIntoKeyExpr, - value: IntoValue, - ) -> PutBuilder<'a, 'b> + payload: IntoZBytes, + ) -> SessionPutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoValue: Into, + IntoZBytes: Into, { - PutBuilder { + SessionPutBuilder { publisher: self.declare_publisher(key_expr), - value: value.into(), - kind: SampleKind::Put, - #[cfg(feature = "unstable")] + kind: PublicationBuilderPut { + payload: payload.into(), + encoding: Encoding::default(), + }, + timestamp: None, attachment: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), } } @@ -734,32 +861,33 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// session.delete("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// session.delete("key/expression").await.unwrap(); /// # } /// ``` #[inline] pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> DeleteBuilder<'a, 'b> + ) -> SessionDeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PutBuilder { + SessionDeleteBuilder { publisher: self.declare_publisher(key_expr), - value: Value::empty(), - kind: SampleKind::Delete, - #[cfg(feature = "unstable")] + kind: PublicationBuilderDelete, + timestamp: None, attachment: None, + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), } } /// Query data from the matching queryables in the system. /// - /// Unless explicitly requested via [`GetBuilder::accept_replies`], replies are guaranteed to have + /// Unless explicitly requested via [`accept_replies`](crate::session::SessionGetBuilder::accept_replies), replies are guaranteed to have /// key expressions that match the requested `selector`. 
/// /// # Arguments @@ -770,19 +898,19 @@ impl Session { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session.get("key/expression").res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); + /// let replies = session.get("key/expression").await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { - /// println!(">> Received {:?}", reply.sample); + /// println!(">> Received {:?}", reply.result()); /// } /// # } /// ``` pub fn get<'a, 'b: 'a, TryIntoSelector>( &'a self, selector: TryIntoSelector, - ) -> GetBuilder<'a, 'b, DefaultHandler> + ) -> SessionGetBuilder<'a, 'b, DefaultHandler> where TryIntoSelector: TryInto>, >>::Error: Into, @@ -792,53 +920,61 @@ impl Session { let conf = self.runtime.config().lock(); Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())) }; - GetBuilder { + let qos: QoS = request::ext::QoSType::REQUEST.into(); + SessionGetBuilder { session: self, selector, - scope: Ok(None), - target: QueryTarget::default(), - consolidation: QueryConsolidation::default(), + target: QueryTarget::DEFAULT, + consolidation: QueryConsolidation::DEFAULT, + qos: qos.into(), destination: Locality::default(), timeout, value: None, - #[cfg(feature = "unstable")] attachment: None, - handler: DefaultHandler, + handler: DefaultHandler::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), } } } impl Session { pub(crate) fn clone(&self) -> Self { - Session { + Self { runtime: self.runtime.clone(), state: self.state.clone(), id: self.id, - alive: false, + close_on_drop: false, owns_runtime: self.owns_runtime, task_controller: self.task_controller.clone(), } } #[allow(clippy::new_ret_no_self)] - pub(super) fn new(config: Config) -> impl Resolve> { + pub(super) fn new( + config: Config, + #[cfg(feature = "shared-memory")] shm_clients: Option>, + ) -> impl Resolve> { ResolveFuture::new(async move { tracing::debug!("Config: {:?}", &config); let aggregated_subscribers = config.aggregation().subscribers().clone(); let aggregated_publishers = config.aggregation().publishers().clone(); - let mut runtime = RuntimeBuilder::new(config).build().await?; + #[allow(unused_mut)] // Required for shared-memory + let mut runtime = RuntimeBuilder::new(config); + #[cfg(feature = "shared-memory")] + { + runtime = runtime.shm_clients(shm_clients); + } + let mut runtime = runtime.build().await?; let mut session = Self::init( runtime.clone(), aggregated_subscribers, aggregated_publishers, ) - .res_async() .await; session.owns_runtime = true; runtime.start().await?; - // Workaround for the declare_and_shoot problem - tokio::time::sleep(Duration::from_millis(*API_OPEN_SESSION_DELAY)).await; Ok(session) }) } @@ -856,15 +992,15 @@ impl Session { None => { let expr_id = state.expr_id_counter.fetch_add(1, Ordering::SeqCst); let mut res = Resource::new(Box::from(prefix)); - if let Resource::Node(ResourceNode { - key_expr, - subscribers, - .. 
- }) = &mut res - { - for sub in state.subscribers.values() { - if key_expr.intersects(&sub.key_expr) { - subscribers.push(sub.clone()); + if let Resource::Node(res_node) = &mut res { + for kind in [ + SubscriberKind::Subscriber, + SubscriberKind::LivelinessSubscriber, + ] { + for sub in state.subscribers(kind).values() { + if res_node.key_expr.intersects(&sub.key_expr) { + res_node.subscribers_mut(kind).push(sub.clone()); + } } } } @@ -872,9 +1008,10 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: WireExpr { @@ -890,150 +1027,172 @@ impl Session { }) } - /// Declare a publication for the given key expression. - /// - /// Puts that match the given key expression will only be sent on the network - /// if matching subscribers exist in the system. - /// - /// # Arguments - /// - /// * `key_expr` - The key expression to publish - pub(crate) fn declare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // tracing::trace!("declare_publication({:?})", key_expr); - // let mut state = zwrite!(self.state); - // if !state.publications.iter().any(|p| **p == **key_expr) { - // let declared_pub = if let Some(join_pub) = state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // (!joined_pub).then(|| join_pub.clone().into()) - // } else { - // Some(key_expr.clone()) - // }; - // state.publications.push(key_expr.into()); - - // if let Some(res) = declared_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.decl_publisher(&res.to_wire(self), None); - // } - // } - Ok(()) - }) + pub(crate) fn declare_publisher_inner( + &self, + key_expr: KeyExpr, + destination: Locality, + ) -> ZResult { + let mut state = zwrite!(self.state); + tracing::trace!("declare_publisher({:?})", key_expr); + let id = self.runtime.next_id(); + + let mut pub_state = PublisherState { + id, + remote_id: id, + key_expr: key_expr.clone().into_owned(), + destination, + }; + + let declared_pub = (destination != Locality::SessionLocal) + .then(|| { + match state + .aggregated_publishers + .iter() + .find(|s| s.includes(&key_expr)) + { + Some(join_pub) => { + if let Some(joined_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal + && join_pub.includes(&p.key_expr) + }) { + pub_state.remote_id = joined_pub.remote_id; + None + } else { + Some(join_pub.clone().into()) + } + } + None => { + if let Some(twin_pub) = state.publishers.values().find(|p| { + p.destination != Locality::SessionLocal && p.key_expr == key_expr + }) { + pub_state.remote_id = twin_pub.remote_id; + None + } else { + Some(key_expr.clone()) + } + } + } + }) + .flatten(); + + state.publishers.insert(id, pub_state); + + if let Some(res) = declared_pub { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id, + mode: InterestMode::CurrentFuture, + options: InterestOptions::KEYEXPRS + InterestOptions::SUBSCRIBERS, + wire_expr: Some(res.to_wire(self).to_owned()), + 
ext_qos: network::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: network::ext::NodeIdType::DEFAULT, + }); + } + Ok(id) } - /// Undeclare a publication previously declared - /// with [`declare_publication`](Session::declare_publication). - /// - /// # Arguments - /// - /// * `key_expr` - The key expression of the publication to undeclarte - pub(crate) fn undeclare_publication_intent<'a>( - &'a self, - _key_expr: KeyExpr<'a>, - ) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - // let mut state = zwrite!(self.state); - // if let Some(idx) = state.publications.iter().position(|p| **p == *key_expr) { - // trace!("undeclare_publication({:?})", key_expr); - // state.publications.remove(idx); - // match state - // .aggregated_publishers - // .iter() - // .find(|s| s.includes(&key_expr)) - // { - // Some(join_pub) => { - // let joined_pub = state.publications.iter().any(|p| join_pub.includes(p)); - // if !joined_pub { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // let key_expr = WireExpr::from(join_pub).to_owned(); - // drop(state); - // primitives.forget_publisher(&key_expr, None); - // } - // } - // None => { - // let primitives = state.primitives.as_ref().unwrap().clone(); - // drop(state); - // primitives.forget_publisher(&key_expr.to_wire(self), None); - // } - // }; - // } else { - // bail!("Unable to find publication") - // } + pub(crate) fn undeclare_publisher_inner(&self, pid: Id) -> ZResult<()> { + let mut state = zwrite!(self.state); + if let Some(pub_state) = state.publishers.remove(&pid) { + trace!("undeclare_publisher({:?})", pub_state); + if pub_state.destination != Locality::SessionLocal { + // Note: there might be several publishers on the same KeyExpr. + // Before calling forget_publishers(key_expr), check if this was the last one. + if !state.publishers.values().any(|p| { + p.destination != Locality::SessionLocal && p.remote_id == pub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_interest(Interest { + id: pub_state.remote_id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } + } Ok(()) - }) + } else { + Err(zerror!("Unable to find publisher").into()) + } } pub(crate) fn declare_subscriber_inner( &self, key_expr: &KeyExpr, - scope: &Option, origin: Locality, callback: Callback<'static, Sample>, info: &SubscriberInfo, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("subscribe({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); - let key_expr = match scope { - Some(scope) => scope / key_expr, - None => key_expr.clone(), - }; + tracing::trace!("declare_subscriber({:?})", key_expr); + let id = self.runtime.next_id(); - let sub_state = Arc::new(SubscriberState { + let mut sub_state = SubscriberState { id, + remote_id: id, key_expr: key_expr.clone().into_owned(), - scope: scope.clone().map(|e| e.into_owned()), origin, callback, - }); + }; - #[cfg(not(feature = "unstable"))] let declared_sub = origin != Locality::SessionLocal; - #[cfg(feature = "unstable")] - let declared_sub = origin != Locality::SessionLocal - && !key_expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); let declared_sub = declared_sub .then(|| { match state - .aggregated_subscribers // TODO: can this be an OwnedKeyExpr? 
- .iter() - .find(|s| s.includes( &key_expr)) + .aggregated_subscribers + .iter() + .find(|s| s.includes(key_expr)) { Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - (!joined_sub).then(|| join_sub.clone().into()) + if let Some(joined_sub) = state + .subscribers(SubscriberKind::Subscriber) + .values() + .find(|s| { + s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) + }) + { + sub_state.remote_id = joined_sub.remote_id; + None + } else { + Some(join_sub.clone().into()) + } } None => { - let twin_sub = state - .subscribers + if let Some(twin_sub) = state + .subscribers(SubscriberKind::Subscriber) .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr); - (!twin_sub).then(|| key_expr.clone()) + .find(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr) + { + sub_state.remote_id = twin_sub.remote_id; + None + } else { + Some(key_expr.clone()) + } } } }) .flatten(); - state.subscribers.insert(sub_state.id, sub_state.clone()); + let sub_state = Arc::new(sub_state); + + state + .subscribers_mut(SubscriberKind::Subscriber) + .insert(sub_state.id, sub_state.clone()); for res in state .local_resources .values_mut() .filter_map(Resource::as_node_mut) { if key_expr.intersects(&res.key_expr) { - res.subscribers.push(sub_state.clone()); + res.subscribers_mut(SubscriberKind::Subscriber) + .push(sub_state.clone()); } } for res in state @@ -1042,7 +1201,8 @@ impl Session { .filter_map(Resource::as_node_mut) { if key_expr.intersects(&res.key_expr) { - res.subscribers.push(sub_state.clone()); + res.subscribers_mut(SubscriberKind::Subscriber) + .push(sub_state.clone()); } } @@ -1054,14 +1214,14 @@ impl Session { // match key_expr.as_str().find('*') { // Some(0) => key_expr.to_wire(self), // Some(pos) => { - // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).res_sync(); + // let expr_id = self.declare_prefix(&key_expr.as_str()[..pos]).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(&key_expr.as_str()[pos..]), // } // } // None => { - // let expr_id = self.declare_prefix(key_expr.as_str()).res_sync(); + // let expr_id = self.declare_prefix(key_expr.as_str()).wait(); // WireExpr { // scope: expr_id, // suffix: std::borrow::Cow::Borrowed(""), @@ -1073,11 +1233,12 @@ impl Session { // }; primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), + ext_nodeid: declare::ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + id, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: *info, }), @@ -1093,96 +1254,71 @@ impl Session { Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: usize) -> ZResult<()> { + pub(crate) fn undeclare_subscriber_inner(&self, sid: Id, kind: SubscriberKind) -> ZResult<()> { let mut state = zwrite!(self.state); - if let Some(sub_state) = state.subscribers.remove(&sid) { - trace!("unsubscribe({:?})", sub_state); + if let Some(sub_state) = state.subscribers_mut(kind).remove(&sid) { + trace!("undeclare_subscriber({:?})", sub_state); for res in state .local_resources .values_mut() .filter_map(Resource::as_node_mut) { - res.subscribers.retain(|sub| sub.id != sub_state.id); + res.subscribers_mut(kind) + .retain(|sub| sub.id != sub_state.id); } for res in state .remote_resources 
.values_mut() .filter_map(Resource::as_node_mut) { - res.subscribers.retain(|sub| sub.id != sub_state.id); + res.subscribers_mut(kind) + .retain(|sub| sub.id != sub_state.id); } - #[cfg(not(feature = "unstable"))] - let send_forget = sub_state.origin != Locality::SessionLocal; - #[cfg(feature = "unstable")] - let send_forget = sub_state.origin != Locality::SessionLocal - && !sub_state - .key_expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS); - if send_forget { + if sub_state.origin != Locality::SessionLocal && kind == SubscriberKind::Subscriber { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. - let key_expr = &sub_state.key_expr; - match state - .aggregated_subscribers - .iter() - .find(|s| s.includes(key_expr)) - { - Some(join_sub) => { - let joined_sub = state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }); - if !joined_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - let wire_expr = WireExpr::from(join_sub).to_owned(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } + if !state.subscribers(kind).values().any(|s| { + s.origin != Locality::SessionLocal && s.remote_id == sub_state.remote_id + }) { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_declare(Declare { + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: sub_state.remote_id, + ext_wire_expr: WireExprType { + wire_expr: WireExpr::empty(), + }, + }), + }); + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) } - None => { - let twin_sub = state - .subscribers - .values() - .any(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr); - if !twin_sub { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - }, - }), - }); + } + } else { + #[cfg(feature = "unstable")] + if kind == SubscriberKind::LivelinessSubscriber { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) - } - } - } - }; + primitives.send_interest(Interest { + id: sub_state.id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: declare::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + } } + Ok(()) } else { Err(zerror!("Unable to find subscriber").into()) @@ -1197,8 +1333,8 @@ impl Session { callback: 
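// ---------------------------------------------------------------------------
// Usage sketch (not part of the patch), mirroring the doc examples added
// elsewhere in this diff: dropping a subscriber undeclares it implicitly,
// while an explicit undeclare() surfaces errors and runs the
// "last subscriber on this remote declaration" check above.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let subscriber = session.declare_subscriber("key/expression").await.unwrap();
    // UndeclareSubscriber goes on the wire only if no other subscriber
    // shares the same remote declaration.
    subscriber.undeclare().await.unwrap();
}
// ---------------------------------------------------------------------------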
Callback<'static, Query>, ) -> ZResult> { let mut state = zwrite!(self.state); - tracing::trace!("queryable({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + tracing::trace!("declare_queryable({:?})", key_expr); + let id = self.runtime.next_id(); let qable_state = Arc::new(QueryableState { id, key_expr: key_expr.to_owned(), @@ -1206,158 +1342,50 @@ impl Session { origin, callback, }); - #[cfg(feature = "complete_n")] - { - state.queryables.insert(id, qable_state.clone()); - - if origin != Locality::SessionLocal && complete { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = Session::complete_twin_qabls(&state, key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } - } - #[cfg(not(feature = "complete_n"))] - { - let twin_qabl = Session::twin_qabl(&state, key_expr); - let complete_twin_qabl = twin_qabl && Session::complete_twin_qabl(&state, key_expr); - state.queryables.insert(id, qable_state.clone()); + state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal && (!twin_qabl || (!complete_twin_qabl && complete)) - { - let primitives = state.primitives.as_ref().unwrap().clone(); - let complete = u8::from(!complete_twin_qabl && complete); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: id as u32, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); - } + if origin != Locality::SessionLocal { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + let qabl_info = QueryableInfoType { + complete, + distance: 0, + }; + primitives.send_declare(Declare { + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr.to_owned(), + ext_info: qabl_info, + }), + }); } Ok(qable_state) } - pub(crate) fn twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(not(feature = "complete_n"))] - pub(crate) fn complete_twin_qabl(state: &SessionState, key: &WireExpr) -> bool { - state.queryables.values().any(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - } - - #[cfg(feature = "complete_n")] - pub(crate) fn complete_twin_qabls(state: &SessionState, key: &WireExpr) -> u8 { - state - .queryables - .values() - .filter(|q| { - q.origin != Locality::SessionLocal - && q.complete - && state.local_wireexpr_to_expr(&q.key_expr).unwrap() - == state.local_wireexpr_to_expr(key).unwrap() - }) - .count() as u8 - } - - pub(crate) fn close_queryable(&self, qid: usize) -> ZResult<()> { + pub(crate) fn close_queryable(&self, qid: Id) -> 
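// ---------------------------------------------------------------------------
// Usage sketch (not part of the patch): with the simplification above, every
// queryable is declared with its own id and completeness travels as plain
// ext_info, with no twin-queryable merging. The `complete(true)` builder call
// is an assumption based on the `complete` field carried by the builder.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let queryable = session
        .declare_queryable("key/expression")
        .complete(true)
        .await
        .unwrap();
    while let Ok(query) = queryable.recv_async().await {
        query.reply("key/expression", "value").await.unwrap();
    }
}
// ---------------------------------------------------------------------------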
ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { - trace!("close_queryable({:?})", qable_state); + trace!("undeclare_queryable({:?})", qable_state); if qable_state.origin != Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); - if Session::twin_qabl(&state, &qable_state.key_expr) { - // There still exist Queryables on the same KeyExpr. - if qable_state.complete { - #[cfg(feature = "complete_n")] - { - let complete = - Session::complete_twin_qabls(&state, &qable_state.key_expr); - drop(state); - let qabl_info = QueryableInfo { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - #[cfg(not(feature = "complete_n"))] - { - if !Session::complete_twin_qabl(&state, &qable_state.key_expr) { - drop(state); - let qabl_info = QueryableInfo { - complete: 0, - distance: 0, - }; - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: qable_state.key_expr.clone(), - ext_info: qabl_info, - }), - }); - } - } - } - } else { - // There are no more Queryables on the same KeyExpr. - drop(state); - primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { - wire_expr: qable_state.key_expr.clone(), - }, - }), - }); - } + drop(state); + primitives.send_declare(Declare { + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: qable_state.id, + ext_wire_expr: WireExprType { + wire_expr: qable_state.key_expr.clone(), + }, + }), + }); } Ok(()) } else { @@ -1372,8 +1400,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); tracing::trace!("declare_liveliness({:?})", key_expr); - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); - let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); + let id = self.runtime.next_id(); let tok_state = Arc::new(LivelinessTokenState { id, key_expr: key_expr.clone().into_owned(), @@ -1383,20 +1410,83 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: declare::ext::QoSType::declare_default(), + interest_id: None, + ext_qos: declare::ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: id as u32, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, wire_expr: key_expr.to_wire(self).to_owned(), - ext_info: SubscriberInfo::default(), }), }); Ok(tok_state) } + #[cfg(feature = "unstable")] + pub(crate) fn declare_liveliness_subscriber_inner( + &self, + key_expr: &KeyExpr, + origin: 
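// ---------------------------------------------------------------------------
// Usage sketch (unstable API; not part of the patch), mirroring the liveliness
// doc example in this diff: with declare_liveliness above, tokens are now
// first-class DeclareToken messages rather than subscribers on a reserved
// key-expression prefix.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let _token = session
        .liveliness()
        .declare_token("key/expression")
        .await
        .unwrap();
    // The token stays alive until it is dropped or explicitly undeclared.
}
// ---------------------------------------------------------------------------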
Locality, + callback: Callback<'static, Sample>, + ) -> ZResult> { + let mut state = zwrite!(self.state); + trace!("declare_liveliness_subscriber({:?})", key_expr); + let id = self.runtime.next_id(); + + let sub_state = SubscriberState { + id, + remote_id: id, + key_expr: key_expr.clone().into_owned(), + origin, + callback, + }; + + let sub_state = Arc::new(sub_state); + + state + .subscribers_mut(SubscriberKind::LivelinessSubscriber) + .insert(sub_state.id, sub_state.clone()); + + for res in state + .local_resources + .values_mut() + .filter_map(Resource::as_node_mut) + { + if key_expr.intersects(&res.key_expr) { + res.subscribers_mut(SubscriberKind::LivelinessSubscriber) + .push(sub_state.clone()); + } + } + + for res in state + .remote_resources + .values_mut() + .filter_map(Resource::as_node_mut) + { + if key_expr.intersects(&res.key_expr) { + res.subscribers_mut(SubscriberKind::LivelinessSubscriber) + .push(sub_state.clone()); + } + } + + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::Future, + options: InterestOptions::KEYEXPRS + InterestOptions::TOKENS, + wire_expr: Some(key_expr.to_wire(self).to_owned()), + ext_qos: declare::ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::DEFAULT, + }); + + Ok(sub_state) + } + #[zenoh_macros::unstable] - pub(crate) fn undeclare_liveliness(&self, tid: usize) -> ZResult<()> { + pub(crate) fn undeclare_liveliness(&self, tid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(tok_state) = state.tokens.remove(&tid) { trace!("undeclare_liveliness({:?})", tok_state); @@ -1407,14 +1497,13 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { - wire_expr: key_expr.to_wire(self).to_owned(), - }, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: tok_state.id, + ext_wire_expr: WireExprType::null(), }), }); } @@ -1431,8 +1520,7 @@ impl Session { callback: Callback<'static, MatchingStatus>, ) -> ZResult> { let mut state = zwrite!(self.state); - - let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); + let id = self.runtime.next_id(); tracing::trace!("matches_listener({:?}) => {id}", publisher.key_expr); let listener_state = Arc::new(MatchingListenerState { id, @@ -1465,33 +1553,29 @@ impl Session { key_expr: &KeyExpr, destination: Locality, ) -> ZResult { - use crate::net::routing::dispatcher::tables::RoutingExpr; let router = self.runtime.router(); let tables = zread!(router.tables.tables); - let res = crate::net::routing::dispatcher::resource::Resource::get_resource( - &tables.root_res, - key_expr.as_str(), - ); - let route = crate::net::routing::dispatcher::pubsub::get_local_data_route( - &tables, - &res, - &mut RoutingExpr::new(&tables.root_res, key_expr.as_str()), - ); + let matching_subscriptions = + crate::net::routing::dispatcher::pubsub::get_matching_subscriptions(&tables, key_expr); drop(tables); let matching = match destination { - Locality::Any => !route.is_empty(), + Locality::Any => !matching_subscriptions.is_empty(), Locality::Remote => { if let Some(face) = 
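// ---------------------------------------------------------------------------
// Usage sketch (unstable API; not part of the patch): on the wire, the
// liveliness subscriber above is an Interest with mode Future over TOKENS,
// but from the caller's side it behaves like an ordinary subscriber.
// `liveliness().declare_subscriber` is an assumption consistent with
// declare_liveliness_subscriber_inner.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let subscriber = session
        .liveliness()
        .declare_subscriber("key/expression")
        .await
        .unwrap();
    while let Ok(sample) = subscriber.recv_async().await {
        // A Put-like sample signals a token appearing, a Delete-like one a
        // token vanishing (see the UndeclareToken handling further down).
        println!("liveliness change on {}", sample.key_expr());
    }
}
// ---------------------------------------------------------------------------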
zread!(self.state).primitives.as_ref() { - route.values().any(|dir| !Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| !Arc::ptr_eq(dir, &face.state)) } else { - !route.is_empty() + !matching_subscriptions.is_empty() } } Locality::SessionLocal => { if let Some(face) = zread!(self.state).primitives.as_ref() { - route.values().any(|dir| Arc::ptr_eq(&dir.0, &face.state)) + matching_subscriptions + .values() + .any(|dir| Arc::ptr_eq(dir, &face.state)) } else { false } @@ -1577,7 +1661,7 @@ impl Session { } #[zenoh_macros::unstable] - pub(crate) fn undeclare_matches_listener_inner(&self, sid: usize) -> ZResult<()> { + pub(crate) fn undeclare_matches_listener_inner(&self, sid: Id) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(state) = state.matching_listeners.remove(&sid) { trace!("undeclare_matches_listener_inner({:?})", state); @@ -1587,51 +1671,25 @@ impl Session { } } - pub(crate) fn handle_data( + pub(crate) fn execute_subscriber_callbacks( &self, local: bool, key_expr: &WireExpr, info: Option, payload: ZBuf, - #[cfg(feature = "unstable")] attachment: Option, + kind: SubscriberKind, + attachment: Option, ) { let mut callbacks = SingleOrVec::default(); let state = zread!(self.state); if key_expr.suffix.is_empty() { match state.get_res(&key_expr.scope, key_expr.mapping, local) { Some(Resource::Node(res)) => { - for sub in &res.subscribers { + for sub in res.subscribers(kind) { if sub.origin == Locality::Any || (local == (sub.origin == Locality::SessionLocal)) { - match &sub.scope { - Some(scope) => { - if !res.key_expr.starts_with(&***scope) { - tracing::warn!( - "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", - res.key_expr, - scope, - ); - } else { - match KeyExpr::try_from(&res.key_expr[(scope.len() + 1)..]) - { - Ok(key_expr) => callbacks.push(( - sub.callback.clone(), - key_expr.into_owned(), - )), - Err(e) => { - tracing::warn!( - "Error unscoping received Data for `{}`: {}", - res.key_expr, - e, - ); - } - } - } - } - None => callbacks - .push((sub.callback.clone(), res.key_expr.clone().into())), - }; + callbacks.push((sub.callback.clone(), res.key_expr.clone().into())); } } } @@ -1650,38 +1708,12 @@ impl Session { } else { match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - for sub in state.subscribers.values() { + for sub in state.subscribers(kind).values() { if (sub.origin == Locality::Any || (local == (sub.origin == Locality::SessionLocal))) && key_expr.intersects(&sub.key_expr) { - match &sub.scope { - Some(scope) => { - if !key_expr.starts_with(&***scope) { - tracing::warn!( - "Received Data for `{}`, which didn't start with scope `{}`: don't deliver to scoped Subscriber.", - key_expr, - scope, - ); - } else { - match KeyExpr::try_from(&key_expr[(scope.len() + 1)..]) { - Ok(key_expr) => callbacks.push(( - sub.callback.clone(), - key_expr.into_owned(), - )), - Err(e) => { - tracing::warn!( - "Error unscoping received Data for `{}`: {}", - key_expr, - e, - ); - } - } - } - } - None => callbacks - .push((sub.callback.clone(), key_expr.clone().into_owned())), - }; + callbacks.push((sub.callback.clone(), key_expr.clone().into_owned())); } } } @@ -1694,72 +1726,44 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); - #[cfg(feature = "unstable")] - { - 
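// ---------------------------------------------------------------------------
// Usage sketch (unstable API; method names are assumptions, not confirmed by
// this patch): a matching listener is fed by the routing-table check above,
// firing when the first matching subscriber appears and when the last one
// disappears.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let publisher = session.declare_publisher("key/expression").await.unwrap();
    let listener = publisher.matching_listener().await.unwrap();
    while let Ok(status) = listener.recv_async().await {
        println!("has matching subscribers: {}", status.matching_subscribers());
    }
}
// ---------------------------------------------------------------------------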
sample.attachment.clone_from(&attachment); - } + let sample = info + .clone() + .into_sample(key_expr, payload.clone(), attachment.clone()); cb(sample); } if let Some((cb, key_expr)) = last { - #[allow(unused_mut)] - let mut sample = Sample::with_info(key_expr, payload, info); - #[cfg(feature = "unstable")] - { - sample.attachment = attachment; - } + let sample = info.into_sample(key_expr, payload, attachment.clone()); cb(sample); } } - pub(crate) fn pull<'a>(&'a self, key_expr: &'a KeyExpr) -> impl Resolve> + 'a { - ResolveClosure::new(move || { - trace!("pull({:?})", key_expr); - let state = zread!(self.state); - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_request(Request { - id: 0, // @TODO compute a proper request ID - wire_expr: key_expr.to_wire(self).to_owned(), - ext_qos: ext::QoSType::request_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - ext_target: request::ext::TargetType::default(), - ext_budget: None, - ext_timeout: None, - payload: RequestBody::Pull(Pull { - ext_unknown: vec![], - }), - }); - Ok(()) - }) - } - #[allow(clippy::too_many_arguments)] pub(crate) fn query( &self, - selector: &Selector<'_>, - scope: &Option>, + key_expr: &KeyExpr<'_>, + parameters: &Parameters<'_>, target: QueryTarget, consolidation: QueryConsolidation, + qos: QoS, destination: Locality, timeout: Duration, value: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, + #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { - tracing::trace!("get({}, {:?}, {:?})", selector, target, consolidation); + tracing::trace!( + "get({}, {:?}, {:?})", + Selector::borrowed(key_expr, parameters), + target, + consolidation + ); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { - Mode::Auto => { - if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { - ConsolidationMode::None - } else { - ConsolidationMode::Latest - } - } - Mode::Manual(mode) => mode, + #[cfg(feature = "unstable")] + ConsolidationMode::Auto if parameters.time_range().is_some() => ConsolidationMode::None, + ConsolidationMode::Auto => ConsolidationMode::Latest, + mode => mode, }; let qid = state.qid_counter.fetch_add(1, Ordering::SeqCst); let nb_final = match destination { @@ -1771,6 +1775,7 @@ impl Session { self.task_controller .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { let state = self.state.clone(); + #[cfg(feature = "unstable")] let zid = self.runtime.zid(); async move { tokio::select! 
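// ---------------------------------------------------------------------------
// Usage sketch (not part of the patch): the Auto consolidation above resolves
// to None when the selector carries a time range and to Latest otherwise.
// `reply.result()` is assumed to expose the `result` field of the Reply
// struct shown in this diff.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let replies = session.get("key/expression").await.unwrap();
    while let Ok(reply) = replies.recv_async().await {
        match reply.result() {
            Ok(sample) => println!("{}: {:?}", sample.key_expr(), sample.payload()),
            Err(err) => println!("error reply: {:?}", err),
        }
    }
}
// ---------------------------------------------------------------------------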
{ @@ -1785,8 +1790,9 @@ impl Session { } } (query.callback)(Reply { - sample: Err("Timeout".into()), - replier_id: zid, + result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), + #[cfg(feature = "unstable")] + replier_id: Some(zid.into()), }); } } @@ -1795,22 +1801,14 @@ impl Session { } }); - let selector = match scope { - Some(scope) => Selector { - key_expr: scope / &*selector.key_expr, - parameters: selector.parameters.clone(), - }, - None => selector.clone(), - }; - tracing::trace!("Register query {} (nb_final = {})", qid, nb_final); - let wexpr = selector.key_expr.to_wire(self).to_owned(); + let wexpr = key_expr.to_wire(self).to_owned(); state.queries.insert( qid, QueryState { nb_final, - selector: selector.clone().into_owned(), - scope: scope.clone().map(|e| e.into_owned()), + key_expr: key_expr.clone().into_owned(), + parameters: parameters.clone().into_owned(), reception_mode: consolidation, replies: (consolidation != ConsolidationMode::None).then(HashMap::new), callback, @@ -1818,35 +1816,31 @@ impl Session { ); let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); + if destination != Locality::SessionLocal { - #[allow(unused_mut)] - let mut ext_attachment = None; - #[cfg(feature = "unstable")] - { - if let Some(attachment) = attachment.clone() { - ext_attachment = Some(attachment.into()); - } - } + let ext_attachment = attachment.clone().map(Into::into); primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: request::ext::QoSType::request_default(), + ext_qos: qos.into(), ext_tstamp: None, - ext_nodeid: request::ext::NodeIdType::default(), + ext_nodeid: request::ext::NodeIdType::DEFAULT, ext_target: target, ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { - parameters: selector.parameters().to_string(), + consolidation, + parameters: parameters.to_string(), + #[cfg(feature = "unstable")] + ext_sinfo: source.into(), + #[cfg(not(feature = "unstable"))] ext_sinfo: None, - ext_consolidation: consolidation.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), ext_attachment, ext_unknown: vec![], @@ -1857,23 +1851,78 @@ impl Session { self.handle_query( true, &wexpr, - selector.parameters(), + parameters.as_str(), qid, target, - consolidation.into(), + consolidation, value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone(), - payload: v.payload.clone(), + encoding: v.encoding.clone().into(), + payload: v.payload.clone().into(), }), - #[cfg(feature = "unstable")] attachment, ); } Ok(()) } + #[cfg(feature = "unstable")] + pub(crate) fn liveliness_query( + &self, + key_expr: &KeyExpr<'_>, + timeout: Duration, + callback: Callback<'static, Reply>, + ) -> ZResult<()> { + tracing::trace!("liveliness.get({}, {:?})", key_expr, timeout); + let mut state = zwrite!(self.state); + let id = state.liveliness_qid_counter.fetch_add(1, Ordering::SeqCst); + let token = self.task_controller.get_cancellation_token(); + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let state = self.state.clone(); + let zid = self.runtime.zid(); + async move { + tokio::select! 
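// ---------------------------------------------------------------------------
// Usage sketch (unstable API; not part of the patch): the liveliness query
// above is an Interest with mode Current, so every live token comes back as
// one reply and the timeout task injects the "Timeout" error reply.
// `timeout(..)` on the builder is an assumption.
use std::time::Duration;

use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let replies = session
        .liveliness()
        .get("key/**")
        .timeout(Duration::from_secs(5))
        .await
        .unwrap();
    while let Ok(reply) = replies.recv_async().await {
        if let Ok(sample) = reply.result() {
            println!("alive: {}", sample.key_expr());
        }
    }
}
// ---------------------------------------------------------------------------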
{ + _ = tokio::time::sleep(timeout) => { + let mut state = zwrite!(state); + if let Some(query) = state.liveliness_queries.remove(&id) { + std::mem::drop(state); + tracing::debug!("Timeout on liveliness query {}! Send error and close.", id); + (query.callback)(Reply { + result: Err(Value::new("Timeout", Encoding::ZENOH_STRING).into()), + #[cfg(feature = "unstable")] + replier_id: Some(zid.into()), + }); + } + } + _ = token.cancelled() => {} + } + } + }); + + tracing::trace!("Register liveliness query {}", id); + let wexpr = key_expr.to_wire(self).to_owned(); + state + .liveliness_queries + .insert(id, LivelinessQueryState { callback }); + + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + + primitives.send_interest(Interest { + id, + mode: InterestMode::Current, + options: InterestOptions::KEYEXPRS + InterestOptions::TOKENS, + wire_expr: Some(wexpr.clone()), + ext_qos: request::ext::QoSType::DEFAULT, + ext_tstamp: None, + ext_nodeid: request::ext::NodeIdType::DEFAULT, + }); + + Ok(()) + } + #[allow(clippy::too_many_arguments)] pub(crate) fn handle_query( &self, @@ -1882,19 +1931,19 @@ impl Session { parameters: &str, qid: RequestId, _target: TargetType, - _consolidation: ConsolidationType, + _consolidation: Consolidation, body: Option, - #[cfg(feature = "unstable")] attachment: Option, + attachment: Option, ) { - let (primitives, key_expr, callbacks) = { + let (primitives, key_expr, queryables) = { let state = zread!(self.state); match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - let callbacks = state + let queryables = state .queryables - .values() + .iter() .filter( - |queryable| + |(_, queryable)| (queryable.origin == Locality::Any || (local == (queryable.origin == Locality::SessionLocal))) && @@ -1911,12 +1960,12 @@ impl Session { } } ) - .map(|qable| qable.callback.clone()) - .collect::>>(); + .map(|(id, qable)| (*id, qable.callback.clone())) + .collect::)>>(); ( state.primitives.as_ref().unwrap().clone(), key_expr.into_owned(), - callbacks, + queryables, ) } Err(err) => { @@ -1926,37 +1975,35 @@ impl Session { } }; - let parameters = parameters.to_owned(); - - let zid = self.runtime.zid(); // @TODO build/use prebuilt specific zid + let zid = self.runtime.zid(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr, - parameters, - value: body.map(|b| Value { - payload: b.payload, - encoding: b.encoding, + let query_inner = Arc::new(QueryInner { + key_expr, + parameters: parameters.to_owned().into(), + qid, + zid: zid.into(), + primitives: if local { + Arc::new(self.clone()) + } else { + primitives + }, + }); + for (eid, callback) in queryables { + callback(Query { + inner: query_inner.clone(), + eid, + value: body.as_ref().map(|b| Value { + payload: b.payload.clone().into(), + encoding: b.encoding.clone().into(), }), - qid, - zid, - primitives: if local { - Arc::new(self.clone()) - } else { - primitives - }, - #[cfg(feature = "unstable")] - attachment, - }), - }; - for callback in callbacks.iter() { - callback(query.clone()); + attachment: attachment.clone(), + }); } } } impl<'s> SessionDeclarations<'s, 'static> for Arc { - /// Create a [`Subscriber`](Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. 
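// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the dispatch in handle_query
// above hands a query to every local queryable whose key expression
// intersects the query's, each callback receiving its own entity id (eid).
// Intersection is reduced to string equality in this sketch.
fn main() {
    let queryables = [(1u32, "key/expression"), (2, "other/expr")];
    let query_ke = "key/expression";
    let targets: Vec<u32> = queryables
        .iter()
        .filter(|(_, ke)| *ke == query_ke) // real code uses key-expression intersection
        .map(|(eid, _)| *eid)
        .collect();
    assert_eq!(targets, [1]);
}
// ---------------------------------------------------------------------------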
/// /// # Arguments /// @@ -1966,11 +2013,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -1983,7 +2029,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'static, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'static, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1991,37 +2037,36 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { SubscriberBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - reliability: Reliability::default(), - mode: PushMode, + #[cfg(feature = "unstable")] + reliability: Reliability::DEFAULT, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } - /// Create a [`Queryable`](Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. /// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( + /// query.reply( /// "key/expression", /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -2039,11 +2084,11 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { key_expr: key_expr.try_into().map_err(Into::into), complete: false, origin: Locality::default(), - handler: DefaultHandler, + handler: DefaultHandler::default(), } } - /// Create a [`Publisher`](crate::publication::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. 
/// /// # Arguments /// @@ -2053,14 +2098,13 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -2074,8 +2118,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::default(), - priority: Priority::default(), + encoding: Encoding::default(), + congestion_control: CongestionControl::DEFAULT, + priority: Priority::DEFAULT, + is_express: false, destination: Locality::default(), } } @@ -2086,13 +2132,12 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2112,6 +2157,9 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { } impl Primitives for Session { + fn send_interest(&self, msg: zenoh_protocol::network::Interest) { + trace!("recv Interest {} {:?}", msg.id, msg.wire_expr); + } fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { zenoh_protocol::network::DeclareBody::DeclareKeyExpr(m) => { @@ -2119,18 +2167,21 @@ impl Primitives for Session { let state = &mut zwrite!(self.state); match state.remote_key_to_expr(&m.wire_expr) { Ok(key_expr) => { - let mut subs = Vec::new(); - for sub in state.subscribers.values() { - if key_expr.intersects(&sub.key_expr) { - subs.push(sub.clone()); + let mut res_node = ResourceNode::new(key_expr.clone().into()); + for kind in [ + SubscriberKind::Subscriber, + SubscriberKind::LivelinessSubscriber, + ] { + for sub in state.subscribers(kind).values() { + if key_expr.intersects(&sub.key_expr) { + res_node.subscribers_mut(kind).push(sub.clone()); + } } } - let res = Resource::Node(ResourceNode { - key_expr: key_expr.into(), - subscribers: subs, - }); - state.remote_resources.insert(m.id, res); + state + .remote_resources + .insert(m.id, Resource::Node(res_node)); } Err(e) => error!( "Received Resource for invalid wire_expr `{}`: {}", @@ -2145,83 +2196,159 @@ impl Primitives for Session { trace!("recv DeclareSubscriber {} {:?}", m.id, m.wire_expr); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.wire_expr, false) { + let mut state = zwrite!(self.state); + match state + .wireexpr_to_keyexpr(&m.wire_expr, false) + .map(|e| e.into_owned()) + { Ok(expr) => { + state.remote_subscribers.insert(m.id, expr.clone()); self.update_status_up(&state, &expr); + } + Err(err) => { + tracing::error!( + "Received DeclareSubscriber for unknown wire_expr: {}", + err + ) + } + } + } + } + zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { + trace!("recv UndeclareSubscriber 
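// ---------------------------------------------------------------------------
// Usage sketch (not part of the patch) for the new PublisherBuilder fields
// above; `express(..)` and the other method names, as well as pulling
// CongestionControl and Priority from the prelude, are assumptions matching
// the `is_express`, `priority` and `congestion_control` fields.
use zenoh::prelude::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(zenoh::config::peer()).await.unwrap();
    let publisher = session
        .declare_publisher("key/expression")
        .congestion_control(CongestionControl::Block)
        .priority(Priority::DataHigh)
        .express(true) // batching bypass corresponding to `is_express`
        .await
        .unwrap();
    publisher.put("value").await.unwrap();
}
// ---------------------------------------------------------------------------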
{:?}", m.id); + #[cfg(feature = "unstable")] + { + let mut state = zwrite!(self.state); + if let Some(expr) = state.remote_subscribers.remove(&m.id) { + self.update_status_down(&state, &expr); + } else { + tracing::error!("Received Undeclare Subscriber for unknown id: {}", m.id); + } + } + } + zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { + trace!("recv DeclareQueryable {} {:?}", m.id, m.wire_expr); + } + zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { + trace!("recv UndeclareQueryable {:?}", m.id); + } + zenoh_protocol::network::DeclareBody::DeclareToken(m) => { + trace!("recv DeclareToken {:?}", m.id); + #[cfg(feature = "unstable")] + { + let mut state = zwrite!(self.state); + match state + .wireexpr_to_keyexpr(&m.wire_expr, false) + .map(|e| e.into_owned()) + { + Ok(key_expr) => { + if let Some(interest_id) = msg.interest_id { + if let Some(query) = state.liveliness_queries.get(&interest_id) { + let reply = Reply { + result: Ok(Sample { + key_expr, + payload: ZBytes::empty(), + kind: SampleKind::Put, + encoding: Encoding::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }), + #[cfg(feature = "unstable")] + replier_id: None, + }; + + (query.callback)(reply); + } + } else { + state.remote_tokens.insert(m.id, key_expr.clone()); - if expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) - { drop(state); - self.handle_data( + + self.execute_subscriber_callbacks( false, &m.wire_expr, None, ZBuf::default(), + SubscriberKind::LivelinessSubscriber, #[cfg(feature = "unstable")] None, ); } } Err(err) => { - tracing::error!( - "Received DeclareSubscriber for unknown wire_expr: {}", - err - ) + tracing::error!("Received DeclareToken for unknown wire_expr: {}", err) } } } } - zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { - trace!("recv UndeclareSubscriber {:?}", m.id); + zenoh_protocol::network::DeclareBody::UndeclareToken(m) => { + trace!("recv UndeclareToken {:?}", m.id); #[cfg(feature = "unstable")] { - let state = zread!(self.state); - match state.wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) { - Ok(expr) => { - self.update_status_down(&state, &expr); + let mut state = zwrite!(self.state); + if let Some(key_expr) = state.remote_tokens.remove(&m.id) { + drop(state); + + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; - if expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) - { + self.execute_subscriber_callbacks( + false, + &key_expr.to_wire(self), + Some(data_info), + ZBuf::default(), + SubscriberKind::LivelinessSubscriber, + #[cfg(feature = "unstable")] + None, + ); + } else if m.ext_wire_expr.wire_expr != WireExpr::empty() { + match state + .wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) + .map(|e| e.into_owned()) + { + Ok(key_expr) => { drop(state); + let data_info = DataInfo { kind: SampleKind::Delete, ..Default::default() }; - self.handle_data( + + self.execute_subscriber_callbacks( false, - &m.ext_wire_expr.wire_expr, + &key_expr.to_wire(self), Some(data_info), ZBuf::default(), + SubscriberKind::LivelinessSubscriber, #[cfg(feature = "unstable")] None, ); } - } - Err(err) => { - tracing::error!( - "Received Forget Subscriber for unknown key_expr: {}", - err - ) + Err(err) => { + tracing::error!( + "Received UndeclareToken for unknown wire_expr: {}", + err + ) + } } } } } - zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => 
{ - trace!("recv DeclareQueryable {} {:?}", m.id, m.wire_expr); - } - zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { - trace!("recv UndeclareQueryable {:?}", m.id); + DeclareBody::DeclareFinal(DeclareFinal) => { + trace!("recv DeclareFinal {:?}", msg.interest_id); + + #[cfg(feature = "unstable")] + if let Some(interest_id) = msg.interest_id { + let mut state = zwrite!(self.state); + let _ = state.liveliness_queries.remove(&interest_id); + } } - DeclareBody::DeclareToken(_) => todo!(), - DeclareBody::UndeclareToken(_) => todo!(), - DeclareBody::DeclareInterest(_) => todo!(), - DeclareBody::FinalInterest(_) => todo!(), - DeclareBody::UndeclareInterest(_) => todo!(), } } @@ -2231,18 +2358,18 @@ impl Primitives for Session { PushBody::Put(m) => { let info = DataInfo { kind: SampleKind::Put, - encoding: Some(m.encoding), + encoding: Some(m.encoding.into()), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.into()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data( + self.execute_subscriber_callbacks( false, &msg.wire_expr, Some(info), m.payload, - #[cfg(feature = "unstable")] + SubscriberKind::Subscriber, m.ext_attachment.map(Into::into), ) } @@ -2252,15 +2379,15 @@ impl Primitives for Session { encoding: None, timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_id: m.ext_sinfo.as_ref().map(|i| i.id.into()), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - self.handle_data( + self.execute_subscriber_callbacks( false, &msg.wire_expr, Some(info), ZBuf::empty(), - #[cfg(feature = "unstable")] + SubscriberKind::Subscriber, m.ext_attachment.map(Into::into), ) } @@ -2276,53 +2403,30 @@ impl Primitives for Session { &m.parameters, msg.id, msg.ext_target, - m.ext_consolidation, + m.consolidation, m.ext_body, - #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), - RequestBody::Put(_) => (), - RequestBody::Del(_) => (), - RequestBody::Pull(_) => todo!(), } } fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { - ResponseBody::Ack(_) => { - tracing::warn!( - "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." - ) - } - ResponseBody::Put(_) => { - tracing::warn!( - "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
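// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the token bookkeeping above
// maps remote token ids to key expressions; a DeclareToken surfaces as a
// Put-like sample to liveliness subscribers, an UndeclareToken as a
// Delete-like one. All names in this sketch are hypothetical.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Event {
    Put(String),
    Delete(String),
}

fn on_declare_token(tokens: &mut HashMap<u32, String>, id: u32, ke: &str) -> Option<Event> {
    // In this sketch, re-declaring an already known token produces no event.
    tokens
        .insert(id, ke.to_string())
        .is_none()
        .then(|| Event::Put(ke.to_string()))
}

fn on_undeclare_token(tokens: &mut HashMap<u32, String>, id: u32) -> Option<Event> {
    tokens.remove(&id).map(Event::Delete)
}

fn main() {
    let mut tokens = HashMap::new();
    assert_eq!(
        on_declare_token(&mut tokens, 1, "node/a"),
        Some(Event::Put("node/a".into()))
    );
    assert_eq!(
        on_undeclare_token(&mut tokens, 1),
        Some(Event::Delete("node/a".into()))
    );
}
// ---------------------------------------------------------------------------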
- ) - } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { Some(query) => { let callback = query.callback.clone(); std::mem::drop(state); - let value = match e.ext_body { - Some(body) => Value { - payload: body.payload, - encoding: body.encoding, - }, - None => Value { - payload: ZBuf::empty(), - encoding: zenoh_protocol::core::Encoding::EMPTY, - }, - }; - let replier_id = match e.ext_sinfo { - Some(info) => info.zid, - None => ZenohId::rand(), + let value = Value { + payload: e.payload.into(), + encoding: e.encoding.into(), }; let new_reply = Reply { - replier_id, - sample: Err(value), + result: Err(value.into()), + #[cfg(feature = "unstable")] + replier_id: e.ext_sinfo.map(|info| info.id.zid), }; callback(new_reply); } @@ -2342,66 +2446,70 @@ impl Primitives for Session { }; match state.queries.get_mut(&msg.rid) { Some(query) => { - if !matches!( - query - .selector - .parameters() - .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM]), - Ok([true]) - ) && !query.selector.key_expr.intersects(&key_expr) - { + let c = + zcondfeat!("unstable", !query.parameters.reply_key_expr_any(), true); + if c && !query.key_expr.intersects(&key_expr) { tracing::warn!( "Received Reply for `{}` from `{:?}, which didn't match query `{}`: dropping Reply.", key_expr, msg.ext_respid, - query.selector + query.selector() ); return; } - let key_expr = match &query.scope { - Some(scope) => { - if !key_expr.starts_with(&***scope) { - tracing::warn!( - "Received Reply for `{}` from `{:?}, which didn't start with scope `{}`: dropping Reply.", - key_expr, - msg.ext_respid, - scope, - ); - return; - } - match KeyExpr::try_from(&key_expr[(scope.len() + 1)..]) { - Ok(key_expr) => key_expr, - Err(e) => { - tracing::warn!( - "Error unscoping received Reply for `{}` from `{:?}: {}", - key_expr, - msg.ext_respid, - e, - ); - return; - } - } - } - None => key_expr, - }; - let info = DataInfo { - kind: SampleKind::Put, - encoding: Some(m.encoding), - timestamp: m.timestamp, - qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.zid), - source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), - }; - #[allow(unused_mut)] - let mut sample = - Sample::with_info(key_expr.into_owned(), m.payload, Some(info)); - #[cfg(feature = "unstable")] - { - sample.attachment = m.ext_attachment.map(Into::into); + + struct Ret { + payload: ZBuf, + info: DataInfo, + attachment: Option, } + let Ret { + payload, + info, + attachment, + } = match m.payload { + ReplyBody::Put(Put { + timestamp, + encoding, + ext_sinfo, + ext_attachment: _attachment, + payload, + .. + }) => Ret { + payload, + info: DataInfo { + kind: SampleKind::Put, + encoding: Some(encoding.into()), + timestamp, + qos: QoS::from(msg.ext_qos), + source_id: ext_sinfo.as_ref().map(|i| i.id.into()), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + attachment: _attachment.map(Into::into), + }, + ReplyBody::Del(Del { + timestamp, + ext_sinfo, + ext_attachment: _attachment, + .. 
+ }) => Ret { + payload: ZBuf::empty(), + info: DataInfo { + kind: SampleKind::Delete, + encoding: None, + timestamp, + qos: QoS::from(msg.ext_qos), + source_id: ext_sinfo.as_ref().map(|i| i.id.into()), + source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), + }, + attachment: _attachment.map(Into::into), + }, + }; + let sample = info.into_sample(key_expr.into_owned(), payload, attachment); let new_reply = Reply { - sample: Ok(sample), - replier_id: ZenohId::rand(), // TODO + result: Ok(sample), + #[cfg(feature = "unstable")] + replier_id: None, }; let callback = match query.reception_mode { @@ -2410,15 +2518,15 @@ impl Primitives for Session { } ConsolidationMode::Monotonic => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2434,7 +2542,7 @@ impl Primitives for Session { None => { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2446,17 +2554,17 @@ impl Primitives for Session { } } } - ConsolidationMode::Latest => { + Consolidation::Auto | ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( - new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), + new_reply.result.as_ref().unwrap().key_expr.as_keyexpr(), ) { Some(reply) => { - if new_reply.sample.as_ref().unwrap().timestamp - > reply.sample.as_ref().unwrap().timestamp + if new_reply.result.as_ref().unwrap().timestamp + > reply.result.as_ref().unwrap().timestamp { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2469,7 +2577,7 @@ impl Primitives for Session { None => { query.replies.as_mut().unwrap().insert( new_reply - .sample + .result .as_ref() .unwrap() .key_expr @@ -2525,8 +2633,8 @@ impl Primitives for Session { impl Drop for Session { fn drop(&mut self) { - if self.alive { - let _ = self.clone().close().res_sync(); + if self.close_on_drop { + let _ = self.clone().close().wait(); } } } @@ -2540,21 +2648,20 @@ impl fmt::Debug for Session { /// Functions to create zenoh entities /// /// This trait contains functions to create zenoh entities like -/// [`Subscriber`](crate::subscriber::Subscriber), and -/// [`Queryable`](crate::queryable::Queryable) +/// [`Subscriber`](crate::pubsub::Subscriber), and +/// [`Queryable`](crate::query::Queryable) /// /// This trait is implemented by [`Session`](crate::session::Session) itself and -/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](crate::session::Arc) +/// by wrappers [`SessionRef`](crate::session::SessionRef) and [`Arc`](std::sync::Arc) /// /// # Examples /// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") -/// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2565,7 +2672,7 @@ impl fmt::Debug for Session { /// # } /// ``` pub trait SessionDeclarations<'s, 'a> { - /// Create a 
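// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the Latest consolidation above
// keeps, per key expression, only the reply with the newest timestamp
// (Monotonic additionally forwards each strictly newer reply as it arrives).
// A reduced sketch over (key, timestamp) pairs:
use std::collections::HashMap;

fn consolidate_latest<'a>(replies: &[(&'a str, u64)]) -> HashMap<&'a str, u64> {
    let mut latest = HashMap::new();
    for &(key, ts) in replies {
        match latest.get(key) {
            Some(&prev) if prev >= ts => {} // an equal or newer reply is already stored
            _ => {
                latest.insert(key, ts);
            }
        }
    }
    latest
}

fn main() {
    let out = consolidate_latest(&[("k/a", 1), ("k/a", 3), ("k/b", 2), ("k/a", 2)]);
    assert_eq!(out["k/a"], 3);
    assert_eq!(out["k/b"], 2);
}
// ---------------------------------------------------------------------------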
[`Subscriber`](crate::subscriber::Subscriber) for the given key expression. + /// Create a [`Subscriber`](crate::pubsub::Subscriber) for the given key expression. /// /// # Arguments /// @@ -2575,11 +2682,10 @@ pub trait SessionDeclarations<'s, 'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let subscriber = session.declare_subscriber("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { @@ -2592,35 +2698,34 @@ pub trait SessionDeclarations<'s, 'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Queryable`](crate::queryable::Queryable) for the given key expression. + /// Create a [`Queryable`](crate::query::Queryable) for the given key expression. /// /// # Arguments /// /// * `key_expr` - The key expression matching the queries the - /// [`Queryable`](crate::queryable::Queryable) will reply to + /// [`Queryable`](crate::query::Queryable) will reply to /// /// # Examples /// ```no_run /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let queryable = session.declare_queryable("key/expression") - /// .res() /// .await /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply(Ok(Sample::try_from( + /// query.reply( /// "key/expression", /// "value", - /// ).unwrap())).res().await.unwrap(); + /// ).await.unwrap(); /// } /// }).await; /// # } @@ -2633,7 +2738,7 @@ pub trait SessionDeclarations<'s, 'a> { TryIntoKeyExpr: TryInto>, >>::Error: Into; - /// Create a [`Publisher`](crate::publication::Publisher) for the given key expression. + /// Create a [`Publisher`](crate::pubsub::Publisher) for the given key expression. 
/// /// # Arguments /// @@ -2643,14 +2748,13 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let publisher = session.declare_publisher("key/expression") - /// .res() /// .await /// .unwrap(); - /// publisher.put("value").res().await.unwrap(); + /// publisher.put("value").await.unwrap(); /// # } /// ``` fn declare_publisher<'b, TryIntoKeyExpr>( @@ -2667,13 +2771,12 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap().into_arc(); /// let liveliness = session /// .liveliness() /// .declare_token("key/expression") - /// .res() /// .await /// .unwrap(); /// # } @@ -2686,9 +2789,9 @@ pub trait SessionDeclarations<'s, 'a> { /// ``` /// # #[tokio::main] /// # async fn main() { - /// use zenoh::prelude::r#async::*; + /// use zenoh::prelude::*; /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); /// let info = session.info(); /// # } /// ``` @@ -2696,6 +2799,11 @@ pub trait SessionDeclarations<'s, 'a> { } impl crate::net::primitives::EPrimitives for Session { + #[inline] + fn send_interest(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_declare(ctx.msg) @@ -2707,21 +2815,200 @@ impl crate::net::primitives::EPrimitives for Session { } #[inline] - fn send_request(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_request(ctx.msg) + fn send_request(&self, msg: Request) { + (self as &dyn Primitives).send_request(msg) } #[inline] - fn send_response(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response(ctx.msg) + fn send_response(&self, msg: Response) { + (self as &dyn Primitives).send_response(msg) } #[inline] - fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response_final(ctx.msg) + fn send_response_final(&self, msg: ResponseFinal) { + (self as &dyn Primitives).send_response_final(msg) } fn as_any(&self) -> &dyn std::any::Any { self } } + +/// Open a zenoh [`Session`]. 
+/// +/// # Arguments +/// +/// * `config` - The [`Config`] for the zenoh session +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// # } +/// ``` +/// +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use std::str::FromStr; +/// use zenoh::{session::ZenohId, prelude::*}; +/// +/// let mut config = zenoh::config::peer(); +/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); +/// config.connect.endpoints.set( +/// ["tcp/10.10.10.10:7447", "tcp/11.11.11.11:7447"].iter().map(|s|s.parse().unwrap()).collect()); +/// +/// let session = zenoh::open(config).await.unwrap(); +/// # } +/// ``` +pub fn open(config: TryIntoConfig) -> OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + OpenBuilder { + config, + #[cfg(feature = "shared-memory")] + shm_clients: None, + } +} + +/// A builder returned by [`open`] used to open a zenoh [`Session`]. +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::*; +/// +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// # } +/// ``` +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + config: TryIntoConfig, + #[cfg(feature = "shared-memory")] + shm_clients: Option>, +} + +#[cfg(feature = "shared-memory")] +impl OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + pub fn with_shm_clients(mut self, shm_clients: Arc) -> Self { + self.shm_clients = Some(shm_clients); + self + } +} + +impl Resolvable for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type To = ZResult; +} + +impl Wait for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + fn wait(self) -> ::To { + let config: crate::config::Config = self + .config + .try_into() + .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; + Session::new( + config, + #[cfg(feature = "shared-memory")] + self.shm_clients, + ) + .wait() + } +} + +impl IntoFuture for OpenBuilder +where + TryIntoConfig: std::convert::TryInto + Send + 'static, + >::Error: std::fmt::Debug, +{ + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} + +/// Initialize a Session with an existing Runtime. +/// This operation is used by the plugins to share the same Runtime as the router. +#[zenoh_macros::internal] +pub fn init(runtime: Runtime) -> InitBuilder { + InitBuilder { + runtime, + aggregated_subscribers: vec![], + aggregated_publishers: vec![], + } +} + +/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. 
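// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the Resolvable/Wait/IntoFuture
// pattern used by OpenBuilder and InitBuilder routes `.await` through
// IntoFuture over an already resolved value, so one builder serves both sync
// (`wait`) and async (`.await`) callers. `Builder` here is a toy stand-in.
use std::future::{ready, IntoFuture, Ready};

struct Builder {
    input: u32,
}

impl Builder {
    fn wait(self) -> u32 {
        self.input * 2 // the synchronous resolution does the actual work
    }
}

impl IntoFuture for Builder {
    type Output = u32;
    type IntoFuture = Ready<u32>;

    fn into_future(self) -> Self::IntoFuture {
        ready(self.wait()) // the async path just wraps the sync one
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(Builder { input: 21 }.wait(), 42); // sync
    assert_eq!(Builder { input: 21 }.await, 42); // async, via IntoFuture
}
// ---------------------------------------------------------------------------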
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +#[doc(hidden)] +#[zenoh_macros::internal] +pub struct InitBuilder { + runtime: Runtime, + aggregated_subscribers: Vec, + aggregated_publishers: Vec, +} + +#[zenoh_macros::internal] +impl InitBuilder { + #[inline] + pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { + self.aggregated_subscribers = exprs; + self + } + + #[inline] + pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { + self.aggregated_publishers = exprs; + self + } +} + +#[zenoh_macros::internal] +impl Resolvable for InitBuilder { + type To = ZResult; +} + +#[zenoh_macros::internal] +impl Wait for InitBuilder { + fn wait(self) -> ::To { + Ok(Session::init( + self.runtime, + self.aggregated_subscribers, + self.aggregated_publishers, + ) + .wait()) + } +} + +#[zenoh_macros::internal] +impl IntoFuture for InitBuilder { + type Output = ::To; + type IntoFuture = Ready<::To>; + + fn into_future(self) -> Self::IntoFuture { + std::future::ready(self.wait()) + } +} diff --git a/zenoh/src/api/subscriber.rs b/zenoh/src/api/subscriber.rs new file mode 100644 index 0000000000..0e82a20331 --- /dev/null +++ b/zenoh/src/api/subscriber.rs @@ -0,0 +1,558 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::{ + fmt, + future::{IntoFuture, Ready}, + ops::{Deref, DerefMut}, + sync::Arc, +}; + +use zenoh_core::{Resolvable, Wait}; +use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; +use zenoh_result::ZResult; +#[cfg(feature = "unstable")] +use {zenoh_config::wrappers::EntityGlobalId, zenoh_protocol::core::EntityGlobalIdProto}; + +use super::{ + handlers::{locked, Callback, DefaultHandler, IntoHandler}, + key_expr::KeyExpr, + sample::{Locality, Sample}, + session::{SessionRef, UndeclarableSealed}, + Id, +}; +#[cfg(feature = "unstable")] +use crate::pubsub::Reliability; + +pub(crate) struct SubscriberState { + pub(crate) id: Id, + pub(crate) remote_id: Id, + pub(crate) key_expr: KeyExpr<'static>, + pub(crate) origin: Locality, + pub(crate) callback: Callback<'static, Sample>, +} + +impl fmt::Debug for SubscriberState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Subscriber") + .field("id", &self.id) + .field("key_expr", &self.key_expr) + .finish() + } +} + +/// A subscriber that provides data through a callback. +/// +/// CallbackSubscribers can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function +/// and the [`callback`](SubscriberBuilder::callback) function +/// of the resulting builder. +/// +/// Subscribers are automatically undeclared when dropped. 
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let subscriber = session
+///     .declare_subscriber("key/expression")
+///     .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) })
+///     .await
+///     .unwrap();
+/// # }
+/// ```
+#[derive(Debug)]
+pub(crate) struct SubscriberInner<'a> {
+    pub(crate) session: SessionRef<'a>,
+    pub(crate) state: Arc<SubscriberState>,
+    pub(crate) kind: SubscriberKind,
+    pub(crate) undeclare_on_drop: bool,
+}
+
+impl<'a> SubscriberInner<'a> {
+    /// Close a [`CallbackSubscriber`](CallbackSubscriber).
+    ///
+    /// `CallbackSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or
+    /// close the `CallbackSubscriber` asynchronously.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::{prelude::*, sample::Sample};
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// # fn data_handler(_sample: Sample) { };
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .callback(data_handler)
+    ///     .await
+    ///     .unwrap();
+    /// subscriber.undeclare().await.unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn undeclare(self) -> SubscriberUndeclaration<'a> {
+        UndeclarableSealed::undeclare_inner(self, ())
+    }
+}
+
+impl<'a> UndeclarableSealed<(), SubscriberUndeclaration<'a>> for SubscriberInner<'a> {
+    fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> {
+        SubscriberUndeclaration { subscriber: self }
+    }
+}
+
+/// A [`Resolvable`] returned when undeclaring a subscriber.
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let subscriber = session
+///     .declare_subscriber("key/expression")
+///     .await
+///     .unwrap();
+/// subscriber.undeclare().await.unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+pub struct SubscriberUndeclaration<'a> {
+    subscriber: SubscriberInner<'a>,
+}
+
+impl Resolvable for SubscriberUndeclaration<'_> {
+    type To = ZResult<()>;
+}
+
+impl Wait for SubscriberUndeclaration<'_> {
+    fn wait(mut self) -> <Self as Resolvable>::To {
+        // set the flag first to avoid a double panic if this function panics
+        self.subscriber.undeclare_on_drop = false;
+        self.subscriber
+            .session
+            .undeclare_subscriber_inner(self.subscriber.state.id, self.subscriber.kind)
+    }
+}
+
+impl IntoFuture for SubscriberUndeclaration<'_> {
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+impl Drop for SubscriberInner<'_> {
+    fn drop(&mut self) {
+        if self.undeclare_on_drop {
+            let _ = self
+                .session
+                .undeclare_subscriber_inner(self.state.id, self.kind);
+        }
+    }
+}
+
+/// A builder for initializing a [`FlumeSubscriber`].
+///
+/// # Examples
+/// ```
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let subscriber = session
+///     .declare_subscriber("key/expression")
+///     .best_effort()
+///     .await
+///     .unwrap();
+/// # }
+/// ```
+#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
+#[derive(Debug)]
+pub struct SubscriberBuilder<'a, 'b, Handler> {
+    #[cfg(feature = "unstable")]
+    pub session: SessionRef<'a>,
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) session: SessionRef<'a>,
+
+    #[cfg(feature = "unstable")]
+    pub key_expr: ZResult<KeyExpr<'b>>,
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) key_expr: ZResult<KeyExpr<'b>>,
+
+    #[cfg(feature = "unstable")]
+    pub reliability: Reliability,
+
+    #[cfg(feature = "unstable")]
+    pub origin: Locality,
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) origin: Locality,
+
+    #[cfg(feature = "unstable")]
+    pub handler: Handler,
+    #[cfg(not(feature = "unstable"))]
+    pub(crate) handler: Handler,
+}
+
+impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> {
+    /// Receive the samples for this subscription with a callback.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); })
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn callback<Callback>(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Callback>
+    where
+        Callback: Fn(Sample) + Send + Sync + 'static,
+    {
+        let SubscriberBuilder {
+            session,
+            key_expr,
+            #[cfg(feature = "unstable")]
+            reliability,
+            origin,
+            handler: _,
+        } = self;
+        SubscriberBuilder {
+            session,
+            key_expr,
+            #[cfg(feature = "unstable")]
+            reliability,
+            origin,
+            handler: callback,
+        }
+    }
+
+    /// Receive the samples for this subscription with a mutable callback.
+    ///
+    /// Using this guarantees that your callback will never be called concurrently.
+    /// If your callback is also accepted by the [`callback`](SubscriberBuilder::callback) method, we suggest you use it instead of `callback_mut`.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let mut n = 0;
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .callback_mut(move |_sample| { n += 1; })
+    ///     .await
+    ///     .unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn callback_mut<CallbackMut>(
+        self,
+        callback: CallbackMut,
+    ) -> SubscriberBuilder<'a, 'b, impl Fn(Sample) + Send + Sync + 'static>
+    where
+        CallbackMut: FnMut(Sample) + Send + Sync + 'static,
+    {
+        self.callback(locked(callback))
+    }
+
+    /// Receive the samples for this subscription with a [`Handler`](crate::handlers::IntoHandler).
+    ///
+    /// # Examples
+    /// ```no_run
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let subscriber = session
+    ///     .declare_subscriber("key/expression")
+    ///     .with(flume::bounded(32))
+    ///     .await
+    ///     .unwrap();
+    /// while let Ok(sample) = subscriber.recv_async().await {
+    ///     println!("Received: {} {:?}", sample.key_expr(), sample.payload());
+    /// }
+    /// # }
+    /// ```
+    #[inline]
+    pub fn with<Handler>(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler>
+    where
+        Handler: IntoHandler<'static, Sample>,
+    {
+        let SubscriberBuilder {
+            session,
+            key_expr,
+            #[cfg(feature = "unstable")]
+            reliability,
+            origin,
+            handler: _,
+        } = self;
+        SubscriberBuilder {
+            session,
+            key_expr,
+            #[cfg(feature = "unstable")]
+            reliability,
+            origin,
+            handler,
+        }
+    }
+}
+
+impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> {
+    /// Change the subscription reliability.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn reliability(mut self, reliability: Reliability) -> Self {
+        self.reliability = reliability;
+        self
+    }
+
+    /// Change the subscription reliability to `Reliable`.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn reliable(mut self) -> Self {
+        self.reliability = Reliability::Reliable;
+        self
+    }
+
+    /// Change the subscription reliability to `BestEffort`.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn best_effort(mut self) -> Self {
+        self.reliability = Reliability::BestEffort;
+        self
+    }
+
+    /// Restrict the matching publications that will be received by this [`Subscriber`]
+    /// to the ones that have the given [`Locality`](crate::prelude::Locality).
+    #[zenoh_macros::unstable]
+    #[inline]
+    pub fn allowed_origin(mut self, origin: Locality) -> Self {
+        self.origin = origin;
+        self
+    }
+}
+
+// Push mode
+impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Sample> + Send,
+    Handler::Handler: Send,
+{
+    type To = ZResult<Subscriber<'a, Handler::Handler>>;
+}
+
+impl<'a, Handler> Wait for SubscriberBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Sample> + Send,
+    Handler::Handler: Send,
+{
+    fn wait(self) -> <Self as Resolvable>::To {
+        let key_expr = self.key_expr?;
+        let session = self.session;
+        let (callback, receiver) = self.handler.into_handler();
+        session
+            .declare_subscriber_inner(
+                &key_expr,
+                self.origin,
+                callback,
+                #[cfg(feature = "unstable")]
+                &SubscriberInfo {
+                    reliability: self.reliability,
+                },
+                #[cfg(not(feature = "unstable"))]
+                &SubscriberInfo::default(),
+            )
+            .map(|sub_state| Subscriber {
+                subscriber: SubscriberInner {
+                    session,
+                    state: sub_state,
+                    kind: SubscriberKind::Subscriber,
+                    undeclare_on_drop: true,
+                },
+                handler: receiver,
+            })
+    }
+}
+
+impl<'a, Handler> IntoFuture for SubscriberBuilder<'a, '_, Handler>
+where
+    Handler: IntoHandler<'static, Sample> + Send,
+    Handler::Handler: Send,
+{
+    type Output = <Self as Resolvable>::To;
+    type IntoFuture = Ready<<Self as Resolvable>::To>;
+
+    fn into_future(self) -> Self::IntoFuture {
+        std::future::ready(self.wait())
+    }
+}
+
+/// A subscriber that provides data through a [`Handler`](crate::handlers::IntoHandler).
+///
+/// Subscribers can be created from a zenoh [`Session`](crate::Session)
+/// with the [`declare_subscriber`](crate::session::SessionDeclarations::declare_subscriber) function
+/// and the [`with`](SubscriberBuilder::with) function
+/// of the resulting builder.
+///
+/// Subscribers are automatically undeclared when dropped.
+///
+/// # Examples
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let subscriber = session
+///     .declare_subscriber("key/expression")
+///     .with(flume::bounded(32))
+///     .await
+///     .unwrap();
+/// while let Ok(sample) = subscriber.recv_async().await {
+///     println!("Received: {} {:?}", sample.key_expr(), sample.payload());
+/// }
+/// # }
+/// ```
+#[non_exhaustive]
+#[derive(Debug)]
+pub struct Subscriber<'a, Handler> {
+    pub(crate) subscriber: SubscriberInner<'a>,
+    pub(crate) handler: Handler,
+}
+
+impl<'a, Handler> Subscriber<'a, Handler> {
+    /// Returns the [`EntityGlobalId`] of this Subscriber.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let subscriber = session.declare_subscriber("key/expression")
+    ///     .await
+    ///     .unwrap();
+    /// let subscriber_id = subscriber.id();
+    /// # }
+    /// ```
+    #[zenoh_macros::unstable]
+    pub fn id(&self) -> EntityGlobalId {
+        EntityGlobalIdProto {
+            zid: self.subscriber.session.zid().into(),
+            eid: self.subscriber.state.id,
+        }
+        .into()
+    }
+
+    /// Returns the [`KeyExpr`] this Subscriber subscribes to.
+    pub fn key_expr(&self) -> &KeyExpr<'static> {
+        &self.subscriber.state.key_expr
+    }
+
+    /// Returns a reference to this subscriber's handler.
+    /// A handler is anything that implements [`IntoHandler`].
+    /// The default handler is [`DefaultHandler`].
+    pub fn handler(&self) -> &Handler {
+        &self.handler
+    }
+
+    /// Returns a mutable reference to this subscriber's handler.
+    /// A handler is anything that implements [`IntoHandler`].
+    /// The default handler is [`DefaultHandler`].
+    pub fn handler_mut(&mut self) -> &mut Handler {
+        &mut self.handler
+    }
+
+    /// Close a [`Subscriber`].
+    ///
+    /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or
+    /// close the Subscriber asynchronously.
+    ///
+    /// # Examples
+    /// ```
+    /// # #[tokio::main]
+    /// # async fn main() {
+    /// use zenoh::prelude::*;
+    ///
+    /// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+    /// let subscriber = session.declare_subscriber("key/expression")
+    ///     .await
+    ///     .unwrap();
+    /// subscriber.undeclare().await.unwrap();
+    /// # }
+    /// ```
+    #[inline]
+    pub fn undeclare(self) -> SubscriberUndeclaration<'a> {
+        self.subscriber.undeclare()
+    }
+
+    /// Make the subscriber run in background, until the session is closed.
+    #[inline]
+    #[zenoh_macros::unstable]
+    pub fn background(mut self) {
+        // It's not necessary to undeclare this resource when the session closes, as other sessions
+        // will clean all resources related to the closed one.
+        // So we can just never undeclare it.
+        self.subscriber.undeclare_on_drop = false;
+    }
+}
+
+impl<'a, T> UndeclarableSealed<(), SubscriberUndeclaration<'a>> for Subscriber<'a, T> {
+    fn undeclare_inner(self, _: ()) -> SubscriberUndeclaration<'a> {
+        UndeclarableSealed::undeclare_inner(self.subscriber, ())
+    }
+}
+
+impl<Handler> Deref for Subscriber<'_, Handler> {
+    type Target = Handler;
+
+    fn deref(&self) -> &Self::Target {
+        self.handler()
+    }
+}
+impl<Handler> DerefMut for Subscriber<'_, Handler> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.handler_mut()
+    }
+}
+
+/// A [`Subscriber`] that provides data through a `flume` channel.
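+///
+/// A brief sketch (added for illustration, not in the original docs): declaring a
+/// subscriber with an explicit flume channel, as in the builder examples above,
+/// yields exactly this alias.
+/// ```no_run
+/// # #[tokio::main]
+/// # async fn main() {
+/// use zenoh::prelude::*;
+///
+/// let session = zenoh::open(zenoh::config::peer()).await.unwrap();
+/// let subscriber: zenoh::pubsub::FlumeSubscriber<'_> = session
+///     .declare_subscriber("key/expression")
+///     .with(flume::bounded(32))
+///     .await
+///     .unwrap();
+/// # }
+/// ```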
+pub type FlumeSubscriber<'a> = Subscriber<'a, flume::Receiver<Sample>>;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum SubscriberKind {
+    Subscriber,
+    LivelinessSubscriber,
+}
diff --git a/zenoh/src/api/value.rs b/zenoh/src/api/value.rs
new file mode 100644
index 0000000000..88470b3360
--- /dev/null
+++ b/zenoh/src/api/value.rs
@@ -0,0 +1,75 @@
+//
+// Copyright (c) 2023 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
+//
+
+//! Value primitives.
+use super::{bytes::ZBytes, encoding::Encoding};
+
+/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the payload's [`ZBytes`] should be interpreted.
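+///
+/// A short sketch (added for illustration, not in the original docs; `Value` is
+/// re-exported under `zenoh::internal`, which requires the `internal` feature):
+/// ```ignore
+/// use zenoh::internal::Value;
+///
+/// let value = Value::new("payload", zenoh::bytes::Encoding::TEXT_PLAIN);
+/// assert!(!value.is_empty());
+/// ```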
+#[non_exhaustive]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Value {
+    pub payload: ZBytes,
+    pub encoding: Encoding,
+}
+
+impl Value {
+    /// Creates a new [`Value`] with specified [`ZBytes`] and [`Encoding`].
+    pub fn new<T, E>(payload: T, encoding: E) -> Self
+    where
+        T: Into<ZBytes>,
+        E: Into<Encoding>,
+    {
+        Value {
+            payload: payload.into(),
+            encoding: encoding.into(),
+        }
+    }
+    /// Creates an empty [`Value`].
+    pub const fn empty() -> Self {
+        Value {
+            payload: ZBytes::empty(),
+            encoding: Encoding::default(),
+        }
+    }
+    /// Checks if the [`Value`] is empty.
+    /// Value is considered empty if its payload is empty and encoding is default.
+    pub fn is_empty(&self) -> bool {
+        self.payload.is_empty() && self.encoding == Encoding::default()
+    }
+
+    /// Gets binary [`ZBytes`] of this [`Value`].
+    pub fn payload(&self) -> &ZBytes {
+        &self.payload
+    }
+
+    /// Gets [`Encoding`] of this [`Value`].
+    pub fn encoding(&self) -> &Encoding {
+        &self.encoding
+    }
+}
+
+impl<T> From<Option<T>> for Value
+where
+    T: Into<Value>,
+{
+    fn from(t: Option<T>) -> Self {
+        t.map_or_else(Value::empty, Into::into)
+    }
+}
+
+impl Default for Value {
+    fn default() -> Self {
+        Value::empty()
+    }
+}
diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs
deleted file mode 100644
index cf187298a9..0000000000
--- a/zenoh/src/handlers.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-//
-// Copyright (c) 2023 ZettaScale Technology
-//
-// This program and the accompanying materials are made available under the
-// terms of the Eclipse Public License 2.0 which is available at
-// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
-// which is available at https://www.apache.org/licenses/LICENSE-2.0.
-//
-// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
-//
-// Contributors:
-//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
-//
-
-//! Callback handler trait.
-use crate::API_DATA_RECEPTION_CHANNEL_SIZE;
-
-/// An alias for `Arc<T>`.
-pub type Dyn<T> = std::sync::Arc<T>;
-/// An immutable callback function.
-pub type Callback<'a, T> = Dyn<dyn Fn(T) + Send + Sync + 'a>;
-
-/// A type that can be converted into a [`Callback`]-receiver pair.
-///
-/// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that,
-/// while granting you access to the receiver through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`].
-///
-/// Any closure that accepts `T` can be converted into a pair of itself and `()`.
-pub trait IntoCallbackReceiverPair<'a, T> {
-    type Receiver;
-    fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver);
-}
-impl<'a, T, F> IntoCallbackReceiverPair<'a, T> for F
-where
-    F: Fn(T) + Send + Sync + 'a,
-{
-    type Receiver = ();
-    fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver) {
-        (Dyn::from(self), ())
-    }
-}
-impl<T: Send + 'static> IntoCallbackReceiverPair<'static, T>
-    for (flume::Sender<T>, flume::Receiver<T>)
-{
-    type Receiver = flume::Receiver<T>;
-
-    fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) {
-        let (sender, receiver) = self;
-        (
-            Dyn::new(move |t| {
-                if let Err(e) = sender.send(t) {
-                    tracing::error!("{}", e)
-                }
-            }),
-            receiver,
-        )
-    }
-}
-pub struct DefaultHandler;
-impl<T: Send + 'static> IntoCallbackReceiverPair<'static, T> for DefaultHandler {
-    type Receiver = flume::Receiver<T>;
-    fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) {
-        flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_cb_receiver_pair()
-    }
-}
-impl<T: Send + 'static> IntoCallbackReceiverPair<'static, T>
-    for (std::sync::mpsc::SyncSender<T>, std::sync::mpsc::Receiver<T>)
-{
-    type Receiver = std::sync::mpsc::Receiver<T>;
-    fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) {
-        let (sender, receiver) = self;
-        (
-            Dyn::new(move |t| {
-                if let Err(e) = sender.send(t) {
-                    tracing::error!("{}", e)
-                }
-            }),
-            receiver,
-        )
-    }
-}
-
-/// A function that can transform a [`FnMut`]`(T)` to
-/// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex).
-pub fn locked<T>(fnmut: impl FnMut(T)) -> impl Fn(T) {
-    let lock = std::sync::Mutex::new(fnmut);
-    move |x| zlock!(lock)(x)
-}
-
-/// A handler containing 2 callback functions:
-///  - `callback`: the typical callback function. `context` will be passed as its last argument.
-///  - `drop`: a callback called when this handler is dropped.
-///
-/// It is guaranteed that:
-///
-///   - `callback` will never be called once `drop` has started.
-///   - `drop` will only be called **once**, and **after every** `callback` has ended.
-///   - The two previous guarantees imply that `call` and `drop` are never called concurrently.
-pub struct CallbackPair<Callback, DropFn>
-where
-    DropFn: FnMut() + Send + Sync + 'static,
-{
-    pub callback: Callback,
-    pub drop: DropFn,
-}
-
-impl<Callback, DropFn> Drop for CallbackPair<Callback, DropFn>
-where
-    DropFn: FnMut() + Send + Sync + 'static,
-{
-    fn drop(&mut self) {
-        (self.drop)()
-    }
-}
-
-impl<'a, OnEvent, Event, DropFn> IntoCallbackReceiverPair<'a, Event>
-    for CallbackPair<OnEvent, DropFn>
-where
-    OnEvent: Fn(Event) + Send + Sync + 'a,
-    DropFn: FnMut() + Send + Sync + 'static,
-{
-    type Receiver = ();
-    fn into_cb_receiver_pair(self) -> (Callback<'a, Event>, Self::Receiver) {
-        (Dyn::from(move |evt| (self.callback)(evt)), ())
-    }
-}
diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs
index e8db68b790..0190acc319 100644
--- a/zenoh/src/lib.rs
+++ b/zenoh/src/lib.rs
@@ -34,13 +34,13 @@
 //! ### Publishing Data
 //! The example below shows how to produce a value for a key expression.
 //! ```
-//! use zenoh::prelude::r#async::*;
+//! use zenoh::prelude::*;
 //!
 //! #[tokio::main]
 //! async fn main() {
-//!     let session = zenoh::open(config::default()).res().await.unwrap();
-//!     session.put("key/expression", "value").res().await.unwrap();
-//!     session.close().res().await.unwrap();
+//!     let session = zenoh::open(zenoh::config::default()).await.unwrap();
+//!     session.put("key/expression", "value").await.unwrap();
+//!     session.close().await.unwrap();
 //! }
 //! ```
 //!
@@ -48,14 +48,14 @@
 //! The example below shows how to consume values for a key expression.
 //! ```no_run
 //! use futures::prelude::*;
-//! use zenoh::prelude::r#async::*;
+//! use zenoh::prelude::*;
 //!
 //! #[tokio::main]
 //! async fn main() {
-//!     let session = zenoh::open(config::default()).res().await.unwrap();
-//!     let subscriber = session.declare_subscriber("key/expression").res().await.unwrap();
+//!     let session = zenoh::open(zenoh::config::default()).await.unwrap();
+//!     let subscriber = session.declare_subscriber("key/expression").await.unwrap();
 //!     while let Ok(sample) = subscriber.recv_async().await {
-//!         println!("Received: {}", sample);
+//!         println!("Received: {:?}", sample);
 //!     };
 //! }
 //! ```
@@ -65,14 +65,14 @@
 //! resources whose key match the given *key expression*.
 //! ```
 //! use futures::prelude::*;
-//! use zenoh::prelude::r#async::*;
+//! use zenoh::prelude::*;
 //!
 //! #[tokio::main]
 //! async fn main() {
-//!     let session = zenoh::open(config::default()).res().await.unwrap();
-//!     let replies = session.get("key/expression").res().await.unwrap();
+//!     let session = zenoh::open(zenoh::config::default()).await.unwrap();
+//!     let replies = session.get("key/expression").await.unwrap();
 //!     while let Ok(reply) = replies.recv_async().await {
-//!         println!(">> Received {:?}", reply.sample);
+//!         println!(">> Received {:?}", reply.result());
 //!     }
 //! }
 //! ```
@@ -81,36 +81,19 @@ extern crate zenoh_core;
 #[macro_use]
 extern crate zenoh_result;
 
-use git_version::git_version;
-use handlers::DefaultHandler;
-#[zenoh_macros::unstable]
-use net::runtime::Runtime;
-use prelude::*;
-use scouting::ScoutBuilder;
-use std::future::Ready;
-use zenoh_core::{AsyncResolve, Resolvable, SyncResolve};
-pub use zenoh_macros::{ke, kedefine, keformat, kewrite};
-use zenoh_protocol::core::WhatAmIMatcher;
-use zenoh_result::{zerror, ZResult};
-use zenoh_util::concat_enabled_features;
-
-/// A zenoh error.
-pub use zenoh_result::Error;
-/// A zenoh result.
-pub use zenoh_result::ZResult as Result;
-
-const GIT_VERSION: &str = git_version!(prefix = "v", cargo_prefix = "v");
+mod api;
+mod net;
 
 lazy_static::lazy_static!(
     static ref LONG_VERSION: String = format!("{} built with {}", GIT_VERSION, env!("RUSTC_VERSION"));
 );
 
-pub const FEATURES: &str = concat_enabled_features!(
+const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v");
+pub const FEATURES: &str = zenoh_util::concat_enabled_features!(
     prefix = "zenoh",
     features = [
         "auth_pubkey",
         "auth_usrpwd",
-        "complete_n",
         "shared-memory",
         "stats",
         "transport_multilink",
@@ -128,267 +111,347 @@ pub const FEATURES: &str = concat_enabled_features!(
     ]
 );
 
-mod admin;
-#[macro_use]
-mod session;
-pub use session::*;
-
-pub mod key_expr;
-pub(crate) mod net;
-pub use net::runtime;
-pub mod selector;
-#[deprecated = "This module is now a separate crate. Use the crate directly for shorter compile-times"]
-pub use zenoh_config as config;
-pub mod handlers;
-pub mod info;
-#[cfg(feature = "unstable")]
-pub mod liveliness;
-#[cfg(all(feature = "unstable", feature = "plugins"))]
-pub mod plugins;
+#[allow(deprecated)]
+pub use zenoh_core::{AsyncResolve, SyncResolve};
+pub use zenoh_core::{Resolvable, Resolve, Wait};
+/// A zenoh error.
+pub use zenoh_result::Error;
+/// A zenoh result.
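+///
+/// A small sketch (added for illustration, not in the original doc): the blocking
+/// [`Wait`] API returns this alias directly.
+/// ```
+/// use zenoh::Wait;
+///
+/// fn open_peer() -> zenoh::Result<zenoh::Session> {
+///     zenoh::open(zenoh::config::peer()).wait()
+/// }
+/// ```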
+pub use zenoh_result::ZResult as Result;
+#[doc(inline)]
+pub use zenoh_util::{init_log_from_env_or, try_init_log_from_env};
+
+#[doc(inline)]
+pub use crate::{
+    config::Config,
+    scouting::scout,
+    session::{open, Session},
+};
+
 pub mod prelude;
-pub mod publication;
-pub mod query;
-pub mod queryable;
-pub mod sample;
-pub mod subscriber;
-pub mod value;
-#[cfg(feature = "shared-memory")]
-pub use zenoh_shm as shm;
 
-/// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate
-/// reading and writing data.
-pub use zenoh_buffers as buffers;
+/// [Key expressions](https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md) are Zenoh's address space.
+///
+/// In Zenoh, operations are performed on keys. To allow addressing multiple keys with a single operation, we use Key Expressions (KE).
+/// KEs are a small language that expresses sets of keys through a glob-like syntax.
+///
+/// These semantics can be a bit difficult to implement, so this module provides the following facilities:
+///
+/// # Storing Key Expressions
+/// This module provides 3 flavours to store strings that have been validated to respect the KE syntax:
+/// - [`keyexpr`](crate::key_expr::keyexpr) is the equivalent of a [`str`],
+/// - [`OwnedKeyExpr`](crate::key_expr::OwnedKeyExpr) works like an [`Arc<str>`](std::sync::Arc),
+/// - [`KeyExpr`](crate::key_expr::KeyExpr) works like a [`Cow<str>`](std::borrow::Cow), but also stores some additional context internal to Zenoh to optimize
+///   routing and network usage.
+///
+/// All of these types [`Deref`](std::ops::Deref) to [`keyexpr`](crate::key_expr::keyexpr), which notably has methods to check whether one key expression [`intersects`](crate::key_expr::keyexpr::intersects) with another,
+/// or even [`includes`](crate::key_expr::keyexpr::includes) another.
+///
+/// # Tying values to Key Expressions
+/// When storing values tied to Key Expressions, you might want something more specialized than a [`HashMap`](std::collections::HashMap) if you want to respect
+/// the Key Expression semantics with high performance.
+///
+/// Enter [KeTrees](crate::key_expr::keyexpr_tree). These are data-structures specially built to store KE-value pairs in a manner that supports the set-semantics of KEs.
+///
+/// # Building and parsing Key Expressions
+/// A common issue in REST APIs is the association of meaning to sections of the URL, and respecting that API in a convenient manner.
+/// The same issue arises naturally when designing a KE space, and [`KeFormat`](crate::key_expr::format::KeFormat) was designed to help you with this,
+/// both in constructing and in parsing KEs that fit the formats you've defined.
+///
+/// [`kedefine`](crate::key_expr::format::kedefine) also allows you to define formats at compile time, allowing a more performant, but more importantly safer and more convenient use of said formats,
+/// as the [`keformat`](crate::key_expr::format::keformat) and [`kewrite`](crate::key_expr::format::kewrite) macros will be able to tell you if you're attempting to set fields of the format that do not exist.
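+///
+/// A short sketch of compile-time formats (added for illustration, not in the
+/// original doc; requires the `unstable` feature, and the exact pattern syntax is
+/// only assumed here):
+/// ```ignore
+/// use zenoh::key_expr::format::{kedefine, keformat};
+///
+/// kedefine!(
+///     pub file_format: "user_id/${user_id:*}/file/${file:*}",
+/// );
+///
+/// let mut formatter = file_format::formatter();
+/// let ke = keformat!(formatter, user_id = 42, file = "hi.txt").unwrap();
+/// println!("{ke}"); // user_id/42/file/hi.txt
+/// ```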
+pub mod key_expr {
+    #[zenoh_macros::unstable]
+    pub mod keyexpr_tree {
+        pub use zenoh_keyexpr::keyexpr_tree::{
+            impls::KeyedSetProvider,
+            support::{NonWild, UnknownWildness},
+            IKeyExprTree, IKeyExprTreeMut, KeBoxTree,
+        };
+    }
+    #[zenoh_macros::unstable]
+    pub use zenoh_keyexpr::SetIntersectionLevel;
+    pub use zenoh_keyexpr::{canon::Canonize, keyexpr, OwnedKeyExpr};
+
+    pub use crate::api::key_expr::{KeyExpr, KeyExprUndeclaration};
+    // keyexpr format macro support
+    #[zenoh_macros::unstable]
+    pub mod format {
+        pub use zenoh_keyexpr::format::*;
+        pub use zenoh_macros::{kedefine, keformat, kewrite};
+        pub mod macro_support {
+            pub use zenoh_keyexpr::format::macro_support::*;
+        }
+    }
+}
 
-/// Time related types and functions.
-pub mod time {
-    use std::convert::TryFrom;
+/// Zenoh [`Session`] and associated types
+pub mod session {
+    #[zenoh_macros::unstable]
+    pub use zenoh_config::wrappers::{EntityGlobalId, ZenohId};
+    pub use zenoh_protocol::core::EntityId;
+
+    #[zenoh_macros::internal]
+    pub use crate::api::session::{init, InitBuilder};
+    pub use crate::api::{
+        builders::publisher::{SessionDeleteBuilder, SessionPutBuilder},
+        info::{PeersZenohIdBuilder, RoutersZenohIdBuilder, SessionInfo, ZenohIdBuilder},
+        query::SessionGetBuilder,
+        session::{open, OpenBuilder, Session, SessionDeclarations, SessionRef, Undeclarable},
+    };
+}
 
-    pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64};
+/// Sample primitives
+pub mod sample {
+    #[zenoh_macros::unstable]
+    pub use crate::api::sample::Locality;
+    #[zenoh_macros::unstable]
+    pub use crate::api::sample::SourceInfo;
+    pub use crate::api::{
+        builders::sample::{
+            SampleBuilder, SampleBuilderAny, SampleBuilderDelete, SampleBuilderPut,
+            SampleBuilderTrait, TimestampBuilderTrait,
+        },
+        sample::{Sample, SampleFields, SampleKind, SourceSn},
+    };
+}
 
-    /// Generates a reception [`Timestamp`] with id=0x01.
-    /// This operation should be called if a timestamp is required for an incoming [`zenoh::Sample`](crate::Sample)
-    /// that doesn't contain any timestamp.
-    pub fn new_reception_timestamp() -> Timestamp {
-        use std::time::{SystemTime, UNIX_EPOCH};
+/// Payload primitives
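+///
+/// A small round-trip sketch (added for illustration, not in the original doc;
+/// the `serialize`/`deserialize` helpers are only assumed here from the `ZSerde`
+/// exports below):
+/// ```ignore
+/// use zenoh::bytes::ZBytes;
+///
+/// let payload = ZBytes::serialize(42u32);
+/// let n: u32 = payload.deserialize().unwrap();
+/// assert_eq!(n, 42);
+/// ```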
+pub mod bytes {
+    pub use crate::api::{
+        builders::sample::EncodingBuilderTrait,
+        bytes::{
+            Deserialize, OptionZBytes, Serialize, ZBytes, ZBytesIterator, ZBytesReader,
+            ZBytesWriter, ZDeserializeError, ZSerde,
+        },
+        encoding::Encoding,
+    };
+}
 
-        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
-        Timestamp::new(now.into(), TimestampId::try_from([1]).unwrap())
-    }
-}
+/// Pub/sub primitives
+pub mod pubsub {
+    #[zenoh_macros::unstable]
+    pub use zenoh_protocol::core::Reliability;
+
+    #[zenoh_macros::unstable]
+    pub use crate::api::publisher::{
+        MatchingListener, MatchingListenerBuilder, MatchingListenerUndeclaration, MatchingStatus,
+        PublisherDeclarations, PublisherRef,
+    };
+    pub use crate::api::{
+        builders::publisher::{
+            PublicationBuilder, PublicationBuilderDelete, PublicationBuilderPut, PublisherBuilder,
+            PublisherDeleteBuilder, PublisherPutBuilder,
+        },
+        publisher::{Publisher, PublisherUndeclaration},
+        subscriber::{FlumeSubscriber, Subscriber, SubscriberBuilder},
+    };
+}
 
-/// A map of key/value (String,String) properties.
-pub mod properties {
-    use super::prelude::Value;
-    pub use zenoh_collections::Properties;
-
-    /// Convert a set of [`Properties`] into a [`Value`].
-    /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]`
-    /// is converted into Json: `{ "k1": "v1", "k2": "v2" }`
-    pub fn properties_to_json_value(props: &Properties) -> Value {
-        let json_map = props
-            .iter()
-            .map(|(k, v)| (k.clone(), serde_json::Value::String(v.clone())))
-            .collect::<serde_json::Map<String, serde_json::Value>>();
-        serde_json::Value::Object(json_map).into()
-    }
-}
+/// Query/reply primitives
+pub mod query {
+    pub use zenoh_protocol::core::Parameters;
+    #[zenoh_macros::unstable]
+    pub use zenoh_util::time_range::{TimeBound, TimeExpr, TimeRange};
+
+    #[zenoh_macros::internal]
+    pub use crate::api::queryable::ReplySample;
+    #[zenoh_macros::unstable]
+    pub use crate::api::{query::ReplyKeyExpr, selector::ZenohParameters};
+    pub use crate::api::{
+        query::{ConsolidationMode, QueryConsolidation, QueryTarget, Reply, ReplyError},
+        queryable::{
+            Query, Queryable, QueryableBuilder, QueryableUndeclaration, ReplyBuilder,
+            ReplyBuilderDelete, ReplyBuilderPut, ReplyErrBuilder,
+        },
+        selector::Selector,
+    };
+}
 
-/// Scouting primitives.
-pub mod scouting;
+/// Callback handler trait
+pub mod handlers {
+    pub use crate::api::handlers::{
+        locked, Callback, CallbackDrop, DefaultHandler, FifoChannel, IntoHandler, RingChannel,
+        RingChannelHandler,
+    };
+}
 
-/// Scout for routers and/or peers.
-///
-/// [`scout`] spawns a task that periodically sends scout messages and waits for [`Hello`](crate::scouting::Hello) replies.
-///
-/// Drop the returned [`Scout`](crate::scouting::Scout) to stop the scouting task.
-///
-/// # Arguments
-///
-/// * `what` - The kind of zenoh process to scout for
-/// * `config` - The configuration [`Config`] to use for scouting
-///
-/// # Examples
-/// ```no_run
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::scouting::WhatAmI;
-///
-/// let receiver = zenoh::scout(WhatAmI::Peer | WhatAmI::Router, config::default())
-///     .res()
-///     .await
-///     .unwrap();
-/// while let Ok(hello) = receiver.recv_async().await {
-///     println!("{}", hello);
-/// }
-/// # }
-/// ```
-pub fn scout<I: Into<WhatAmIMatcher>, TryIntoConfig>(
-    what: I,
-    config: TryIntoConfig,
-) -> ScoutBuilder<DefaultHandler>
-where
-    TryIntoConfig: std::convert::TryInto<crate::config::Config> + Send + 'static,
-    <TryIntoConfig as std::convert::TryInto<crate::config::Config>>::Error:
-        Into<zenoh_result::Error>,
-{
-    ScoutBuilder {
-        what: what.into(),
-        config: config.try_into().map_err(|e| e.into()),
-        handler: DefaultHandler,
-    }
-}
+/// Quality of service primitives
+pub mod qos {
+    pub use zenoh_protocol::core::CongestionControl;
+
+    pub use crate::api::{builders::sample::QoSBuilderTrait, publisher::Priority};
+}
+
+/// Scouting primitives
+pub mod scouting {
+    pub use zenoh_config::wrappers::Hello;
+
+    pub use crate::api::scouting::{scout, Scout, ScoutBuilder};
+}
+
+/// Liveliness primitives
+///
+/// A [`LivelinessToken`](liveliness::LivelinessToken) is a token whose liveliness is tied
+/// to the Zenoh [`Session`](Session) and can be monitored by remote applications.
/// /// # Examples +/// ### Declaring a token /// ``` /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let liveliness = session +/// .liveliness() +/// .declare_token("key/expression") +/// .await +/// .unwrap(); /// # } /// ``` /// +/// ### Querying tokens /// ``` /// # #[tokio::main] /// # async fn main() { -/// use std::str::FromStr; -/// use zenoh::prelude::r#async::*; -/// -/// let mut config = config::peer(); -/// config.set_id(ZenohId::from_str("221b72df20924c15b8794c6bdb471150").unwrap()); -/// config.connect.endpoints.extend("tcp/10.10.10.10:7447,tcp/11.11.11.11:7447".split(',').map(|s|s.parse().unwrap())); +/// use zenoh::prelude::*; /// -/// let session = zenoh::open(config).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let replies = session.liveliness().get("key/**").await.unwrap(); +/// while let Ok(reply) = replies.recv_async().await { +/// if let Ok(sample) = reply.result() { +/// println!(">> Liveliness token {}", sample.key_expr()); +/// } +/// } /// # } /// ``` -pub fn open(config: TryIntoConfig) -> OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - OpenBuilder { config } -} - -/// A builder returned by [`open`] used to open a zenoh [`Session`]. /// -/// # Examples -/// ``` +/// ### Subscribing to liveliness changes +/// ```no_run /// # #[tokio::main] /// # async fn main() { -/// use zenoh::prelude::r#async::*; +/// use zenoh::{prelude::*, sample::SampleKind}; /// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let session = zenoh::open(zenoh::config::peer()).await.unwrap(); +/// let subscriber = session.liveliness().declare_subscriber("key/**").await.unwrap(); +/// while let Ok(sample) = subscriber.recv_async().await { +/// match sample.kind() { +/// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), +/// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), +/// } +/// } /// # } /// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -pub struct OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - config: TryIntoConfig, +#[zenoh_macros::unstable] +pub mod liveliness { + pub use crate::api::liveliness::{ + Liveliness, LivelinessGetBuilder, LivelinessSubscriberBuilder, LivelinessToken, + LivelinessTokenBuilder, LivelinessTokenUndeclaration, + }; } -impl Resolvable for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type To = ZResult; +/// Timestamp support +pub mod time { + pub use zenoh_protocol::core::{Timestamp, TimestampId, NTP64}; } -impl SyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - fn res_sync(self) -> ::To { - let config: crate::config::Config = self - .config - .try_into() - .map_err(|e| zerror!("Invalid Zenoh configuration {:?}", &e))?; - Session::new(config).res_sync() - } +/// Configuration to pass to [`open`] and [`scout`] functions and associated constants +pub mod config { + // pub use zenoh_config::{ + // client, default, peer, Config, EndPoint, Locator, ModeDependentValue, PermissionsConf, + // 
PluginLoad, ValidatedMap, ZenohId, + // }; + pub use zenoh_config::*; } -impl AsyncResolve for OpenBuilder -where - TryIntoConfig: std::convert::TryInto + Send + 'static, - >::Error: std::fmt::Debug, -{ - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} +#[cfg(all( + feature = "plugins", + not(all(feature = "unstable", feature = "internal")) +))] +compile_error!( + "The plugins support is internal and unstable. The `unstable` and `internal` features must be enabled to use `plugins`." +); -/// Initialize a Session with an existing Runtime. -/// This operation is used by the plugins to share the same Runtime as the router. -#[doc(hidden)] -#[zenoh_macros::unstable] -pub fn init(runtime: Runtime) -> InitBuilder { - InitBuilder { - runtime, - aggregated_subscribers: vec![], - aggregated_publishers: vec![], +#[zenoh_macros::internal] +pub mod internal { + pub use zenoh_core::{ + zasync_executor_init, zasynclock, zerror, zlock, zread, ztimeout, zwrite, ResolveFuture, + }; + pub use zenoh_result::bail; + pub use zenoh_sync::Condition; + pub use zenoh_task::{TaskController, TerminatableTask}; + pub use zenoh_util::{ + zenoh_home, LibLoader, Timed, TimedEvent, TimedHandle, Timer, ZENOH_HOME_ENV_VAR, + }; + + /// A collection of useful buffers used by zenoh internally and exposed to the user to facilitate + /// reading and writing data. + pub mod buffers { + pub use zenoh_buffers::{ + buffer::{Buffer, SplitBuffer}, + reader::{ + AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, + SiphonableReader, + }, + writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, + ZBuf, ZBufReader, ZSlice, ZSliceBuffer, + }; } -} - -/// A builder returned by [`init`] and used to initialize a Session with an existing Runtime. -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[doc(hidden)] -#[zenoh_macros::unstable] -pub struct InitBuilder { - runtime: Runtime, - aggregated_subscribers: Vec, - aggregated_publishers: Vec, -} + /// Initialize a Session with an existing Runtime. + /// This operation is used by the plugins to share the same Runtime as the router. 
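+    ///
+    /// A rough sketch of the intended flow (hypothetical, added for illustration;
+    /// the concrete builder API lives in `crate::net::runtime` and may differ):
+    /// ```ignore
+    /// use zenoh::internal::runtime::RuntimeBuilder;
+    ///
+    /// let runtime = RuntimeBuilder::new(config).build().await.unwrap();
+    /// let session = zenoh::session::init(runtime).await.unwrap();
+    /// ```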
+ #[zenoh_macros::internal] + pub mod runtime { + pub use zenoh_runtime::ZRuntime; -#[zenoh_macros::unstable] -impl InitBuilder { - #[inline] - pub fn aggregated_subscribers(mut self, exprs: Vec) -> Self { - self.aggregated_subscribers = exprs; - self + pub use crate::net::runtime::{AdminSpace, Runtime, RuntimeBuilder}; } - - #[inline] - pub fn aggregated_publishers(mut self, exprs: Vec) -> Self { - self.aggregated_publishers = exprs; - self + /// Plugins support + #[cfg(feature = "plugins")] + pub mod plugins { + pub use crate::api::plugins::{ + PluginsManager, Response, RunningPlugin, RunningPluginTrait, ZenohPlugin, PLUGIN_PREFIX, + }; } -} -#[zenoh_macros::unstable] -impl Resolvable for InitBuilder { - type To = ZResult; -} + pub use zenoh_result::ErrNo; -#[zenoh_macros::unstable] -impl SyncResolve for InitBuilder { - fn res_sync(self) -> ::To { - Ok(Session::init( - self.runtime, - self.aggregated_subscribers, - self.aggregated_publishers, - ) - .res_sync()) - } + pub use crate::api::value::Value; } #[zenoh_macros::unstable] -impl AsyncResolve for InitBuilder { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } +#[cfg(feature = "shared-memory")] +pub mod shm { + pub use zenoh_shm::api::{ + buffer::{ + zshm::{zshm, ZShm}, + zshmmut::{zshmmut, ZShmMut}, + }, + client::{shm_client::ShmClient, shm_segment::ShmSegment}, + client_storage::{ShmClientStorage, GLOBAL_CLIENT_STORAGE}, + common::types::{ChunkID, ProtocolID, SegmentID}, + protocol_implementations::posix::{ + posix_shm_client::PosixShmClient, + posix_shm_provider_backend::{ + LayoutedPosixShmProviderBackendBuilder, PosixShmProviderBackend, + PosixShmProviderBackendBuilder, + }, + protocol_id::POSIX_PROTOCOL_ID, + }, + provider::{ + chunk::{AllocatedChunk, ChunkDescriptor}, + shm_provider::{ + AllocBuilder, AllocBuilder2, AllocLayout, AllocLayoutSizedBuilder, AllocPolicy, + AsyncAllocPolicy, BlockOn, DeallocEldest, DeallocOptimal, DeallocYoungest, + Deallocate, Defragment, DynamicProtocolID, ForceDeallocPolicy, GarbageCollect, + JustAlloc, ProtocolIDSource, ShmProvider, ShmProviderBuilder, + ShmProviderBuilderBackendID, ShmProviderBuilderID, StaticProtocolID, + }, + shm_provider_backend::ShmProviderBackend, + types::{ + AllocAlignment, BufAllocResult, BufLayoutAllocResult, ChunkAllocResult, + MemoryLayout, ZAllocError, ZLayoutAllocError, ZLayoutError, + }, + }, + }; } diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs index 4954062a3d..7ee150d8bb 100644 --- a/zenoh/src/net/codec/linkstate.rs +++ b/zenoh/src/net/codec/linkstate.rs @@ -11,12 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::Zenoh080Routing; -use crate::net::protocol::{ - linkstate, - linkstate::{LinkState, LinkStateList}, -}; use core::convert::TryFrom; + use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, @@ -24,7 +20,13 @@ use zenoh_buffers::{ use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ common::imsg, - core::{Locator, WhatAmI, ZenohId}, + core::{Locator, WhatAmI, ZenohIdProto}, +}; + +use super::Zenoh080Routing; +use crate::net::protocol::{ + linkstate, + linkstate::{LinkState, LinkStateList}, }; // LinkState @@ -83,7 +85,7 @@ where let psid: u64 = codec.read(&mut *reader)?; let sn: u64 = codec.read(&mut *reader)?; let zid = if imsg::has_option(options, linkstate::PID) { - let zid: ZenohId = codec.read(&mut *reader)?; + let zid: ZenohIdProto = codec.read(&mut *reader)?; Some(zid) } else { None diff --git 
a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index fe096a9dfe..59111e5441 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -11,18 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // +use std::{any::Any, sync::Arc}; + +use zenoh_link::Link; +use zenoh_protocol::network::{NetworkBody, NetworkMessage}; +use zenoh_result::ZResult; +use zenoh_transport::{unicast::TransportUnicast, TransportPeerEventHandler}; + use super::Primitives; use crate::net::routing::{ dispatcher::face::Face, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; -use std::{any::Any, sync::Arc}; -use zenoh_link::Link; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; -use zenoh_result::ZResult; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { face: Face, @@ -67,6 +68,7 @@ impl TransportPeerEventHandler for DeMux { match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), + NetworkBody::Interest(m) => self.face.send_interest(m), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index 30c96f400e..837571f7f6 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -18,11 +18,15 @@ use std::any::Any; pub use demux::*; pub use mux::*; -use zenoh_protocol::network::{Declare, Push, Request, Response, ResponseFinal}; +use zenoh_protocol::network::{ + interest::Interest, Declare, Push, Request, Response, ResponseFinal, +}; use super::routing::RoutingContext; pub trait Primitives: Send + Sync { + fn send_interest(&self, msg: Interest); + fn send_declare(&self, msg: Declare); fn send_push(&self, msg: Push); @@ -39,21 +43,25 @@ pub trait Primitives: Send + Sync { pub(crate) trait EPrimitives: Send + Sync { fn as_any(&self) -> &dyn Any; + fn send_interest(&self, ctx: RoutingContext); + fn send_declare(&self, ctx: RoutingContext); fn send_push(&self, msg: Push); - fn send_request(&self, ctx: RoutingContext); + fn send_request(&self, msg: Request); - fn send_response(&self, ctx: RoutingContext); + fn send_response(&self, msg: Response); - fn send_response_final(&self, ctx: RoutingContext); + fn send_response_final(&self, msg: ResponseFinal); } #[derive(Default)] pub struct DummyPrimitives; impl Primitives for DummyPrimitives { + fn send_interest(&self, _msg: Interest) {} + fn send_declare(&self, _msg: Declare) {} fn send_push(&self, _msg: Push) {} @@ -68,15 +76,17 @@ impl Primitives for DummyPrimitives { } impl EPrimitives for DummyPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, _ctx: RoutingContext) {} fn send_push(&self, _msg: Push) {} - fn send_request(&self, _ctx: RoutingContext) {} + fn send_request(&self, _msg: Request) {} - fn send_response(&self, _ctx: RoutingContext) {} + fn send_response(&self, _msg: Response) {} - fn send_response_final(&self, _ctx: RoutingContext) {} + fn send_response_final(&self, _msg: ResponseFinal) {} fn as_any(&self) -> &dyn Any { self diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 3f96ae7890..bc718ba324 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -11,17 +11,20 @@ // Contributors: // ZettaScale Zenoh Team, // +use 
std::sync::OnceLock; + +use zenoh_protocol::network::{ + interest::Interest, Declare, NetworkBody, NetworkMessage, Push, Request, Response, + ResponseFinal, +}; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + use super::{EPrimitives, Primitives}; use crate::net::routing::{ dispatcher::face::{Face, WeakFace}, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; -use std::sync::OnceLock; -use zenoh_protocol::network::{ - Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, -}; -use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; pub struct Mux { pub handler: TransportUnicast, @@ -40,6 +43,34 @@ impl Mux { } impl Primitives for Mux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let Some(face) = face.upgrade() else { + tracing::debug!("Invalid face: {:?}. Interest not sent: {:?}", face, msg); + return; + }; + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::debug!("Uninitialized multiplexer. Interest not sent: {:?}", msg); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), @@ -166,6 +197,31 @@ impl Primitives for Mux { } impl EPrimitives for Mux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { @@ -215,78 +271,75 @@ impl EPrimitives for Mux { } } - fn send_request(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Request(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_request(&self, msg: Request) { + let msg = NetworkMessage { + body: NetworkBody::Request(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + 
.wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Response(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response(&self, msg: Response) { + let msg = NetworkMessage { + body: NetworkBody::Response(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response_final(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::ResponseFinal(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response_final(&self, msg: ResponseFinal) { + let msg = NetworkMessage { + body: NetworkBody::ResponseFinal(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } @@ -312,6 +365,30 @@ impl McastMux { } impl Primitives for McastMux { + fn send_interest(&self, msg: Interest) { + let msg = NetworkMessage { + body: NetworkBody::Interest(msg), + #[cfg(feature = "stats")] + size: None, + }; + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + 
.flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); + } + } + fn send_declare(&self, msg: Declare) { let msg = NetworkMessage { body: NetworkBody::Declare(msg), @@ -438,6 +515,31 @@ impl Primitives for McastMux { } impl EPrimitives for McastMux { + fn send_interest(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Interest(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + outface: ctx.outface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix + .as_ref() + .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } + fn send_declare(&self, ctx: RoutingContext) { let ctx = RoutingContext { msg: NetworkMessage { @@ -487,78 +589,75 @@ impl EPrimitives for McastMux { } } - fn send_request(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Request(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_request(&self, msg: Request) { + let msg = NetworkMessage { + body: NetworkBody::Request(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Response(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response(&self, msg: Response) { + let msg = NetworkMessage { + body: NetworkBody::Response(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| 
(!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } - fn send_response_final(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::ResponseFinal(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_response_final(&self, msg: ResponseFinal) { + let msg = NetworkMessage { + body: NetworkBody::ResponseFinal(msg), + #[cfg(feature = "stats")] + size: None, }; - let prefix = ctx - .wire_expr() - .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) - .flatten() - .cloned(); - let cache = prefix - .as_ref() - .and_then(|p| p.get_egress_cache(ctx.outface.get().unwrap())); - if let Some(ctx) = self.interceptor.intercept(ctx, cache) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + let prefix = ctx + .wire_expr() + .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) + .flatten() + .cloned(); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + if let Some(ctx) = self.interceptor.intercept(ctx, cache) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + tracing::error!("Uninitialized multiplexer!"); } } diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs index ccb5612011..cd8d1a91bf 100644 --- a/zenoh/src/net/protocol/linkstate.rs +++ b/zenoh/src/net/protocol/linkstate.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; +use zenoh_protocol::core::{Locator, WhatAmI, ZenohIdProto}; pub const PID: u64 = 1; // 0x01 pub const WAI: u64 = 1 << 1; // 0x02 @@ -37,7 +37,7 @@ pub const LOC: u64 = 1 << 2; // 0x04 pub(crate) struct LinkState { pub(crate) psid: u64, pub(crate) sn: u64, - pub(crate) zid: Option, + pub(crate) zid: Option, pub(crate) whatami: Option, pub(crate) locators: Option>, pub(crate) links: Vec, @@ -56,7 +56,7 @@ impl LinkState { let psid: u64 = rng.gen(); let sn: u64 = rng.gen(); let zid = if rng.gen_bool(0.5) { - Some(ZenohId::default()) + Some(ZenohIdProto::default()) } else { None }; diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 4df9b7054c..bbc910b124 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -11,21 +11,21 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::router::*; -use super::tables::TablesLock; -use super::{resource::*, tables}; -use crate::net::primitives::{McastMux, Mux, Primitives}; -use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain}; -use crate::KeyExpr; -use std::any::Any; -use std::collections::HashMap; -use std::fmt; -use std::sync::{Arc, Weak}; +use std::{ + any::Any, + collections::HashMap, + fmt, + sync::{Arc, Weak}, +}; + use tokio_util::sync::CancellationToken; -use zenoh_protocol::zenoh::RequestBody; use zenoh_protocol::{ - core::{ExprId, WhatAmI, ZenohId}, - network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, + core::{ExprId, WhatAmI, ZenohIdProto}, + network::{ + 
diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs
index 4df9b7054c..bbc910b124 100644
--- a/zenoh/src/net/routing/dispatcher/face.rs
+++ b/zenoh/src/net/routing/dispatcher/face.rs
@@ -11,21 +11,21 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::super::router::*;
-use super::tables::TablesLock;
-use super::{resource::*, tables};
-use crate::net::primitives::{McastMux, Mux, Primitives};
-use crate::net::routing::interceptor::{InterceptorTrait, InterceptorsChain};
-use crate::KeyExpr;
-use std::any::Any;
-use std::collections::HashMap;
-use std::fmt;
-use std::sync::{Arc, Weak};
+use std::{
+    any::Any,
+    collections::HashMap,
+    fmt,
+    sync::{Arc, Weak},
+};
+
 use tokio_util::sync::CancellationToken;
-use zenoh_protocol::zenoh::RequestBody;
 use zenoh_protocol::{
-    core::{ExprId, WhatAmI, ZenohId},
-    network::{Mapping, Push, Request, RequestId, Response, ResponseFinal},
+    core::{ExprId, WhatAmI, ZenohIdProto},
+    network::{
+        interest::{InterestId, InterestMode, InterestOptions},
+        Mapping, Push, Request, RequestId, Response, ResponseFinal,
+    },
+    zenoh::RequestBody,
 };
 use zenoh_sync::get_mut_unchecked;
 use zenoh_task::TaskController;
@@ -33,13 +33,37 @@ use zenoh_transport::multicast::TransportMulticast;
 #[cfg(feature = "stats")]
 use zenoh_transport::stats::TransportStats;

+use super::{
+    super::router::*,
+    interests::{declare_final, declare_interest, undeclare_interest, CurrentInterest},
+    resource::*,
+    tables::{self, TablesLock},
+};
+use crate::{
+    api::key_expr::KeyExpr,
+    net::{
+        primitives::{McastMux, Mux, Primitives},
+        routing::interceptor::{InterceptorTrait, InterceptorsChain},
+    },
+};
+
+pub(crate) struct InterestState {
+    pub(crate) options: InterestOptions,
+    pub(crate) res: Option<Arc<Resource>>,
+    pub(crate) finalized: bool,
+}
+
 pub struct FaceState {
     pub(crate) id: usize,
-    pub(crate) zid: ZenohId,
+    pub(crate) zid: ZenohIdProto,
     pub(crate) whatami: WhatAmI,
     #[cfg(feature = "stats")]
     pub(crate) stats: Option<Arc<TransportStats>>,
     pub(crate) primitives: Arc<dyn EPrimitives + Send + Sync>,
+    pub(crate) local_interests: HashMap<InterestId, InterestState>,
+    pub(crate) remote_key_interests: HashMap<InterestId, Option<Arc<Resource>>>,
+    pub(crate) pending_current_interests:
+        HashMap<InterestId, (Arc<CurrentInterest>, CancellationToken)>,
     pub(crate) local_mappings: HashMap<ExprId, Arc<Resource>>,
     pub(crate) remote_mappings: HashMap<ExprId, Arc<Resource>>,
     pub(crate) next_qid: RequestId,
@@ -54,7 +78,7 @@ impl FaceState {
     #[allow(clippy::too_many_arguments)] // @TODO fix warning
     pub(crate) fn new(
         id: usize,
-        zid: ZenohId,
+        zid: ZenohIdProto,
         whatami: WhatAmI,
         #[cfg(feature = "stats")] stats: Option<Arc<TransportStats>>,
         primitives: Arc<dyn EPrimitives + Send + Sync>,
@@ -69,6 +93,9 @@ impl FaceState {
             #[cfg(feature = "stats")]
             stats,
             primitives,
+            local_interests: HashMap::new(),
+            remote_key_interests: HashMap::new(),
+            pending_current_interests: HashMap::new(),
             local_mappings: HashMap::new(),
             remote_mappings: HashMap::new(),
             next_qid: 0,
@@ -154,7 +181,7 @@ impl fmt::Display for FaceState {
     }
 }

-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct WeakFace {
     pub(crate) tables: Weak<TablesLock>,
     pub(crate) state: Weak<FaceState>,
@@ -185,6 +212,34 @@ impl Face {
     }
 }

 impl Primitives for Face {
+    fn send_interest(&self, msg: zenoh_protocol::network::Interest) {
+        let ctrl_lock = zlock!(self.tables.ctrl_lock);
+        if msg.mode != InterestMode::Final {
+            let mut declares = vec![];
+            declare_interest(
+                ctrl_lock.as_ref(),
+                &self.tables,
+                &mut self.state.clone(),
+                msg.id,
+                msg.wire_expr.as_ref(),
+                msg.mode,
+                msg.options,
+                &mut |p, m| declares.push((p.clone(), m)),
+            );
+            drop(ctrl_lock);
+            for (p, m) in declares {
+                p.send_declare(m);
+            }
+        } else {
+            undeclare_interest(
+                ctrl_lock.as_ref(),
+                &self.tables,
+                &mut self.state.clone(),
+                msg.id,
+            );
+        }
+    }
+
     fn send_declare(&self, msg: zenoh_protocol::network::Declare) {
         let ctrl_lock = zlock!(self.tables.ctrl_lock);
         match msg.body {
@@ -200,6 +255,7 @@ impl Primitives for Face {
                     ctrl_lock.as_ref(),
                     &self.tables,
                     &mut self.state.clone(),
+                    m.id,
                     &m.wire_expr,
                     &m.ext_info,
                     msg.ext_nodeid.node_id,
@@ -216,6 +272,7 @@ impl Primitives for Face {
                     ctrl_lock.as_ref(),
                     &self.tables,
                     &mut self.state.clone(),
+                    m.id,
                     &m.ext_wire_expr.wire_expr,
                     msg.ext_nodeid.node_id,
                     &mut |p, m| declares.push((p.clone(), m)),
@@ -231,6 +288,7 @@ impl Primitives for Face {
                     ctrl_lock.as_ref(),
                     &self.tables,
                     &mut self.state.clone(),
+                    m.id,
                     &m.wire_expr,
                     &m.ext_info,
                     msg.ext_nodeid.node_id,
@@ -247,6 +305,7 @@ impl Primitives for Face {
                     ctrl_lock.as_ref(),
                     &self.tables,
                     &mut self.state.clone(),
+                    m.id,
                     &m.ext_wire_expr.wire_expr,
                     msg.ext_nodeid.node_id,
                     &mut |p, m| declares.push((p.clone(), m)),
@@ -256,11 +315,66 @@ impl Primitives for Face {
                     p.send_declare(m);
                 }
             }
-            zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(),
-            zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(),
-            zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(),
-            zenoh_protocol::network::DeclareBody::FinalInterest(_m) => todo!(),
-            zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(),
+            zenoh_protocol::network::DeclareBody::DeclareToken(m) => {
+                let mut declares = vec![];
+                declare_token(
+                    ctrl_lock.as_ref(),
+                    &self.tables,
+                    &mut self.state.clone(),
+                    m.id,
+                    &m.wire_expr,
+                    msg.ext_nodeid.node_id,
+                    msg.interest_id,
+                    &mut |p, m| declares.push((p.clone(), m)),
+                );
+                drop(ctrl_lock);
+                for (p, m) in declares {
+                    p.send_declare(m);
+                }
+            }
+            zenoh_protocol::network::DeclareBody::UndeclareToken(m) => {
+                let mut declares = vec![];
+                undeclare_token(
+                    ctrl_lock.as_ref(),
+                    &self.tables,
+                    &mut self.state.clone(),
+                    m.id,
+                    &m.ext_wire_expr,
+                    msg.ext_nodeid.node_id,
+                    &mut |p, m| declares.push((p.clone(), m)),
+                );
+                drop(ctrl_lock);
+                for (p, m) in declares {
+                    p.send_declare(m);
+                }
+            }
+            zenoh_protocol::network::DeclareBody::DeclareFinal(_) => {
+                if let Some(id) = msg.interest_id {
+                    get_mut_unchecked(&mut self.state.clone())
+                        .local_interests
+                        .entry(id)
+                        .and_modify(|interest| interest.finalized = true);
+
+                    let mut declares = vec![];
+                    declare_final(&mut self.state.clone(), id, &mut |p, m| {
+                        declares.push((p.clone(), m))
+                    });
+
+                    // recompute routes
+                    // TODO: disable routes and recompute them in parallel to avoid holding
+                    // tables write lock for a long time.
+                    let mut wtables = zwrite!(self.tables.tables);
+                    let mut root_res = wtables.root_res.clone();
+                    update_data_routes_from(&mut wtables, &mut root_res);
+                    update_query_routes_from(&mut wtables, &mut root_res);
+
+                    drop(wtables);
+                    drop(ctrl_lock);
+                    for (p, m) in declares {
+                        p.send_declare(m);
+                    }
+                }
+            }
         }
     }

@@ -285,6 +399,8 @@ impl Primitives for Face {
                     &self.state,
                     &msg.wire_expr,
                     msg.id,
+                    msg.ext_qos,
+                    msg.ext_tstamp,
                     msg.ext_target,
                     msg.ext_budget,
                     msg.ext_timeout,
@@ -292,12 +408,6 @@ impl Primitives for Face {
                     msg.ext_nodeid.node_id,
                 );
             }
-            RequestBody::Pull(_) => {
-                pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr);
-            }
-            _ => {
-                tracing::error!("Unsupported request");
-            }
         }
     }

@@ -306,6 +416,8 @@ impl Primitives for Face {
             &self.tables,
             &mut self.state.clone(),
             msg.rid,
+            msg.ext_qos,
+            msg.ext_tstamp,
             msg.ext_respid,
             msg.wire_expr,
             msg.payload,
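// --- Review note (editor's addition): the DeclareFinal arm above is the
// receiving end of the new current-interest handshake. A self-contained
// sketch of that bookkeeping, with stand-in types (only the names
// `local_interests` and `finalized` follow the patch):
use std::collections::HashMap;

struct InterestState {
    finalized: bool,
}

struct FaceSketch {
    local_interests: HashMap<u32, InterestState>,
}

impl FaceSketch {
    // Called when a Declare carrying DeclareFinal with interest_id = `id`
    // arrives: the interest is "current-complete", so the patch marks it
    // finalized and then recomputes data/query routes under the write lock.
    fn on_declare_final(&mut self, id: u32) {
        self.local_interests
            .entry(id)
            .and_modify(|i| i.finalized = true);
    }
}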
diff --git a/zenoh/src/net/routing/dispatcher/interests.rs b/zenoh/src/net/routing/dispatcher/interests.rs
new file mode 100644
index 0000000000..d088b1f3f6
--- /dev/null
+++ b/zenoh/src/net/routing/dispatcher/interests.rs
@@ -0,0 +1,249 @@
+//
+// Copyright (c) 2024 ZettaScale Technology
+//
+// This program and the accompanying materials are made available under the
+// terms of the Eclipse Public License 2.0 which is available at
+// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+// which is available at https://www.apache.org/licenses/LICENSE-2.0.
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::{
+    sync::{Arc, Weak},
+    time::Duration,
+};
+
+use async_trait::async_trait;
+use tokio_util::sync::CancellationToken;
+use zenoh_keyexpr::keyexpr;
+use zenoh_protocol::{
+    core::WireExpr,
+    network::{
+        declare::ext,
+        interest::{InterestId, InterestMode, InterestOptions},
+        Declare, DeclareBody, DeclareFinal,
+    },
+};
+use zenoh_sync::get_mut_unchecked;
+use zenoh_util::Timed;
+
+use super::{
+    face::FaceState,
+    tables::{register_expr_interest, TablesLock},
+};
+use crate::net::routing::{
+    hat::{HatTrait, SendDeclare},
+    router::{unregister_expr_interest, Resource},
+    RoutingContext,
+};
+
+static INTEREST_TIMEOUT_MS: u64 = 10000;
+
+pub(crate) struct CurrentInterest {
+    pub(crate) src_face: Arc<FaceState>,
+    pub(crate) src_interest_id: InterestId,
+}
+
+pub(crate) fn declare_final(
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+    send_declare: &mut SendDeclare,
+) {
+    if let Some(interest) = get_mut_unchecked(face)
+        .pending_current_interests
+        .remove(&id)
+    {
+        finalize_pending_interest(interest, send_declare);
+    }
+}
+
+pub(crate) fn finalize_pending_interests(
+    _tables_ref: &TablesLock,
+    face: &mut Arc<FaceState>,
+    send_declare: &mut SendDeclare,
+) {
+    for (_, interest) in get_mut_unchecked(face).pending_current_interests.drain() {
+        finalize_pending_interest(interest, send_declare);
+    }
+}
+
+pub(crate) fn finalize_pending_interest(
+    interest: (Arc<CurrentInterest>, CancellationToken),
+    send_declare: &mut SendDeclare,
+) {
+    let (interest, cancellation_token) = interest;
+    cancellation_token.cancel();
+    if let Some(interest) = Arc::into_inner(interest) {
+        tracing::debug!(
+            "Propagate DeclareFinal {}:{}",
+            interest.src_face,
+            interest.src_interest_id
+        );
+        send_declare(
+            &interest.src_face.primitives,
+            RoutingContext::new(Declare {
+                interest_id: Some(interest.src_interest_id),
+                ext_qos: ext::QoSType::DECLARE,
+                ext_tstamp: None,
+                ext_nodeid: ext::NodeIdType::DEFAULT,
+                body: DeclareBody::DeclareFinal(DeclareFinal),
+            }),
+        );
+    }
+}
+
+#[derive(Clone)]
+pub(crate) struct CurrentInterestCleanup {
+    tables: Arc<TablesLock>,
+    face: Weak<FaceState>,
+    id: InterestId,
+}
+
+impl CurrentInterestCleanup {
+    pub(crate) fn spawn_interest_clean_up_task(
+        face: &Arc<FaceState>,
+        tables_ref: &Arc<TablesLock>,
+        id: u32,
+    ) {
+        let mut cleanup = CurrentInterestCleanup {
+            tables: tables_ref.clone(),
+            face: Arc::downgrade(face),
+            id,
+        };
+        if let Some((_, cancellation_token)) = face.pending_current_interests.get(&id) {
+            let c_cancellation_token = cancellation_token.clone();
+            face.task_controller
+                .spawn_with_rt(zenoh_runtime::ZRuntime::Net, async move {
+                    tokio::select! {
+                        _ = tokio::time::sleep(Duration::from_millis(INTEREST_TIMEOUT_MS)) => { cleanup.run().await }
+                        _ = c_cancellation_token.cancelled() => {}
+                    }
+                });
+        }
+    }
+}
+
+#[async_trait]
+impl Timed for CurrentInterestCleanup {
+    async fn run(&mut self) {
+        if let Some(mut face) = self.face.upgrade() {
+            let ctrl_lock = zlock!(self.tables.ctrl_lock);
+            if let Some(interest) = get_mut_unchecked(&mut face)
+                .pending_current_interests
+                .remove(&self.id)
+            {
+                drop(ctrl_lock);
+                tracing::warn!(
+                    "Didn't receive DeclareFinal {}:{} from {}: Timeout({:#?})!",
+                    interest.0.src_face,
+                    self.id,
+                    face,
+                    Duration::from_millis(INTEREST_TIMEOUT_MS),
+                );
+                finalize_pending_interest(interest, &mut |p, m| p.send_declare(m));
+            }
+        }
+    }
+}
+
+#[allow(clippy::too_many_arguments)]
+pub(crate) fn declare_interest(
+    hat_code: &(dyn HatTrait + Send + Sync),
+    tables_ref: &Arc<TablesLock>,
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+    expr: Option<&WireExpr>,
+    mode: InterestMode,
+    options: InterestOptions,
+    send_declare: &mut SendDeclare,
+) {
+    if options.keyexprs() && mode != InterestMode::Current {
+        register_expr_interest(tables_ref, face, id, expr);
+    }
+
+    if let Some(expr) = expr {
+        let rtables = zread!(tables_ref.tables);
+        match rtables
+            .get_mapping(face, &expr.scope, expr.mapping)
+            .cloned()
+        {
+            Some(mut prefix) => {
+                tracing::debug!(
+                    "{} Declare interest {} ({}{})",
+                    face,
+                    id,
+                    prefix.expr(),
+                    expr.suffix
+                );
+                let res = Resource::get_resource(&prefix, &expr.suffix);
+                let (mut res, mut wtables) = if res
+                    .as_ref()
+                    .map(|r| r.context.is_some())
+                    .unwrap_or(false)
+                {
+                    drop(rtables);
+                    let wtables = zwrite!(tables_ref.tables);
+                    (res.unwrap(), wtables)
+                } else {
+                    let mut fullexpr = prefix.expr();
+                    fullexpr.push_str(expr.suffix.as_ref());
+                    let mut matches = keyexpr::new(fullexpr.as_str())
+                        .map(|ke| Resource::get_matches(&rtables, ke))
+                        .unwrap_or_default();
+                    drop(rtables);
+                    let mut wtables = zwrite!(tables_ref.tables);
+                    let mut res =
+                        Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref());
+                    matches.push(Arc::downgrade(&res));
+                    Resource::match_resource(&wtables, &mut res, matches);
+                    (res, wtables)
+                };
+
+                hat_code.declare_interest(
+                    &mut wtables,
+                    tables_ref,
+                    face,
+                    id,
+                    Some(&mut res),
+                    mode,
+                    options,
+                    send_declare,
+                );
+            }
+            None => tracing::error!(
+                "{} Declare interest {} for unknown scope {}!",
+                face,
+                id,
+                expr.scope
+            ),
+        }
+    } else {
+        let mut wtables = zwrite!(tables_ref.tables);
+        hat_code.declare_interest(
+            &mut wtables,
+            tables_ref,
+            face,
+            id,
+            None,
+            mode,
+            options,
+            send_declare,
+        );
+    }
+}
+
+pub(crate) fn undeclare_interest(
+    hat_code: &(dyn HatTrait + Send + Sync),
+    tables: &TablesLock,
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+) {
+    tracing::debug!("{} Undeclare interest {}", face, id,);
+    unregister_expr_interest(tables, face, id);
+    let mut wtables = zwrite!(tables.tables);
+    hat_code.undeclare_interest(&mut wtables, face, id);
+}
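// --- Review note (editor's addition): spawn_interest_clean_up_task above
// races a 10 s timer against a per-interest CancellationToken. The same
// pattern in isolation (tokio and tokio_util are the crates the patch
// already uses; `clean_up_after` is an illustrative name):
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn clean_up_after(token: CancellationToken, timeout_ms: u64, cleanup: impl FnOnce()) {
    tokio::select! {
        // No DeclareFinal within the deadline: finalize the interest ourselves.
        _ = tokio::time::sleep(Duration::from_millis(timeout_ms)) => cleanup(),
        // DeclareFinal arrived and cancelled the token: nothing left to do.
        _ = token.cancelled() => {}
    }
}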
diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs
index 53c32fb5ff..dc17b91b6b 100644
--- a/zenoh/src/net/routing/dispatcher/mod.rs
+++ b/zenoh/src/net/routing/dispatcher/mod.rs
@@ -18,7 +18,9 @@
 //!
 //! [Click here for Zenoh's documentation](../zenoh/index.html)
 pub mod face;
+pub mod interests;
 pub mod pubsub;
 pub mod queries;
 pub mod resource;
 pub mod tables;
+pub mod token;
diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs
index 5ac1f60627..84c8433a48 100644
--- a/zenoh/src/net/routing/dispatcher/pubsub.rs
+++ b/zenoh/src/net/routing/dispatcher/pubsub.rs
@@ -11,41 +11,52 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::face::FaceState;
-use super::resource::{DataRoutes, Direction, PullCaches, Resource};
-use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock};
-use crate::net::routing::hat::{HatTrait, SendDeclare};
-use std::borrow::Cow;
-use std::collections::HashMap;
-use std::sync::Arc;
-use std::sync::RwLock;
+use std::{collections::HashMap, sync::Arc};
+
 use zenoh_core::zread;
-use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr};
-use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo;
-use zenoh_protocol::network::declare::Mode;
 use zenoh_protocol::{
-    core::{WhatAmI, WireExpr},
-    network::{declare::ext, Push},
+    core::{key_expr::keyexpr, WhatAmI, WireExpr},
+    network::{
+        declare::{ext, subscriber::ext::SubscriberInfo, SubscriberId},
+        Push,
+    },
     zenoh::PushBody,
 };
 use zenoh_sync::get_mut_unchecked;

+use super::{
+    face::FaceState,
+    resource::{DataRoutes, Direction, Resource},
+    tables::{NodeId, Route, RoutingExpr, Tables, TablesLock},
+};
+#[zenoh_macros::unstable]
+use crate::key_expr::KeyExpr;
+use crate::net::routing::hat::{HatTrait, SendDeclare};
+
+#[allow(clippy::too_many_arguments)]
 pub(crate) fn declare_subscription(
     hat_code: &(dyn HatTrait + Send + Sync),
     tables: &TablesLock,
     face: &mut Arc<FaceState>,
+    id: SubscriberId,
     expr: &WireExpr,
     sub_info: &SubscriberInfo,
     node_id: NodeId,
     send_declare: &mut SendDeclare,
 ) {
-    tracing::debug!("Declare subscription {}", face);
     let rtables = zread!(tables.tables);
     match rtables
         .get_mapping(face, &expr.scope, expr.mapping)
         .cloned()
     {
         Some(mut prefix) => {
+            tracing::debug!(
+                "{} Declare subscriber {} ({}{})",
+                face,
+                id,
+                prefix.expr(),
+                expr.suffix
+            );
             let res = Resource::get_resource(&prefix, &expr.suffix);
             let (mut res, mut wtables) =
                 if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) {
@@ -70,6 +81,7 @@ pub(crate) fn declare_subscription(
             hat_code.declare_subscription(
                 &mut wtables,
                 face,
+                id,
                 &mut res,
                 sub_info,
                 node_id,
@@ -84,17 +96,19 @@ pub(crate) fn declare_subscription(
             drop(rtables);

             let wtables = zwrite!(tables.tables);
-            for (mut res, data_routes, matching_pulls) in matches_data_routes {
+            for (mut res, data_routes) in matches_data_routes {
                 get_mut_unchecked(&mut res)
                     .context_mut()
                     .update_data_routes(data_routes);
-                get_mut_unchecked(&mut res)
-                    .context_mut()
-                    .update_matching_pulls(matching_pulls);
             }
             drop(wtables);
         }
-        None => tracing::error!("Declare subscription for unknown scope {}!", expr.scope),
+        None => tracing::error!(
+            "{} Declare subscriber {} for unknown scope {}!",
+            face,
+            id,
+            expr.scope
+        ),
     }
 }

@@ -102,48 +116,61 @@ pub(crate) fn undeclare_subscription(
     hat_code: &(dyn HatTrait + Send + Sync),
     tables: &TablesLock,
     face: &mut Arc<FaceState>,
+    id: SubscriberId,
     expr: &WireExpr,
     node_id: NodeId,
     send_declare: &mut SendDeclare,
 ) {
     tracing::debug!("Undeclare subscription {}", face);
-    let rtables = zread!(tables.tables);
-    match rtables.get_mapping(face, &expr.scope, expr.mapping) {
-        Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
-            Some(mut res) => {
-                drop(rtables);
-                let mut wtables = zwrite!(tables.tables);
-
-                hat_code.undeclare_subscription(
-                    &mut wtables,
+    let res = if expr.is_empty() {
+        None
+    } else {
+        let rtables = zread!(tables.tables);
+        match rtables.get_mapping(face, &expr.scope, expr.mapping) {
+            Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
+                Some(res) => Some(res),
+                None => {
+                    tracing::error!(
+                        "{} Undeclare unknown subscriber {}{}!",
+                        face,
+                        prefix.expr(),
+                        expr.suffix
+                    );
+                    return;
+                }
+            },
+            None => {
+                tracing::error!(
+                    "{} Undeclare subscriber with unknown scope {}",
                     face,
-                    &mut res,
-                    node_id,
-                    send_declare,
+                    expr.scope
                 );
-
-                disable_matches_data_routes(&mut wtables, &mut res);
-                drop(wtables);
-
-                let rtables = zread!(tables.tables);
-                let matches_data_routes = compute_matches_data_routes(&rtables, &res);
-                drop(rtables);
-
-                let wtables = zwrite!(tables.tables);
-                for (mut res, data_routes, matching_pulls) in matches_data_routes {
-                    get_mut_unchecked(&mut res)
-                        .context_mut()
-                        .update_data_routes(data_routes);
-                    get_mut_unchecked(&mut res)
-                        .context_mut()
-                        .update_matching_pulls(matching_pulls);
-                }
-                Resource::clean(&mut res);
-                drop(wtables);
+                return;
             }
-            None => tracing::error!("Undeclare unknown subscription!"),
-        },
-        None => tracing::error!("Undeclare subscription with unknown scope!"),
+        }
+    };
+    let mut wtables = zwrite!(tables.tables);
+    if let Some(mut res) =
+        hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id, send_declare)
+    {
+        tracing::debug!("{} Undeclare subscriber {} ({})", face, id, res.expr());
+        disable_matches_data_routes(&mut wtables, &mut res);
+        drop(wtables);
+
+        let rtables = zread!(tables.tables);
+        let matches_data_routes = compute_matches_data_routes(&rtables, &res);
+        drop(rtables);
+
+        let wtables = zwrite!(tables.tables);
+        for (mut res, data_routes) in matches_data_routes {
+            get_mut_unchecked(&mut res)
+                .context_mut()
+                .update_data_routes(data_routes);
+        }
+        Resource::clean(&mut res);
+        drop(wtables);
+    } else {
+        tracing::error!("{} Undeclare unknown subscriber {}", face, id);
     }
 }

@@ -207,7 +234,6 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc<Resource>) {

 pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc<Resource>) {
     update_data_routes(tables, res);
-    update_matching_pulls(tables, res);
     let res = get_mut_unchecked(res);
     for child in res.children.values_mut() {
         update_data_routes_from(tables, child);
@@ -217,22 +243,17 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc<Resource>) {
 pub(crate) fn compute_matches_data_routes<'a>(
     tables: &'a Tables,
     res: &'a Arc<Resource>,
-) -> Vec<(Arc<Resource>, DataRoutes, Arc<PullCaches>)> {
+) -> Vec<(Arc<Resource>, DataRoutes)> {
     let mut routes = vec![];
     if res.context.is_some() {
         let mut expr = RoutingExpr::new(res, "");
-        routes.push((
-            res.clone(),
-            compute_data_routes(tables, &mut expr),
-            compute_matching_pulls(tables, &mut expr),
-        ));
+        routes.push((res.clone(), compute_data_routes(tables, &mut expr)));
         for match_ in &res.context().matches {
             let match_ = match_.upgrade().unwrap();
             if !Arc::ptr_eq(&match_, res) {
                 let mut expr = RoutingExpr::new(&match_, "");
                 let match_routes = compute_data_routes(tables, &mut expr);
-                let matching_pulls = compute_matching_pulls(tables, &mut expr);
-                routes.push((match_, match_routes, matching_pulls));
+                routes.push((match_, match_routes));
             }
         }
     }
@@ -242,12 +263,10 @@ pub(crate) fn compute_matches_data_routes<'a>(
 pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc<Resource>) {
     if res.context.is_some() {
         update_data_routes(tables, res);
-        update_matching_pulls(tables, res);
         for match_ in &res.context().matches {
             let mut match_ = match_.upgrade().unwrap();
             if !Arc::ptr_eq(&match_, res) {
                 update_data_routes(tables, &mut match_);
-                update_matching_pulls(tables, &mut match_);
             }
         }
     }
@@ -262,9 +281,6 @@ pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc<Resource>) {
-    res: &Option<Arc<Resource>>,
-    expr: &mut RoutingExpr,
-) -> Arc<Route> {
-    res.as_ref()
-        .and_then(|res| res.data_route(WhatAmI::Client, 0))
-        .unwrap_or_else(|| {
-            tables
-                .hat_code
-                .compute_data_route(tables, expr, 0, WhatAmI::Client)
-        })
-}
-
-fn compute_matching_pulls_(tables: &Tables, pull_caches: &mut PullCaches, expr: &mut RoutingExpr) {
-    let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) {
-        ke
-    } else {
-        return;
-    };
-    let res = Resource::get_resource(expr.prefix, expr.suffix);
-    let matches = res
-        .as_ref()
-        .and_then(|res| res.context.as_ref())
-        .map(|ctx| Cow::from(&ctx.matches))
-        .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke)));
-
-    for mres in matches.iter() {
-        let mres = mres.upgrade().unwrap();
-        for context in mres.session_ctxs.values() {
-            if let Some(subinfo) = &context.subs {
-                if subinfo.mode == Mode::Pull {
-                    pull_caches.push(context.clone());
-                }
-            }
-        }
-    }
-}
-
-pub(crate) fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc<PullCaches> {
-    let mut pull_caches = PullCaches::default();
-    compute_matching_pulls_(tables, &mut pull_caches, expr);
-    Arc::new(pull_caches)
-}
-
-pub(crate) fn update_matching_pulls(tables: &Tables, res: &mut Arc<Resource>) {
-    if res.context.is_some() {
-        let mut res_mut = res.clone();
-        let res_mut = get_mut_unchecked(&mut res_mut);
-        if res_mut.context_mut().matching_pulls.is_none() {
-            res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default()));
-        }
-        compute_matching_pulls_(
-            tables,
-            get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()),
-            &mut RoutingExpr::new(res, ""),
-        );
-    }
-}
-
-#[inline]
-fn get_matching_pulls(
-    tables: &Tables,
-    res: &Option<Arc<Resource>>,
-    expr: &mut RoutingExpr,
-) -> Arc<PullCaches> {
-    res.as_ref()
-        .and_then(|res| res.context.as_ref())
-        .and_then(|ctx| ctx.matching_pulls.clone())
-        .unwrap_or_else(|| compute_matching_pulls(tables, expr))
-}
-
-macro_rules! cache_data {
-    (
-        $matching_pulls:expr,
-        $expr:expr,
-        $payload:expr
-    ) => {
-        for context in $matching_pulls.iter() {
-            get_mut_unchecked(&mut context.clone())
-                .last_values
-                .insert($expr.full_expr().to_string(), $payload.clone());
-        }
-    };
+pub(crate) fn get_matching_subscriptions(
+    tables: &Tables,
+    key_expr: &KeyExpr<'_>,
+) -> HashMap<usize, Arc<FaceState>> {
+    tables.hat_code.get_matching_subscriptions(tables, key_expr)
 }

 #[cfg(feature = "stats")]
@@ -428,10 +365,19 @@ macro_rules! inc_stats {
         match &$body {
             PushBody::Put(p) => {
                 stats.[<$txrx _z_put_msgs>].[<inc_ $space>](1);
-                stats.[<$txrx _z_put_pl_bytes>].[<inc_ $space>](p.payload.len());
+                let mut n = p.payload.len();
+                if let Some(a) = p.ext_attachment.as_ref() {
+                    n += a.buffer.len();
+                }
+                stats.[<$txrx _z_put_pl_bytes>].[<inc_ $space>](n);
             }
-            PushBody::Del(_) => {
+            PushBody::Del(d) => {
                 stats.[<$txrx _z_del_msgs>].[<inc_ $space>](1);
+                let mut n = 0;
+                if let Some(a) = d.ext_attachment.as_ref() {
+                    n += a.buffer.len();
+                }
+                stats.[<$txrx _z_del_pl_bytes>].[<inc_ $space>](n);
             }
         }
     }
@@ -452,7 +398,8 @@ pub fn full_reentrant_route_data(
     match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() {
         Some(prefix) => {
             tracing::trace!(
-                "Route data for res {}{}",
+                "{} Route data for res {}{}",
+                face,
                 prefix.expr(),
                 expr.suffix.as_ref()
             );
@@ -472,12 +419,10 @@ pub fn full_reentrant_route_data(

             let route = get_data_route(&tables, face, &res, &mut expr, routing_context);

-            let matching_pulls = get_matching_pulls(&tables, &res, &mut expr);
-
-            if !(route.is_empty() && matching_pulls.is_empty()) {
+            if !route.is_empty() {
                 treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp);

-                if route.len() == 1 && matching_pulls.len() == 0 {
+                if route.len() == 1 {
                     let (outface, key_expr, context) = route.values().next().unwrap();
                     if tables
                         .hat_code
@@ -499,26 +444,43 @@ pub fn full_reentrant_route_data(
                             payload,
                         })
                     }
-                } else {
-                    if !matching_pulls.is_empty() {
-                        let lock = zlock!(tables.pull_caches_lock);
-                        cache_data!(matching_pulls, expr, payload);
-                        drop(lock);
-                    }
+                } else if tables.whatami == WhatAmI::Router {
+                    let route = route
+                        .values()
+                        .filter(|(outface, _key_expr, _context)| {
+                            tables
+                                .hat_code
+                                .egress_filter(&tables, face, outface, &mut expr)
+                        })
+                        .cloned()
+                        .collect::<Vec<Direction>>();

-                    if tables.whatami == WhatAmI::Router {
-                        let route = route
-                            .values()
-                            .filter(|(outface, _key_expr, _context)| {
-                                tables
-                                    .hat_code
-                                    .egress_filter(&tables, face, outface, &mut expr)
-                            })
-                            .cloned()
-                            .collect::<Vec<Direction>>();
+                    drop(tables);
+                    for (outface, key_expr, context) in route {
+                        #[cfg(feature = "stats")]
+                        if !admin {
+                            inc_stats!(face, tx, user, payload)
+                        } else {
+                            inc_stats!(face, tx, admin, payload)
+                        }

-                        drop(tables);
-                        for (outface, key_expr, context) in route {
+                        outface.primitives.send_push(Push {
+                            wire_expr: key_expr,
+                            ext_qos,
+                            ext_tstamp: None,
+                            ext_nodeid: ext::NodeIdType { node_id: context },
+                            payload: payload.clone(),
+                        })
+                    }
+                } else {
+                    drop(tables);
+                    for (outface, key_expr, context) in route.values() {
+                        if face.id != outface.id
+                            && match (face.mcast_group.as_ref(), outface.mcast_group.as_ref()) {
+                                (Some(l), Some(r)) => l != r,
+                                _ => true,
+                            }
+                        {
                             #[cfg(feature = "stats")]
                             if !admin {
                                 inc_stats!(face, tx, user, payload)
@@ -527,110 +489,20 @@ pub fn full_reentrant_route_data(
                             }

                             outface.primitives.send_push(Push {
-                                wire_expr: key_expr,
+                                wire_expr: key_expr.into(),
                                 ext_qos,
-                                ext_tstamp,
-                                ext_nodeid: ext::NodeIdType { node_id: context },
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType { node_id: *context },
                                 payload: payload.clone(),
                             })
                         }
-                    } else {
-                        drop(tables);
-                        for (outface, key_expr, context) in route.values() {
-                            if face.id != outface.id
-                                && match (
-                                    face.mcast_group.as_ref(),
-                                    outface.mcast_group.as_ref(),
-                                ) {
-                                    (Some(l), Some(r)) => l != r,
-                                    _ => true,
-                                }
-                            {
-                                #[cfg(feature = "stats")]
-                                if !admin {
-                                    inc_stats!(face, tx, user, payload)
-                                } else {
-                                    inc_stats!(face, tx, admin, payload)
-                                }
-
-                                outface.primitives.send_push(Push {
-                                    wire_expr: key_expr.into(),
-                                    ext_qos,
-                                    ext_tstamp,
-                                    ext_nodeid: ext::NodeIdType { node_id: *context },
-                                    payload: payload.clone(),
-                                })
-                            }
-                        }
                     }
                 }
             }
         }
         None => {
-            tracing::error!("Route data with unknown scope {}!", expr.scope);
+            tracing::error!("{} Route data with unknown scope {}!", face, expr.scope);
         }
     }
 }
-
-pub fn pull_data(tables_ref: &RwLock<Tables>, face: &Arc<FaceState>, expr: WireExpr) {
-    let tables = zread!(tables_ref);
-    match tables.get_mapping(face, &expr.scope, expr.mapping) {
-        Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
-            Some(mut res) => {
-                let res = get_mut_unchecked(&mut res);
-                match res.session_ctxs.get_mut(&face.id) {
-                    Some(ctx) => match &ctx.subs {
-                        Some(_subinfo) => {
-                            // let reliability = subinfo.reliability;
-                            let lock = zlock!(tables.pull_caches_lock);
-                            let route = get_mut_unchecked(ctx)
-                                .last_values
-                                .drain()
-                                .map(|(name, sample)| {
-                                    (
-                                        Resource::get_best_key(&tables.root_res, &name, face.id)
-                                            .to_owned(),
-                                        sample,
-                                    )
-                                })
-                                .collect::<Vec<(WireExpr, PushBody)>>();
-                            drop(lock);
-                            drop(tables);
-                            for (key_expr, payload) in route {
-                                face.primitives.send_push(Push {
-                                    wire_expr: key_expr,
-                                    ext_qos: ext::QoSType::push_default(),
-                                    ext_tstamp: None,
-                                    ext_nodeid: ext::NodeIdType::default(),
-                                    payload,
-                                });
-                            }
-                        }
-                        None => {
-                            tracing::error!(
-                                "Pull data for unknown subscription {} (no info)!",
-                                prefix.expr() + expr.suffix.as_ref()
-                            );
-                        }
-                    },
-                    None => {
-                        tracing::error!(
-                            "Pull data for unknown subscription {} (no context)!",
-                            prefix.expr() + expr.suffix.as_ref()
-                        );
-                    }
-                }
-            }
-            None => {
-                tracing::error!(
-                    "Pull data for unknown subscription {} (no resource)!",
-                    prefix.expr() + expr.suffix.as_ref()
-                );
-            }
-        },
-        None => {
-            tracing::error!("Pull data with unknown scope {}!", expr.scope);
-        }
-    };
-}
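// --- Review note (editor's addition): with pull subscriptions removed,
// full_reentrant_route_data keeps only two forwarding shapes. A condensed,
// self-contained sketch with stand-in types (`route` maps a face id to a
// direction, as in the patch):
use std::collections::HashMap;

fn route_data<D: Clone>(route: &HashMap<usize, D>, send: impl Fn(D)) {
    if route.is_empty() {
        return; // no subscriber matches: the payload is dropped early
    }
    if route.len() == 1 {
        // single destination: the patch hands the payload over directly;
        // the clone here only keeps the sketch generic over D
        send(route.values().next().unwrap().clone());
    } else {
        for direction in route.values() {
            send(direction.clone()); // one clone per remaining destination
        }
    }
}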
diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs
index 9de841949c..c117bd51df 100644
--- a/zenoh/src/net/routing/dispatcher/queries.rs
+++ b/zenoh/src/net/routing/dispatcher/queries.rs
@@ -11,60 +11,69 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::face::FaceState;
-use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource};
-use super::tables::NodeId;
-use super::tables::{RoutingExpr, Tables, TablesLock};
-use crate::net::routing::hat::{HatTrait, SendDeclare};
-use crate::net::routing::RoutingContext;
+use std::{
+    collections::HashMap,
+    sync::{Arc, Weak},
+    time::Duration,
+};
+
 use async_trait::async_trait;
-use std::collections::HashMap;
-use std::sync::{Arc, Weak};
-use std::time::Duration;
 use tokio_util::sync::CancellationToken;
 use zenoh_buffers::ZBuf;
 use zenoh_config::WhatAmI;
-use zenoh_protocol::core::key_expr::keyexpr;
-use zenoh_protocol::core::KnownEncoding;
-use zenoh_protocol::network::declare::queryable::ext::QueryableInfo;
-use zenoh_protocol::zenoh;
-use zenoh_protocol::zenoh::ext::ValueType;
+#[cfg(feature = "stats")]
+use zenoh_protocol::zenoh::reply::ReplyBody;
 use zenoh_protocol::{
-    core::{Encoding, WireExpr},
+    core::{key_expr::keyexpr, Encoding, WireExpr},
     network::{
-        declare::ext,
+        declare::{ext, queryable::ext::QueryableInfoType, QueryableId},
         request::{
             ext::{BudgetType, TargetType, TimeoutType},
             Request, RequestId,
         },
         response::{self, ext::ResponderIdType, Response, ResponseFinal},
     },
-    zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody},
+    zenoh::{self, RequestBody, ResponseBody},
 };
 use zenoh_sync::get_mut_unchecked;
 use zenoh_util::Timed;

+use super::{
+    face::FaceState,
+    resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource},
+    tables::{NodeId, RoutingExpr, Tables, TablesLock},
+};
+use crate::net::routing::hat::{HatTrait, SendDeclare};
+
 pub(crate) struct Query {
     src_face: Arc<FaceState>,
     src_qid: RequestId,
 }

+#[allow(clippy::too_many_arguments)]
 pub(crate) fn declare_queryable(
     hat_code: &(dyn HatTrait + Send + Sync),
     tables: &TablesLock,
     face: &mut Arc<FaceState>,
+    id: QueryableId,
     expr: &WireExpr,
-    qabl_info: &QueryableInfo,
+    qabl_info: &QueryableInfoType,
     node_id: NodeId,
     send_declare: &mut SendDeclare,
 ) {
-    tracing::debug!("Register queryable {}", face);
     let rtables = zread!(tables.tables);
     match rtables
         .get_mapping(face, &expr.scope, expr.mapping)
         .cloned()
     {
         Some(mut prefix) => {
+            tracing::debug!(
+                "{} Declare queryable {} ({}{})",
+                face,
+                id,
+                prefix.expr(),
+                expr.suffix
+            );
             let res = Resource::get_resource(&prefix, &expr.suffix);
             let (mut res, mut wtables) =
                 if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) {
@@ -89,6 +98,7 @@ pub(crate) fn declare_queryable(
             hat_code.declare_queryable(
                 &mut wtables,
                 face,
+                id,
                 &mut res,
                 qabl_info,
                 node_id,
@@ -110,7 +120,12 @@ pub(crate) fn declare_queryable(
             }
             drop(wtables);
         }
-        None => tracing::error!("Declare queryable for unknown scope {}!", expr.scope),
+        None => tracing::error!(
+            "{} Declare queryable {} for unknown scope {}!",
+            face,
+            id,
+            expr.scope
+        ),
     }
 }

@@ -118,38 +133,60 @@ pub(crate) fn undeclare_queryable(
     hat_code: &(dyn HatTrait + Send + Sync),
     tables: &TablesLock,
     face: &mut Arc<FaceState>,
+    id: QueryableId,
     expr: &WireExpr,
     node_id: NodeId,
     send_declare: &mut SendDeclare,
 ) {
-    let rtables = zread!(tables.tables);
-    match rtables.get_mapping(face, &expr.scope, expr.mapping) {
-        Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
-            Some(mut res) => {
-                drop(rtables);
-                let mut wtables = zwrite!(tables.tables);
-
-                hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id, send_declare);
-
-                disable_matches_query_routes(&mut wtables, &mut res);
-                drop(wtables);
-
-                let rtables = zread!(tables.tables);
-                let matches_query_routes = compute_matches_query_routes(&rtables, &res);
-                drop(rtables);
-
-                let wtables = zwrite!(tables.tables);
-                for (mut res, query_routes) in matches_query_routes {
-                    get_mut_unchecked(&mut res)
-                        .context_mut()
-                        .update_query_routes(query_routes);
+    let res = if expr.is_empty() {
+        None
+    } else {
+        let rtables = zread!(tables.tables);
+        match rtables.get_mapping(face, &expr.scope, expr.mapping) {
+            Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
+                Some(res) => Some(res),
+                None => {
+                    tracing::error!(
+                        "{} Undeclare unknown queryable {}{}!",
+                        face,
+                        prefix.expr(),
+                        expr.suffix
+                    );
+                    return;
                 }
-                Resource::clean(&mut res);
-                drop(wtables);
+            },
+            None => {
+                tracing::error!(
+                    "{} Undeclare queryable with unknown scope {}",
+                    face,
+                    expr.scope
+                );
+                return;
             }
-            None => tracing::error!("Undeclare unknown queryable!"),
-        },
-        None => tracing::error!("Undeclare queryable with unknown scope!"),
+        }
+    };
+    let mut wtables = zwrite!(tables.tables);
+    if let Some(mut res) =
+        hat_code.undeclare_queryable(&mut wtables, face, id, res, node_id, send_declare)
+    {
+        tracing::debug!("{} Undeclare queryable {} ({})", face, id, res.expr());
+        disable_matches_query_routes(&mut wtables, &mut res);
+        drop(wtables);
+
+        let rtables = zread!(tables.tables);
+        let matches_query_routes = compute_matches_query_routes(&rtables, &res);
+        drop(rtables);
+
+        let wtables = zwrite!(tables.tables);
+        for (mut res, query_routes) in matches_query_routes {
+            get_mut_unchecked(&mut res)
+                .context_mut()
+                .update_query_routes(query_routes);
+        }
+        Resource::clean(&mut res);
+        drop(wtables);
+    } else {
+        tracing::error!("{} Undeclare unknown queryable {}", face, id);
     }
 }

@@ -278,22 +315,11 @@ fn compute_final_route(
                 .hat_code
                 .egress_filter(tables, src_face, &qabl.direction.0, expr)
             {
-                #[cfg(feature = "complete_n")]
-                {
-                    route.entry(qabl.direction.0.id).or_insert_with(|| {
-                        let mut direction = qabl.direction.clone();
-                        let qid = insert_pending_query(&mut direction.0, query.clone());
-                        (direction, qid, *target)
-                    });
-                }
-                #[cfg(not(feature = "complete_n"))]
-                {
-                    route.entry(qabl.direction.0.id).or_insert_with(|| {
-                        let mut direction = qabl.direction.clone();
-                        let qid = insert_pending_query(&mut direction.0, query.clone());
-                        (direction, qid)
-                    });
-                }
+                route.entry(qabl.direction.0.id).or_insert_with(|| {
+                    let mut direction = qabl.direction.clone();
+                    let qid = insert_pending_query(&mut direction.0, query.clone());
+                    (direction, qid)
+                });
             }
         }
         route
@@ -306,46 +332,11 @@ fn compute_final_route(
                 .hat_code
                 .egress_filter(tables, src_face, &qabl.direction.0, expr)
             {
-                #[cfg(feature = "complete_n")]
-                {
-                    route.entry(qabl.direction.0.id).or_insert_with(|| {
-                        let mut direction = qabl.direction.clone();
-                        let qid = insert_pending_query(&mut direction.0, query.clone());
-                        (direction, qid, *target)
-                    });
-                }
-                #[cfg(not(feature = "complete_n"))]
-                {
-                    route.entry(qabl.direction.0.id).or_insert_with(|| {
-                        let mut direction = qabl.direction.clone();
-                        let qid = insert_pending_query(&mut direction.0, query.clone());
-                        (direction, qid)
-                    });
-                }
-            }
-        }
-        route
-    }
-    #[cfg(feature = "complete_n")]
-    TargetType::Complete(n) => {
-        let mut route = HashMap::new();
-        let mut remaining = *n;
-        for qabl in qabls.iter() {
-            if qabl.complete > 0
-                && tables
-                    .hat_code
-                    .egress_filter(tables, src_face, &qabl.direction.0, expr)
-            {
-                let nb = std::cmp::min(qabl.complete, remaining);
                 route.entry(qabl.direction.0.id).or_insert_with(|| {
                     let mut direction = qabl.direction.clone();
                     let qid = insert_pending_query(&mut direction.0, query.clone());
-                    (direction, qid, TargetType::Complete(nb))
+                    (direction, qid)
                 });
-                remaining -= nb;
-                if remaining == 0 {
-                    break;
-                }
             }
         }
         route
@@ -356,18 +347,11 @@ fn compute_final_route(
             .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0)
         {
             let mut route = HashMap::new();
-            #[cfg(feature = "complete_n")]
-            {
-                let mut direction = qabl.direction.clone();
-                let qid = insert_pending_query(&mut direction.0, query);
-                route.insert(direction.0.id, (direction, qid, *target));
-            }
-            #[cfg(not(feature = "complete_n"))]
-            {
-                let mut direction = qabl.direction.clone();
-                let qid = insert_pending_query(&mut direction.0, query);
-                route.insert(direction.0.id, (direction, qid));
-            }
+
+            let mut direction = qabl.direction.clone();
+            let qid = insert_pending_query(&mut direction.0, query);
+            route.insert(direction.0.id, (direction, qid));
+
             route
         } else {
             compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query)
@@ -422,20 +406,17 @@ impl Timed for QueryCleanup {
                 &self.tables,
                 &mut face,
                 self.qid,
+                response::ext::QoSType::RESPONSE,
+                None,
                 ext_respid,
                 WireExpr::empty(),
                 ResponseBody::Err(zenoh::Err {
-                    timestamp: None,
-                    is_infrastructure: false,
+                    encoding: Encoding::default(),
                     ext_sinfo: None,
+                    #[cfg(feature = "shared-memory")]
+                    ext_shm: None,
                     ext_unknown: vec![],
-                    ext_body: Some(ValueType {
-                        #[cfg(feature = "shared-memory")]
-                        ext_shm: None,
-                        payload: ZBuf::from("Timeout".as_bytes().to_vec()),
-                        encoding: KnownEncoding::TextPlain.into(),
-                    }),
-                    code: 0, // TODO
+                    payload: ZBuf::from("Timeout".as_bytes().to_vec()),
                 }),
             );
             let queries_lock = zwrite!(self.tables.queries_lock);
@@ -503,20 +484,12 @@ macro_rules! inc_req_stats {
             if let Some(stats) = $face.stats.as_ref() {
                 use zenoh_buffers::buffer::Buffer;
                 match &$body {
-                    RequestBody::Put(p) => {
-                        stats.[<$txrx _z_put_msgs>].[<inc_ $space>](1);
-                        stats.[<$txrx _z_put_pl_bytes>].[<inc_ $space>](p.payload.len());
-                    }
-                    RequestBody::Del(_) => {
-                        stats.[<$txrx _z_del_msgs>].[<inc_ $space>](1);
-                    }
                     RequestBody::Query(q) => {
                         stats.[<$txrx _z_query_msgs>].[<inc_ $space>](1);
                         stats.[<$txrx _z_query_pl_bytes>].[<inc_ $space>](
                             q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0),
                         );
                     }
-                    RequestBody::Pull(_) => (),
                 }
             }
         }
@@ -535,21 +508,30 @@ macro_rules! inc_res_stats {
             if let Some(stats) = $face.stats.as_ref() {
                 use zenoh_buffers::buffer::Buffer;
                 match &$body {
-                    ResponseBody::Put(p) => {
-                        stats.[<$txrx _z_put_msgs>].[<inc_ $space>](1);
-                        stats.[<$txrx _z_put_pl_bytes>].[<inc_ $space>](p.payload.len());
-                    }
                     ResponseBody::Reply(r) => {
                         stats.[<$txrx _z_reply_msgs>].[<inc_ $space>](1);
-                        stats.[<$txrx _z_reply_pl_bytes>].[<inc_ $space>](r.payload.len());
+                        let mut n = 0;
+                        match &r.payload {
+                            ReplyBody::Put(p) => {
+                                if let Some(a) = p.ext_attachment.as_ref() {
+                                    n += a.buffer.len();
+                                }
+                                n += p.payload.len();
+                            }
+                            ReplyBody::Del(d) => {
+                                if let Some(a) = d.ext_attachment.as_ref() {
+                                    n += a.buffer.len();
+                                }
+                            }
+                        }
+                        stats.[<$txrx _z_reply_pl_bytes>].[<inc_ $space>](n);
                     }
                     ResponseBody::Err(e) => {
                         stats.[<$txrx _z_reply_msgs>].[<inc_ $space>](1);
                         stats.[<$txrx _z_reply_pl_bytes>].[<inc_ $space>](
-                            e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0),
+                            e.payload.len()
                         );
                     }
-                    ResponseBody::Ack(_) => (),
                 }
             }
         }
@@ -562,6 +544,8 @@ pub fn route_query(
     face: &Arc<FaceState>,
     expr: &WireExpr,
     qid: RequestId,
+    ext_qos: ext::QoSType,
+    ext_tstamp: Option<ext::TimestampType>,
     ext_target: TargetType,
     ext_budget: Option<BudgetType>,
     ext_timeout: Option<TimeoutType>,
@@ -603,172 +587,78 @@ pub fn route_query(
                 let queries_lock = zwrite!(tables_ref.queries_lock);
                 let route =
                     compute_final_route(&rtables, &route, face, &mut expr, &ext_target, query);
-                let local_replies =
-                    rtables
-                        .hat_code
-                        .compute_local_replies(&rtables, &prefix, expr.suffix, face);
-                let zid = rtables.zid;
-
                 let timeout = ext_timeout.unwrap_or(rtables.queries_default_timeout);
-
                 drop(queries_lock);
                 drop(rtables);

-                for (wexpr, payload) in local_replies {
-                    let payload = ResponseBody::Reply(Reply {
-                        timestamp: None,
-                        encoding: Encoding::default(),
-                        ext_sinfo: None,
-                        ext_consolidation: ConsolidationType::default(),
-                        #[cfg(feature = "shared-memory")]
-                        ext_shm: None,
-                        ext_attachment: None, // @TODO: expose it in the API
-                        ext_unknown: vec![],
-                        payload,
-                    });
-                    #[cfg(feature = "stats")]
-                    if !admin {
-                        inc_res_stats!(face, tx, user, payload)
-                    } else {
-                        inc_res_stats!(face, tx, admin, payload)
-                    }
-
-                    face.primitives
-                        .clone()
-                        .send_response(RoutingContext::with_expr(
-                            Response {
-                                rid: qid,
-                                wire_expr: wexpr,
-                                payload,
-                                ext_qos: response::ext::QoSType::declare_default(),
-                                ext_tstamp: None,
-                                ext_respid: Some(response::ext::ResponderIdType {
-                                    zid,
-                                    eid: 0, // @TODO use proper ResponderId (#703)
-                                }),
-                            },
-                            expr.full_expr().to_string(),
-                        ));
-                }
-
                 if route.is_empty() {
                     tracing::debug!(
                         "Send final reply {}:{} (no matching queryables or not master)",
                         face,
                         qid
                     );
-                    face.primitives
-                        .clone()
-                        .send_response_final(RoutingContext::with_expr(
-                            ResponseFinal {
-                                rid: qid,
-                                ext_qos: response::ext::QoSType::response_final_default(),
-                                ext_tstamp: None,
-                            },
-                            expr.full_expr().to_string(),
-                        ));
+                    face.primitives.clone().send_response_final(ResponseFinal {
+                        rid: qid,
+                        ext_qos: response::ext::QoSType::RESPONSE_FINAL,
+                        ext_tstamp: None,
+                    });
                 } else {
-                    #[cfg(feature = "complete_n")]
-                    {
-                        for ((outface, key_expr, context), qid, t) in route.values() {
-                            QueryCleanup::spawn_query_clean_up_task(
-                                outface, tables_ref, *qid, timeout,
-                            );
-                            #[cfg(feature = "stats")]
-                            if !admin {
-                                inc_req_stats!(outface, tx, user, body)
-                            } else {
-                                inc_req_stats!(outface, tx, admin, body)
-                            }
-
-                            tracing::trace!("Propagate query {}:{} to {}", face, qid, outface);
-                            outface.primitives.send_request(RoutingContext::with_expr(
-                                Request {
-                                    id: *qid,
-                                    wire_expr: key_expr.into(),
-                                    ext_qos: ext::QoSType::request_default(),
-                                    ext_tstamp: None,
-                                    ext_nodeid: ext::NodeIdType { node_id: *context },
-                                    ext_target: *t,
-                                    ext_budget,
-                                    ext_timeout,
-                                    payload: body.clone(),
-                                },
-                                expr.full_expr().to_string(),
-                            ));
+                    for ((outface, key_expr, context), qid) in route.values() {
+                        QueryCleanup::spawn_query_clean_up_task(outface, tables_ref, *qid, timeout);
+                        #[cfg(feature = "stats")]
+                        if !admin {
+                            inc_req_stats!(outface, tx, user, body)
+                        } else {
+                            inc_req_stats!(outface, tx, admin, body)
                         }
-                    }

-                    #[cfg(not(feature = "complete_n"))]
-                    {
-                        for ((outface, key_expr, context), qid) in route.values() {
-                            QueryCleanup::spawn_query_clean_up_task(
-                                outface, tables_ref, *qid, timeout,
-                            );
-                            #[cfg(feature = "stats")]
-                            if !admin {
-                                inc_req_stats!(outface, tx, user, body)
-                            } else {
-                                inc_req_stats!(outface, tx, admin, body)
-                            }
-
-                            tracing::trace!("Propagate query {}:{} to {}", face, qid, outface);
-                            outface.primitives.send_request(RoutingContext::with_expr(
-                                Request {
-                                    id: *qid,
-                                    wire_expr: key_expr.into(),
-                                    ext_qos: ext::QoSType::request_default(),
-                                    ext_tstamp: None,
-                                    ext_nodeid: ext::NodeIdType { node_id: *context },
-                                    ext_target,
-                                    ext_budget,
-                                    ext_timeout,
-                                    payload: body.clone(),
-                                },
-                                expr.full_expr().to_string(),
-                            ));
-                        }
+                        tracing::trace!("Propagate query {}:{} to {}", face, qid, outface);
+                        outface.primitives.send_request(Request {
+                            id: *qid,
+                            wire_expr: key_expr.into(),
+                            ext_qos,
+                            ext_tstamp,
+                            ext_nodeid: ext::NodeIdType { node_id: *context },
+                            ext_target,
+                            ext_budget,
+                            ext_timeout,
+                            payload: body.clone(),
+                        });
                     }
                 }
             } else {
                 tracing::debug!("Send final reply {}:{} (not master)", face, qid);
                 drop(rtables);
-                face.primitives
-                    .clone()
-                    .send_response_final(RoutingContext::with_expr(
-                        ResponseFinal {
-                            rid: qid,
-                            ext_qos: response::ext::QoSType::response_final_default(),
-                            ext_tstamp: None,
-                        },
-                        expr.full_expr().to_string(),
-                    ));
+                face.primitives.clone().send_response_final(ResponseFinal {
+                    rid: qid,
+                    ext_qos: response::ext::QoSType::RESPONSE_FINAL,
+                    ext_tstamp: None,
+                });
             }
         }
         None => {
             tracing::error!(
-                "Route query with unknown scope {}! Send final reply.",
-                expr.scope
+                "{} Route query with unknown scope {}! Send final reply.",
+                face,
+                expr.scope,
             );
             drop(rtables);
-            face.primitives
-                .clone()
-                .send_response_final(RoutingContext::with_expr(
-                    ResponseFinal {
-                        rid: qid,
-                        ext_qos: response::ext::QoSType::response_final_default(),
-                        ext_tstamp: None,
-                    },
-                    "".to_string(),
-                ));
+            face.primitives.clone().send_response_final(ResponseFinal {
+                rid: qid,
+                ext_qos: response::ext::QoSType::RESPONSE_FINAL,
+                ext_tstamp: None,
+            });
         }
     }
 }

+#[allow(clippy::too_many_arguments)]
 pub(crate) fn route_send_response(
     tables_ref: &Arc<TablesLock>,
     face: &mut Arc<FaceState>,
     qid: RequestId,
+    ext_qos: ext::QoSType,
+    ext_tstamp: Option<ext::TimestampType>,
     ext_respid: Option<ResponderIdType>,
     key_expr: WireExpr,
     body: ResponseBody,
@@ -794,24 +684,17 @@ pub(crate) fn route_send_response(
                 inc_res_stats!(query.src_face, tx, admin, body)
             }

-            query
-                .src_face
-                .primitives
-                .clone()
-                .send_response(RoutingContext::with_expr(
-                    Response {
-                        rid: query.src_qid,
-                        wire_expr: key_expr.to_owned(),
-                        payload: body,
-                        ext_qos: response::ext::QoSType::response_default(),
-                        ext_tstamp: None,
-                        ext_respid,
-                    },
-                    "".to_string(), // @TODO provide the proper key expression of the response for interceptors
-                ));
+            query.src_face.primitives.send_response(Response {
+                rid: query.src_qid,
+                wire_expr: key_expr.to_owned(),
+                payload: body,
+                ext_qos,
+                ext_tstamp,
+                ext_respid,
+            });
         }
         None => tracing::warn!(
-            "Route reply {}:{} from {}: Query nof found!",
+            "Route reply {}:{} from {}: Query not found!",
             face,
             qid,
             face
@@ -837,7 +720,7 @@ pub(crate) fn route_send_response_final(
             finalize_pending_query(query);
         }
         None => tracing::warn!(
-            "Route final reply {}:{} from {}: Query nof found!",
+            "Route final reply {}:{} from {}: Query not found!",
             face,
             qid,
             face
@@ -862,13 +745,10 @@ pub(crate) fn finalize_pending_query(query: (Arc<Query>, CancellationToken)) {
             .src_face
             .primitives
             .clone()
-            .send_response_final(RoutingContext::with_expr(
-                ResponseFinal {
-                    rid: query.src_qid,
-                    ext_qos: response::ext::QoSType::response_final_default(),
-                    ext_tstamp: None,
-                },
-                "".to_string(),
-            ));
+            .send_response_final(ResponseFinal {
+                rid: query.src_qid,
+                ext_qos: response::ext::QoSType::RESPONSE_FINAL,
+                ext_tstamp: None,
+            });
     }
 }
diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs
index edfcf26925..01ff9b2817 100644
--- a/zenoh/src/net/routing/dispatcher/resource.rs
+++ b/zenoh/src/net/routing/dispatcher/resource.rs
@@ -11,39 +11,38 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::face::FaceState;
-use super::tables::{Tables, TablesLock};
-use crate::net::routing::dispatcher::face::Face;
-use crate::net::routing::RoutingContext;
-use std::any::Any;
-use std::collections::HashMap;
-use std::convert::TryInto;
-use std::hash::{Hash, Hasher};
-use std::sync::{Arc, Weak};
+use std::{
+    any::Any,
+    collections::HashMap,
+    convert::TryInto,
+    hash::{Hash, Hasher},
+    sync::{Arc, Weak},
+};
+
 use zenoh_config::WhatAmI;
-#[cfg(feature = "complete_n")]
-use zenoh_protocol::network::request::ext::TargetType;
-use zenoh_protocol::network::RequestId;
-use zenoh_protocol::zenoh::PushBody;
 use zenoh_protocol::{
     core::{key_expr::keyexpr, ExprId, WireExpr},
     network::{
         declare::{
-            ext, queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, Declare,
+            ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare,
             DeclareBody, DeclareKeyExpr,
         },
-        Mapping,
+        interest::InterestId,
+        Mapping, RequestId,
     },
 };
 use zenoh_sync::get_mut_unchecked;

+use super::{
+    face::FaceState,
+    tables::{Tables, TablesLock},
+};
+use crate::net::routing::{dispatcher::face::Face, RoutingContext};
+
 pub(crate) type NodeId = u16;

 pub(crate) type Direction = (Arc<FaceState>, WireExpr<'static>, NodeId);
 pub(crate) type Route = HashMap<usize, Direction>;
-#[cfg(feature = "complete_n")]
-pub(crate) type QueryRoute = HashMap<usize, (Direction, RequestId, TargetType)>;
-#[cfg(not(feature = "complete_n"))]
 pub(crate) type QueryRoute = HashMap<usize, (Direction, RequestId)>;
 pub(crate) struct QueryTargetQabl {
     pub(crate) direction: Direction,
@@ -51,19 +50,33 @@ pub(crate) struct QueryTargetQabl {
     pub(crate) distance: f64,
 }
 pub(crate) type QueryTargetQablSet = Vec<QueryTargetQabl>;
-pub(crate) type PullCaches = Vec<Arc<SessionContext>>;

 pub(crate) struct SessionContext {
     pub(crate) face: Arc<FaceState>,
     pub(crate) local_expr_id: Option<ExprId>,
     pub(crate) remote_expr_id: Option<ExprId>,
     pub(crate) subs: Option<SubscriberInfo>,
-    pub(crate) qabl: Option<QueryableInfo>,
-    pub(crate) last_values: HashMap<String, PushBody>,
+    pub(crate) qabl: Option<QueryableInfoType>,
+    pub(crate) token: bool,
     pub(crate) in_interceptor_cache: Option<Box<dyn Any + Send + Sync>>,
     pub(crate) e_interceptor_cache: Option<Box<dyn Any + Send + Sync>>,
 }

+impl SessionContext {
+    pub(crate) fn new(face: Arc<FaceState>) -> Self {
+        Self {
+            face,
+            local_expr_id: None,
+            remote_expr_id: None,
+            subs: None,
+            qabl: None,
+            token: false,
+            in_interceptor_cache: None,
+            e_interceptor_cache: None,
+        }
+    }
+}
+
 #[derive(Default)]
 pub(crate) struct RoutesIndexes {
     pub(crate) routers: Vec<NodeId>,
@@ -121,7 +134,6 @@ impl QueryRoutes {

 pub(crate) struct ResourceContext {
     pub(crate) matches: Vec<Weak<Resource>>,
-    pub(crate) matching_pulls: Option<Arc<PullCaches>>,
     pub(crate) hat: Box<dyn Any + Send + Sync>,
     pub(crate) valid_data_routes: bool,
     pub(crate) data_routes: DataRoutes,
@@ -133,7 +145,6 @@ impl ResourceContext {
     fn new(hat: Box<dyn Any + Send + Sync>) -> ResourceContext {
         ResourceContext {
             matches: Vec::new(),
-            matching_pulls: None,
             hat,
             valid_data_routes: false,
             data_routes: DataRoutes::default(),
@@ -159,14 +170,6 @@ impl ResourceContext {
     pub(crate) fn disable_query_routes(&mut self) {
         self.valid_query_routes = false;
     }
-
-    pub(crate) fn update_matching_pulls(&mut self, pulls: Arc<PullCaches>) {
-        self.matching_pulls = Some(pulls);
-    }
-
-    pub(crate) fn disable_matching_pulls(&mut self) {
-        self.matching_pulls = None;
-    }
 }

 pub struct Resource {
@@ -235,6 +238,16 @@ impl Resource {
         self.context.as_mut().unwrap()
     }

+    #[inline(always)]
+    pub(crate) fn matches(&self, other: &Arc<Resource>) -> bool {
+        self.context
+            .as_ref()
+            .unwrap()
+            .matches
+            .iter()
+            .any(|m| m.upgrade().is_some_and(|m| &m == other))
+    }
+
     pub fn nonwild_prefix(res: &Arc<Resource>) -> (Option<Arc<Resource>>, String) {
         match &res.nonwild_prefix {
             None => (Some(res.clone()), "".to_string()),
@@ -448,39 +461,44 @@ impl Resource {
     }

     #[inline]
-    pub fn decl_key(res: &Arc<Resource>, face: &mut Arc<FaceState>) -> WireExpr<'static> {
+    pub fn decl_key(
+        res: &Arc<Resource>,
+        face: &mut Arc<FaceState>,
+        push: bool,
+    ) -> WireExpr<'static> {
         let (nonwild_prefix, wildsuffix) = Resource::nonwild_prefix(res);
         match nonwild_prefix {
             Some(mut nonwild_prefix) => {
-                let ctx = get_mut_unchecked(&mut nonwild_prefix)
+                if let Some(ctx) = get_mut_unchecked(&mut nonwild_prefix)
                     .session_ctxs
-                    .entry(face.id)
-                    .or_insert_with(|| {
-                        Arc::new(SessionContext {
-                            face: face.clone(),
-                            local_expr_id: None,
-                            remote_expr_id: None,
-                            subs: None,
-                            qabl: None,
-                            last_values: HashMap::new(),
-                            in_interceptor_cache: None,
-                            e_interceptor_cache: None,
-                        })
-                    });
-
-                if let Some(expr_id) = ctx.remote_expr_id {
-                    WireExpr {
-                        scope: expr_id,
-                        suffix: wildsuffix.into(),
-                        mapping: Mapping::Receiver,
+                    .get(&face.id)
+                {
+                    if let Some(expr_id) = ctx.remote_expr_id {
+                        return WireExpr {
+                            scope: expr_id,
+                            suffix: wildsuffix.into(),
+                            mapping: Mapping::Receiver,
+                        };
                     }
-                } else if let Some(expr_id) = ctx.local_expr_id {
-                    WireExpr {
-                        scope: expr_id,
-                        suffix: wildsuffix.into(),
-                        mapping: Mapping::Sender,
+                    if let Some(expr_id) = ctx.local_expr_id {
+                        return WireExpr {
+                            scope: expr_id,
+                            suffix: wildsuffix.into(),
+                            mapping: Mapping::Sender,
+                        };
                     }
-                } else {
+                }
+                if push
+                    || face.remote_key_interests.values().any(|res| {
+                        res.as_ref()
+                            .map(|res| res.matches(&nonwild_prefix))
+                            .unwrap_or(true)
+                    })
+                {
+                    let ctx = get_mut_unchecked(&mut nonwild_prefix)
+                        .session_ctxs
+                        .entry(face.id)
+                        .or_insert_with(|| Arc::new(SessionContext::new(face.clone())));
                     let expr_id = face.get_next_local_id();
                     get_mut_unchecked(ctx).local_expr_id = Some(expr_id);
                     get_mut_unchecked(face)
@@ -488,9 +506,10 @@ impl Resource {
                         .insert(expr_id, nonwild_prefix.clone());
                     face.primitives.send_declare(RoutingContext::with_expr(
                         Declare {
-                            ext_qos: ext::QoSType::declare_default(),
+                            interest_id: None,
+                            ext_qos: ext::QoSType::DECLARE,
                             ext_tstamp: None,
-                            ext_nodeid: ext::NodeIdType::default(),
+                            ext_nodeid: ext::NodeIdType::DEFAULT,
                             body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr {
                                 id: expr_id,
                                 wire_expr: nonwild_prefix.expr().into(),
@@ -504,6 +523,8 @@ impl Resource {
                         suffix: wildsuffix.into(),
                         mapping: Mapping::Sender,
                     }
+                } else {
+                    res.expr().into()
                 }
             }
             None => wildsuffix.into(),
@@ -516,9 +537,9 @@ impl Resource {
         prefix: &Arc<Resource>,
         suffix: &'a str,
         sid: usize,
-        checkchilds: bool,
+        checkchildren: bool,
     ) -> WireExpr<'a> {
-        if checkchilds && !suffix.is_empty() {
+        if checkchildren && !suffix.is_empty() {
             let (chunk, rest) = suffix.split_at(suffix.find('/').unwrap_or(suffix.len()));
             if let Some(child) = prefix.children.get(chunk) {
                 return get_best_key_(child, rest, sid, true);
@@ -668,7 +689,7 @@ impl Resource {
     }
 }

-pub fn register_expr(
+pub(crate) fn register_expr(
     tables: &TablesLock,
     face: &mut Arc<FaceState>,
     expr_id: ExprId,
@@ -684,7 +705,11 @@ pub(crate) fn register_expr(
                 let mut fullexpr = prefix.expr();
                 fullexpr.push_str(expr.suffix.as_ref());
                 if res.expr() != fullexpr {
-                    tracing::error!("Resource {} remapped. Remapping unsupported!", expr_id);
+                    tracing::error!(
+                        "{} Resource {} remapped. Remapping unsupported!",
+                        face,
+                        expr_id
+                    );
                 }
             }
             None => {
@@ -711,21 +736,12 @@ pub(crate) fn register_expr(
                     Resource::match_resource(&wtables, &mut res, matches);
                     (res, wtables)
                 };
-                get_mut_unchecked(&mut res)
+                let ctx = get_mut_unchecked(&mut res)
                     .session_ctxs
                     .entry(face.id)
-                    .or_insert_with(|| {
-                        Arc::new(SessionContext {
-                            face: face.clone(),
-                            local_expr_id: None,
-                            remote_expr_id: Some(expr_id),
-                            subs: None,
-                            qabl: None,
-                            last_values: HashMap::new(),
-                            in_interceptor_cache: None,
-                            e_interceptor_cache: None,
-                        })
-                    });
+                    .or_insert_with(|| Arc::new(SessionContext::new(face.clone())));
+
+                get_mut_unchecked(ctx).remote_expr_id = Some(expr_id);

                 get_mut_unchecked(face)
                     .remote_mappings
@@ -735,15 +751,80 @@ pub(crate) fn register_expr(
                 drop(wtables);
             }
         },
-        None => tracing::error!("Declare resource with unknown scope {}!", expr.scope),
+        None => tracing::error!(
+            "{} Declare resource with unknown scope {}!",
+            face,
+            expr.scope
+        ),
     }
 }

-pub fn unregister_expr(tables: &TablesLock, face: &mut Arc<FaceState>, expr_id: ExprId) {
+pub(crate) fn unregister_expr(tables: &TablesLock, face: &mut Arc<FaceState>, expr_id: ExprId) {
     let wtables = zwrite!(tables.tables);
     match get_mut_unchecked(face).remote_mappings.remove(&expr_id) {
         Some(mut res) => Resource::clean(&mut res),
-        None => tracing::error!("Undeclare unknown resource!"),
+        None => tracing::error!("{} Undeclare unknown resource!", face),
     }
     drop(wtables);
 }
+
+pub(crate) fn register_expr_interest(
+    tables: &TablesLock,
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+    expr: Option<&WireExpr>,
+) {
+    if let Some(expr) = expr {
+        let rtables = zread!(tables.tables);
+        match rtables
+            .get_mapping(face, &expr.scope, expr.mapping)
+            .cloned()
+        {
+            Some(mut prefix) => {
+                let res = Resource::get_resource(&prefix, &expr.suffix);
+                let (res, wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) {
+                    drop(rtables);
+                    let wtables = zwrite!(tables.tables);
+                    (res.unwrap(), wtables)
+                } else {
+                    let mut fullexpr = prefix.expr();
+                    fullexpr.push_str(expr.suffix.as_ref());
+                    let mut matches = keyexpr::new(fullexpr.as_str())
+                        .map(|ke| Resource::get_matches(&rtables, ke))
+                        .unwrap_or_default();
+                    drop(rtables);
+                    let mut wtables = zwrite!(tables.tables);
+                    let mut res =
+                        Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref());
+                    matches.push(Arc::downgrade(&res));
+                    Resource::match_resource(&wtables, &mut res, matches);
+                    (res, wtables)
+                };
+                get_mut_unchecked(face)
+                    .remote_key_interests
+                    .insert(id, Some(res));
+                drop(wtables);
+            }
+            None => tracing::error!(
+                "Declare keyexpr interest with unknown scope {}!",
+                expr.scope
+            ),
+        }
+    } else {
+        let wtables = zwrite!(tables.tables);
+        get_mut_unchecked(face)
+            .remote_key_interests
+            .insert(id, None);
+        drop(wtables);
+    }
+}
+
+pub(crate) fn unregister_expr_interest(
+    tables: &TablesLock,
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+) {
+    let wtables = zwrite!(tables.tables);
+    get_mut_unchecked(face).remote_key_interests.remove(&id);
+    drop(wtables);
+}
diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs
index 9e71eee853..2c5cfffffb 100644
--- a/zenoh/src/net/routing/dispatcher/tables.rs
+++ b/zenoh/src/net/routing/dispatcher/tables.rs
@@ -11,27 +11,30 @@
 // Contributors:
 //   ZettaScale Zenoh Team,
 //
-use super::face::FaceState;
-pub use super::pubsub::*;
-pub use super::queries::*;
-pub use super::resource::*;
-use crate::net::routing::hat;
-use crate::net::routing::hat::HatTrait;
-use crate::net::routing::interceptor::interceptor_factories;
-use crate::net::routing::interceptor::InterceptorFactory;
-use std::any::Any;
-use std::collections::HashMap;
-use std::sync::{Arc, Weak};
-use std::sync::{Mutex, RwLock};
-use std::time::Duration;
+use std::{
+    any::Any,
+    collections::HashMap,
+    sync::{Arc, Mutex, RwLock, Weak},
+    time::Duration,
+};
+
 use uhlc::HLC;
-use zenoh_config::unwrap_or_default;
-use zenoh_config::Config;
-use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId};
-use zenoh_protocol::network::Mapping;
+use zenoh_config::{unwrap_or_default, Config};
+use zenoh_protocol::{
+    core::{ExprId, WhatAmI, ZenohIdProto},
+    network::Mapping,
+};
 use zenoh_result::ZResult;
 use zenoh_sync::get_mut_unchecked;

+use super::face::FaceState;
+pub use super::{pubsub::*, queries::*, resource::*};
+use crate::net::routing::{
+    dispatcher::interests::finalize_pending_interests,
+    hat::{self, HatTrait},
+    interceptor::{interceptor_factories, InterceptorFactory},
+};
+
 pub(crate) struct RoutingExpr<'a> {
     pub(crate) prefix: &'a Arc<Resource>,
     pub(crate) suffix: &'a str,
@@ -58,7 +61,7 @@ impl<'a> RoutingExpr<'a> {
 }

 pub struct Tables {
-    pub(crate) zid: ZenohId,
+    pub(crate) zid: ZenohIdProto,
     pub(crate) whatami: WhatAmI,
     pub(crate) face_counter: usize,
     #[allow(dead_code)]
@@ -70,14 +73,13 @@ pub struct Tables {
     pub(crate) mcast_groups: Vec<Arc<FaceState>>,
     pub(crate) mcast_faces: Vec<Arc<FaceState>>,
     pub(crate) interceptors: Vec<InterceptorFactory>,
-    pub(crate) pull_caches_lock: Mutex<()>,
     pub(crate) hat: Box<dyn Any + Send + Sync>,
     pub(crate) hat_code: Arc<dyn HatTrait + Send + Sync>, // @TODO make this a Box
 }

 impl Tables {
     pub fn new(
-        zid: ZenohId,
+        zid: ZenohIdProto,
         whatami: WhatAmI,
         hlc: Option<Arc<HLC>>,
         config: &Config,
@@ -101,7 +103,6 @@ impl Tables {
             mcast_groups: vec![],
             mcast_faces: vec![],
             interceptors: interceptor_factories(config)?,
-            pull_caches_lock: Mutex::new(()),
             hat: hat_code.new_tables(router_peers_failover_brokering),
             hat_code: hat_code.into(),
         })
@@ -144,7 +145,7 @@ impl Tables {
     }

     #[inline]
-    pub(crate) fn get_face(&self, zid: &ZenohId) -> Option<&Arc<FaceState>> {
+    pub(crate) fn get_face(&self, zid: &ZenohIdProto) -> Option<&Arc<FaceState>> {
         self.faces.values().find(|face| face.zid == *zid)
     }

@@ -176,6 +177,9 @@ pub fn close_face(tables: &TablesLock, face: &Weak<FaceState>) {
     finalize_pending_queries(tables, &mut face);
     let mut declares = vec![];
     let ctrl_lock = zlock!(tables.ctrl_lock);
+    finalize_pending_interests(tables, &mut face, &mut |p, m| {
+        declares.push((p.clone(), m))
+    });
     ctrl_lock.close_face(tables, &mut face, &mut |p, m| declares.push((p.clone(), m)));
     drop(ctrl_lock);
     for (p, m) in declares {
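// --- Review note (editor's addition): the lock discipline of close_face
// above, in isolation. Declarations produced while the control lock is held
// are buffered and only sent after the lock is released, so primitives never
// run under the lock (stand-in types):
use std::sync::Mutex;

fn close_face_sketch<P, M>(
    ctrl_lock: &Mutex<()>,
    drain: impl FnOnce(&mut Vec<(P, M)>), // finalize_pending_interests + hat close_face
    send: impl Fn(P, M),                  // p.send_declare(m)
) {
    let guard = ctrl_lock.lock().unwrap();
    let mut declares = Vec::new();
    drain(&mut declares);
    drop(guard); // release ctrl_lock before touching primitives
    for (p, m) in declares {
        send(p, m);
    }
}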
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::Arc; + +use zenoh_keyexpr::keyexpr; +use zenoh_protocol::{ + core::WireExpr, + network::{ + declare::{common::ext, TokenId}, + interest::InterestId, + }, +}; + +use super::{ + face::FaceState, + tables::{NodeId, TablesLock}, +}; +use crate::net::routing::{ + hat::{HatTrait, SendDeclare}, + router::Resource, +}; + +#[allow(clippy::too_many_arguments)] +pub(crate) fn declare_token( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: TokenId, + expr: &WireExpr, + node_id: NodeId, + interest_id: Option, + send_declare: &mut SendDeclare, +) { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + tracing::debug!( + "{} Declare token {} ({}{})", + face, + id, + prefix.expr(), + expr.suffix + ); + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_token( + &mut wtables, + face, + id, + &mut res, + node_id, + interest_id, + send_declare, + ); + drop(wtables); + } + None => tracing::error!( + "{} Declare token {} for unknown scope {}!", + face, + id, + expr.scope + ), + } +} + +pub(crate) fn undeclare_token( + hat_code: &(dyn HatTrait + Send + Sync), + tables: &TablesLock, + face: &mut Arc, + id: TokenId, + expr: &ext::WireExprType, + node_id: NodeId, + send_declare: &mut SendDeclare, +) { + let (res, mut wtables) = if expr.wire_expr.is_empty() { + (None, zwrite!(tables.tables)) + } else { + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.wire_expr.scope, expr.wire_expr.mapping) + .cloned() + { + Some(mut prefix) => { + match Resource::get_resource(&prefix, expr.wire_expr.suffix.as_ref()) { + Some(res) => { + drop(rtables); + (Some(res), zwrite!(tables.tables)) + } + None => { + // Here we create a Resource that will immediately be removed after treatment + // TODO this could be improved + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.wire_expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = Resource::make_resource( + &mut wtables, + &mut prefix, + expr.wire_expr.suffix.as_ref(), + ); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (Some(res), wtables) + } + } + } + None => { + tracing::error!( + "{} Undeclare liveliness token with unknown scope {}", + face, + expr.wire_expr.scope + ); + return; + } + } + }; + + if let Some(res) = hat_code.undeclare_token(&mut wtables, face, id, res, node_id, send_declare) + { + tracing::debug!("{} Undeclare token {} ({})", face, id, res.expr()); + } else { + 
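+        // None means the hat had no token registered under this id and no key expression was provided to fall back on, so there is nothing to clean up.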
tracing::error!("{} Undeclare unknown token {}", face, id); + } +} diff --git a/zenoh/src/net/routing/hat/client/interests.rs b/zenoh/src/net/routing/hat/client/interests.rs new file mode 100644 index 0000000000..9347b3f0e5 --- /dev/null +++ b/zenoh/src/net/routing/hat/client/interests.rs @@ -0,0 +1,225 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Interest, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, token::declare_token_interest, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + interests::{CurrentInterest, CurrentInterestCleanup}, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, + RoutingContext, +}; + +pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for (res, options) in face_hat_mut!(&mut src_face).remote_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options: *options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res.as_ref().map(|res| Resource::decl_key(res, face, true)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options: *options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + } +} + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + send_declare: &mut SendDeclare, + ) { + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + face_hat_mut!(face) + .remote_interests + .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); + + let interest = Arc::new(CurrentInterest { + src_face: face.clone(), + src_interest_id: id, + }); + + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + if mode.current() && options.tokens() { + let dst_face_mut = get_mut_unchecked(dst_face); + let cancellation_token = dst_face_mut.task_controller.get_cancellation_token(); + dst_face_mut + .pending_current_interests + .insert(id, 
(interest.clone(), cancellation_token)); + CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); + } + let wire_expr = res + .as_ref() + .map(|res| Resource::decl_key(res, dst_face, true)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + + if mode.current() { + if options.tokens() { + if let Some(interest) = Arc::into_inner(interest) { + tracing::debug!( + "Propagate DeclareFinal {}:{}", + interest.src_face, + interest.src_interest_id + ); + send_declare( + &interest.src_face.primitives, + RoutingContext::new(Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } + } else { + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } + } + } + + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId) { + if let Some(interest) = face_hat_mut!(face).remote_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami != WhatAmI::Client) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.res == interest.0 && local_interest.options == interest.1 + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } +} diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 8e8d8d4cb6..a1a1eb08d1 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -17,19 +17,27 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::routing::{ - dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, - }, - runtime::Runtime, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, +}; + +use token::{token_new_face, undeclare_simple_token}; +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId, TokenId}, + interest::{InterestId, InterestOptions}, + Oam, }; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; use self::{ - pubsub::{pubsub_new_face, undeclare_client_subscription}, - queries::{queries_new_face, undeclare_client_queryable}, + interests::interests_new_face, + pubsub::{pubsub_new_face, undeclare_simple_subscription}, + queries::{queries_new_face, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -38,20 +46,18 @@ use super::{ }, HatBaseTrait, HatTrait, SendDeclare, }; -use std::{ - any::Any, - collections::{HashMap, HashSet}, - sync::Arc, +use crate::net::{ + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, }; -use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; -use zenoh_protocol::network::Oam; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; +mod interests; mod pubsub; mod queries; +mod token; macro_rules! face_hat { ($f:expr) => { @@ -99,8 +105,10 @@ impl HatBaseTrait for HatCode { face: &mut Face, send_declare: &mut SendDeclare, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state, send_declare); queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -112,8 +120,10 @@ impl HatBaseTrait for HatCode { _transport: &TransportUnicast, send_declare: &mut SendDeclare, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state, send_declare); queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -126,6 +136,19 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; + + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); + for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -138,15 +161,9 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -166,15 +183,9 @@ impl 
HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -192,6 +203,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -199,11 +215,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -211,13 +223,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -295,19 +304,27 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_interests: HashMap>, InterestOptions)>, + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, + remote_qabls: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, } impl HatFace { fn new() -> Self { Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + remote_interests: HashMap::new(), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 3f194e4e56..5a19f3549c 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,28 +11,36 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use 
crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{HatPubSubTrait, SendDeclare, Sources}, + router::{update_data_routes_from, RoutesIndexes}, + RoutingContext, + }, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, @@ -42,22 +50,23 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + if src_face.id != dst_face.id + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, true); send_declare( &dst_face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: *sub_info, }), @@ -92,59 +101,45 @@ fn propagate_simple_subscription( } } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn 
declare_simple_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + register_simple_subscription(tables, face, id, res, sub_info); - propagate_simple_subscription(tables, res, &propa_sub_info, face, send_declare); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -153,11 +148,12 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -168,7 +164,7 @@ fn declare_client_subscription( } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -187,77 +183,78 @@ fn propagate_forget_simple_subscription( send_declare: &mut SendDeclare, ) { for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - face_hat_mut!(face).local_subs.remove(res); } } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res, send_declare); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = 
get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - face_hat_mut!(face).local_subs.remove(res); + let mut simple_subs = simple_subs(res); + if simple_subs.is_empty() { + propagate_forget_simple_subscription(tables, res, send_declare); + } + if simple_subs.len() == 1 { + let face = &mut simple_subs[0]; + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } } } } -fn forget_client_subscription( + +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, send_declare: &mut SendDeclare, -) { - undeclare_client_subscription(tables, face, res, send_declare); +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_simple_subscription(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face( @@ -267,7 +264,6 @@ pub(super) fn pubsub_new_face( ) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; for src_face in tables .faces @@ -275,7 +271,7 @@ pub(super) fn pubsub_new_face( .cloned() .collect::>>() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { propagate_simple_subscription_to( tables, face, @@ -286,6 +282,8 @@ pub(super) fn pubsub_new_face( ); } } + // recompute routes + update_data_routes_from(tables, &mut tables.root_res.clone()); } impl HatPubSubTrait for HatCode { @@ -293,30 +291,32 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, res, sub_info, send_declare); + declare_simple_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + _res: Option>, _node_id: NodeId, send_declare: &mut SendDeclare, - ) { - forget_client_subscription(tables, face, res, send_declare); + ) -> Option> { + forget_simple_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { // Compute the list of known suscriptions (keys) let mut subs = HashMap::new(); for src_face in tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { // Insert the key in the list of known suscriptions let srcs = subs.entry(sub.clone()).or_insert_with(Sources::empty); // Append src_face as a suscription source in the proper list @@ -355,6 +355,53 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + if source_type == WhatAmI::Client { + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| 
intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) + }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() @@ -366,20 +413,11 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -399,4 +437,63 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + #[zenoh_macros::unstable] + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + for face in tables + .faces + .values() + .filter(|f| f.whatami != WhatAmI::Client) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .map(|intres| intres.includes(key_expr)) + .unwrap_or(false) + }) + .unwrap_or(true) + }) && face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .map(|subres| subres.intersects(key_expr)) + .unwrap_or(false) + }) { + matching_subscriptions.insert(face.id, face.clone()); + } + } + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami == WhatAmI::Client { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 445f618845..e711ccf2e8 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,48 +11,52 @@ // Contributors: // ZettaScale Zenoh Team, // -use 
super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, + }, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, + DeclareQueryable, QueryableId, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; - this.distance = std::cmp::min(this.distance, info.distance); - this -} +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{HatQueriesTrait, SendDeclare, Sources}, + router::RoutesIndexes, + RoutingContext, +}; -#[cfg(not(feature = "complete_n"))] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + _tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -69,10 +73,7 @@ fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } fn propagate_simple_queryable( @@ -84,26 +85,36 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) + let current = face_hat!(dst_face).local_qabls.get(res); + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) + && (current.is_none() || 
current.unwrap().1 != info) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + }) + .unwrap_or(true) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, &mut dst_face); + .insert(res.clone(), (id, info)); + let key_expr = Resource::decl_key(res, &mut dst_face, true); send_declare( &dst_face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -115,46 +126,40 @@ fn propagate_simple_queryable( } } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { let res = get_mut_unchecked(res); - tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -173,81 +178,84 @@ fn propagate_forget_simple_queryable( send_declare: &mut SendDeclare, ) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - - face_hat_mut!(face).local_qabls.remove(res); } } } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client queryable {} for {}", 
res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res, send_declare); - } else { - propagate_simple_queryable(tables, res, None, send_declare); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(face).local_qabls.remove(res); + let mut simple_qabls = simple_qabls(res); + if simple_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res, send_declare); + } else { + propagate_simple_queryable(tables, res, None, send_declare); + } + if simple_qabls.len() == 1 { + let face = &mut simple_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } } } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, send_declare: &mut SendDeclare, -) { - undeclare_client_queryable(tables, face, res, send_declare); +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_simple_queryable(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } } pub(super) fn queries_new_face( @@ -261,7 +269,7 @@ pub(super) fn queries_new_face( .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.values() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone()), send_declare); } } @@ -276,30 +284,32 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, res, qabl_info, send_declare); + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + _res: Option>, _node_id: NodeId, send_declare: &mut SendDeclare, - ) { - forget_client_queryable(tables, face, res, send_declare); + ) -> Option> { + forget_simple_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { // Compute the list of known queryables (keys) let mut qabls = HashMap::new(); for src_face in tables.faces.values() { - 
for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { // Insert the key in the list of known queryables let srcs = qabls.entry(qabl.clone()).or_insert_with(Sources::empty); // Append src_face as a queryable source in the proper list @@ -338,6 +348,18 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + if source_type == WhatAmI::Client { + if let Some(face) = tables.faces.values().find(|f| f.whatami != WhatAmI::Client) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() @@ -367,44 +389,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/client/token.rs b/zenoh/src/net/routing/hat/client/token.rs new file mode 100644 index 0000000000..9e5923425c --- /dev/null +++ b/zenoh/src/net/routing/hat/client/token.rs @@ -0,0 +1,427 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode}, + Declare, DeclareBody, DeclareToken, UndeclareToken, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn propagate_simple_token_to( + _tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { + if (src_face.id != dst_face.id || dst_face.whatami == WhatAmI::Client) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, true); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); + } +} + +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); + } +} + +fn register_simple_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_simple_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + interest_id: Option, + send_declare: &mut SendDeclare, +) { + register_simple_token(tables, face, id, res); + + propagate_simple_token(tables, res, face, send_declare); + + let wire_expr = Resource::decl_key(res, face, true); + if let Some(interest_id) = interest_id { + if let Some((interest, _)) = face.pending_current_interests.get(&interest_id) { + send_declare( + &interest.src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ) + } + } +} + +#[inline] +fn simple_tokens(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { + for face in tables.faces.values_mut() { + if let Some(id) = 
face_hat_mut!(face).local_tokens.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } else if face_hat!(face) + .remote_interests + .values() + .any(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); + } + } +} + +pub(super) fn undeclare_simple_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut simple_tokens = simple_tokens(res); + if simple_tokens.is_empty() { + propagate_forget_simple_token(tables, res, send_declare); + } + if simple_tokens.len() == 1 { + let face = &mut simple_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } + } +} + +fn forget_simple_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + send_declare: &mut SendDeclare, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_simple_token(tables, face, &mut res, send_declare); + Some(res) + } else if let Some(mut res) = res { + undeclare_simple_token(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } +} + +pub(super) fn token_new_face( + tables: &mut Tables, + face: &mut Arc, + send_declare: &mut SendDeclare, +) { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + propagate_simple_token_to(tables, face, token, &mut src_face.clone(), send_declare); + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + face_hat!(src_face) + .remote_tokens + .values() + .any(|token| token.context.is_some() && token.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; 
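+                    // An id is allocated and remembered in local_tokens only for future-mode interests; a current-only declaration uses the placeholder id 0, since nothing will refer to it later.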
+ let wire_expr = Resource::decl_key(res, face, true); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + if token.context.is_some() && token.matches(res) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face, true); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr, + }), + }, + res.expr(), + ), + ) + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(token, face, true); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + _node_id: NodeId, + interest_id: Option, + send_declare: &mut SendDeclare, + ) { + declare_simple_token(tables, face, id, res, interest_id, send_declare); + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + _node_id: NodeId, + send_declare: &mut SendDeclare, + ) -> Option> { + forget_simple_token(tables, face, id, res, send_declare) + } +} diff --git a/zenoh/src/net/routing/hat/linkstate_peer/interests.rs b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs new file mode 100644 index 0000000000..6b23f8d657 --- /dev/null +++ b/zenoh/src/net/routing/hat/linkstate_peer/interests.rs @@ -0,0 +1,104 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::Arc; + +use zenoh_protocol::network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, + RoutingContext, +}; + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + send_declare: &mut SendDeclare, + ) { + if options.subscribers() { + declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if mode.future() { + face_hat_mut!(face) + .remote_interests + .insert(id, (res.cloned(), options)); + } + if mode.current() { + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } + } + + fn undeclare_interest(&self, _tables: &mut Tables, face: &mut Arc, id: InterestId) { + face_hat_mut!(face).remote_interests.remove(&id); + } +} diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 41e1b26e72..f9e1674c3e 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -17,10 +17,33 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use std::{ + any::Any, + collections::{HashMap, HashSet}, + sync::{atomic::AtomicU32, Arc}, +}; + +use token::{token_remove_node, undeclare_simple_token}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::{ + common::ZExtBody, + core::ZenohIdProto, + network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, + interest::{InterestId, InterestOptions}, + oam::id::OAM_LINKSTATE, + Oam, + }, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_task::TerminatableTask; +use zenoh_transport::unicast::TransportUnicast; + use self::{ network::Network, - pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, + pubsub::{pubsub_remove_node, undeclare_simple_subscription}, + queries::{queries_remove_node, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -29,38 +52,22 @@ use super::{ }, HatBaseTrait, HatTrait, SendDeclare, }; -use crate::{ - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, - }, +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, runtime::Runtime, }; -use std::{ - any::Any, - collections::{HashMap, HashSet}, - sync::Arc, -}; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, -}; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_task::TerminatableTask; -use zenoh_transport::unicast::TransportUnicast; +mod interests; mod network; mod pubsub; mod queries; +mod token; macro_rules! 
hat { ($t:expr) => { @@ -126,11 +133,16 @@ impl TreesComputationWorker { let mut tables = zwrite!(tables_ref.tables); tracing::trace!("Compute trees"); - let new_children = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + let new_children = hat_mut!(tables) + .linkstatepeers_net + .as_mut() + .unwrap() + .compute_trees(); tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_children); queries::queries_tree_change(&mut tables, &new_children); + token::token_tree_change(&mut tables, &new_children); drop(tables); } } @@ -140,25 +152,27 @@ impl TreesComputationWorker { } struct HatTables { - peer_subs: HashSet>, - peer_qabls: HashSet>, - peers_net: Option, - peers_trees_worker: TreesComputationWorker, + linkstatepeer_subs: HashSet>, + linkstatepeer_tokens: HashSet>, + linkstatepeer_qabls: HashSet>, + linkstatepeers_net: Option, + linkstatepeers_trees_worker: TreesComputationWorker, } impl HatTables { fn new() -> Self { Self { - peer_subs: HashSet::new(), - peer_qabls: HashSet::new(), - peers_net: None, - peers_trees_worker: TreesComputationWorker::new(), + linkstatepeer_subs: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), + linkstatepeer_qabls: HashSet::new(), + linkstatepeers_net: None, + linkstatepeers_trees_worker: TreesComputationWorker::new(), } } fn schedule_compute_trees(&mut self, tables_ref: Arc) { tracing::trace!("Schedule trees computation"); - let _ = self.peers_trees_worker.tx.try_send(tables_ref); + let _ = self.linkstatepeers_trees_worker.tx.try_send(tables_ref); } } @@ -176,13 +190,13 @@ impl HatBaseTrait for HatCode { WhatAmIMatcher::empty() }; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let peer_full_linkstate = + unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); - hat_mut!(tables).peers_net = Some(Network::new( + hat_mut!(tables).linkstatepeers_net = Some(Network::new( "[Peers network]".to_string(), tables.zid, runtime, @@ -208,13 +222,12 @@ impl HatBaseTrait for HatCode { fn new_local_face( &self, - tables: &mut Tables, + _tables: &mut Tables, _tables_ref: &Arc, - face: &mut Face, - send_declare: &mut SendDeclare, + _face: &mut Face, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state, send_declare); - queries_new_face(tables, &mut face.state, send_declare); + // Nothing to do Ok(()) } @@ -224,10 +237,10 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, - send_declare: &mut SendDeclare, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = if face.state.whatami != WhatAmI::Client { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.add_link(transport.clone()) } else { 0 @@ -237,8 +250,6 @@ impl HatBaseTrait for HatCode { }; face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state, send_declare); - queries_new_face(tables, &mut face.state, send_declare); if face.state.whatami != WhatAmI::Client { hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -255,6 +266,19 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + 
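
`schedule_compute_trees` above only does a `try_send` to the worker. Assuming the worker's queue is bounded at one pending request (the channel creation is not shown in this hunk), a burst of scheduling calls coalesces into a single recomputation. A std-only sketch of that shape, with `sync_channel` standing in for the async channel the real worker uses:

```rust
use std::sync::mpsc::{sync_channel, TrySendError};
use std::thread;

fn main() {
    // Capacity 1: at most one recomputation can be pending.
    let (tx, rx) = sync_channel::<u32>(1);
    let worker = thread::spawn(move || {
        while let Ok(generation) = rx.recv() {
            println!("computing trees, generation {generation}");
        }
    });
    for generation in 0..100 {
        // Like `tx.try_send(tables_ref)`: if a request is already queued,
        // dropping this one is fine since the pending run will observe
        // the latest tables state anyway.
        match tx.try_send(generation) {
            Ok(()) | Err(TrySendError::Full(_)) => {}
            Err(TrySendError::Disconnected(_)) => break,
        }
    }
    drop(tx);
    worker.join().unwrap();
}
```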
Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; + + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); + for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -267,15 +291,9 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -295,15 +313,9 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -321,6 +333,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -328,11 +345,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
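
The `close_face` change above replaces a bare `unwrap()` with a defensive downcast: per-face hat state is stored type-erased, and a mismatch is now logged instead of panicking. A minimal sketch of the pattern with std only (`eprintln!` standing in for `tracing::error!`):

```rust
use std::any::Any;

struct HatFace { link_id: usize }

fn close_face(hat: &mut Box<dyn Any>) {
    let hat_face = match hat.downcast_mut::<HatFace>() {
        Some(hat_face) => hat_face,
        None => {
            eprintln!("Error downcasting face hat in close_face!");
            return; // degrade gracefully rather than panic mid-shutdown
        }
    };
    // From here on, the concrete per-face state can be cleaned up.
    hat_face.link_id = 0;
}

fn main() {
    let mut hat: Box<dyn Any> = Box::new(HatFace { link_id: 7 });
    close_face(&mut hat);
}
```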
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -340,13 +353,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -378,12 +388,13 @@ impl HatBaseTrait for HatCode { let whatami = transport.get_whatami()?; if whatami != WhatAmI::Client { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { let changes = net.link_states(list.link_states, zid); for (_, removed_node) in changes.removed_nodes { pubsub_remove_node(tables, &removed_node.zid, send_declare); queries_remove_node(tables, &removed_node.zid, send_declare); + token_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -404,7 +415,7 @@ impl HatBaseTrait for HatCode { routing_context: NodeId, ) -> NodeId { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_local_context(routing_context, face_hat!(face).link_id) @@ -421,13 +432,14 @@ impl HatBaseTrait for HatCode { (Ok(zid), Ok(whatami)) => { if whatami != WhatAmI::Client { for (_, removed_node) in hat_mut!(tables) - .peers_net + .linkstatepeers_net .as_mut() .unwrap() .remove_link(&zid) { pubsub_remove_node(tables, &removed_node.zid, send_declare); queries_remove_node(tables, &removed_node.zid, send_declare); + token_remove_node(tables, &removed_node.zid, send_declare); } hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); @@ -461,7 +473,7 @@ impl HatBaseTrait for HatCode { fn info(&self, tables: &Tables, kind: WhatAmI) -> String { match kind { WhatAmI::Peer => hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), @@ -471,44 +483,52 @@ impl HatBaseTrait for HatCode { } struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - peer_qabls: HashMap, + linkstatepeer_subs: HashSet, + linkstatepeer_qabls: HashMap, + linkstatepeer_tokens: HashSet, } impl HatContext { fn new() -> Self { Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - peer_qabls: HashMap::new(), + linkstatepeer_subs: HashSet::new(), + linkstatepeer_qabls: HashMap::new(), + linkstatepeer_tokens: HashSet::new(), } } } struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_interests: HashMap>, InterestOptions)>, + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_tokens: HashMap, SubscriberId>, + remote_tokens: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + remote_interests: HashMap::new(), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } -fn get_peer(tables: &Tables, 
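
The route refresh above follows a compute-then-swap pattern: routes for every affected resource are computed while holding only the read lock, and the write lock is taken just long enough to install the results (the `matching_pulls` leg disappears along with pull subscriptions). A std-only sketch of the shape:

```rust
use std::sync::RwLock;

struct Tables { version: u64 }

// Stand-in for compute_data_routes: pure computation over a read guard.
fn compute_data_routes(t: &Tables, key: &str) -> String {
    format!("routes({key})@v{}", t.version)
}

fn main() {
    let tables = RwLock::new(Tables { version: 42 });
    let keys = ["a/b", "a/c"];
    // Read phase: cheap, can run concurrently with other readers.
    let computed: Vec<(String, String)> = {
        let rt = tables.read().unwrap();
        keys.iter().map(|k| (k.to_string(), compute_data_routes(&rt, k))).collect()
    };
    // Write phase: short exclusive section that only installs results.
    let mut wt = tables.write().unwrap();
    wt.version += 1;
    for (key, routes) in computed {
        println!("install {routes} for {key}");
    }
}
```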
face: &Arc, nodeid: NodeId) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_link(face_hat!(face).link_id) @@ -538,7 +558,7 @@ impl HatTrait for HatCode {} #[inline] fn get_routes_entries(tables: &Tables) -> RoutesIndexes { let indexes = hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .graph diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 7d6e3d2850..bfa7ccf969 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -11,26 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::{Runtime, WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, @@ -40,11 +48,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -55,8 +63,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -72,12 +80,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -124,7 +132,7 @@ impl Network { #[allow(clippy::too_many_arguments)] pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, full_linkstate: bool, router_peers_failover_brokering: bool, @@ -169,7 +177,7 @@ impl Network { } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) @@ -181,7 +189,7 @@ impl Network { } #[inline] - 
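
Beyond the `ZenohId` to `ZenohIdProto` rename, the `Link` above keeps per-link mappings between peer sequence ids (psid) and zenoh ids. A sketch of that mapping with a `Vec<Option<..>>` standing in for the `vec_map::VecMap` the diff uses, and a toy id type:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Zid(u64);

struct Link { mappings: Vec<Option<Zid>> }

impl Link {
    fn set_zid_mapping(&mut self, psid: u64, zid: Zid) {
        let psid = usize::try_from(psid).unwrap();
        if self.mappings.len() <= psid {
            self.mappings.resize(psid + 1, None);
        }
        self.mappings[psid] = Some(zid);
    }
    fn get_zid(&self, psid: u64) -> Option<&Zid> {
        self.mappings.get(usize::try_from(psid).ok()?)?.as_ref()
    }
}

fn main() {
    let mut link = Link { mappings: Vec::new() };
    link.set_zid_mapping(3, Zid(0xA));
    assert_eq!(link.get_zid(3), Some(&Zid(0xA)));
    assert_eq!(link.get_zid(1), None);
}
```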
pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -271,7 +279,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), + ext_qos: oam::ext::QoSType::OAM, ext_tstamp: None, }) .into()) @@ -336,7 +344,11 @@ impl Network { self.graph.update_edge(idx1, idx2, weight); } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + pub(super) fn link_states( + &mut self, + link_states: Vec, + src: ZenohIdProto, + ) -> Changes { tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); @@ -401,7 +413,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -551,7 +563,7 @@ impl Network { } }, ) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); // Add/remove edges from graph let mut reintroduced_nodes = vec![]; @@ -603,7 +615,7 @@ impl Network { let link_states = link_states .into_iter() .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); if !self.autoconnect.is_empty() { // Connect discovered peers @@ -642,8 +654,8 @@ impl Network { #[allow(clippy::type_complexity)] // This is only used here if !link_states.is_empty() { let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, ) = link_states.into_iter().partition(|(_, _, new)| *new); let new_idxs = new_idxs .into_iter() @@ -807,7 +819,7 @@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 80a8eff95d..f1412ec807 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -11,31 +11,43 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use 
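
The `Network` above maintains a petgraph graph whose nodes are peers and whose edges are links; applying link states boils down to `update_edge` and removal calls, and `get_idx` is a linear scan over `node_indices`. A minimal sketch assuming the petgraph crate, with a plain `UnGraph` (the exact graph flavor used by the real network is not visible in this hunk):

```rust
use petgraph::graph::UnGraph;

fn main() {
    let mut net: UnGraph<&str, ()> = UnGraph::new_undirected();
    let a = net.add_node("peer-a");
    let b = net.add_node("peer-b");
    // update_edge inserts the edge if absent and replaces the weight if
    // present, which makes repeated link-state application idempotent.
    net.update_edge(a, b, ());
    net.update_edge(a, b, ());
    assert_eq!(net.edge_count(), 1);
    // Looking a node up by its payload mirrors Network::get_idx.
    let idx = net.node_indices().find(|i| net[*i] == "peer-b");
    assert_eq!(idx, Some(b));
}
```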
zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, ZenohIdProto}, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode, InterestOptions}, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +#[cfg(feature = "unstable")] +use crate::key_expr::KeyExpr; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, + router::RoutesIndexes, + RoutingContext, +}; + #[inline] fn send_sourced_subscription_to_net_children( tables: &Tables, @@ -50,20 +62,23 @@ fn send_sourced_subscription_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send subscription {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -87,28 +102,72 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - && !face_hat!(dst_face).local_subs.contains(res) + if (src_face.id != dst_face.id) + && !face_hat!(dst_face).local_subs.contains_key(res) && dst_face.whatami == WhatAmI::Client { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - ), - ); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: 
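
The child-propagation loops above replace `src_face.is_none() || someface.id != src_face.unwrap().id` with an `unwrap`-free equivalent. Both versions forward to any face different from the (optional) source; a tiny demonstration of the rewritten predicate:

```rust
struct Face { id: usize }

fn should_forward(src_face: Option<&Face>, someface: &Face) -> bool {
    src_face
        .map(|src_face| someface.id != src_face.id)
        .unwrap_or(true)
}

fn main() {
    let (a, b) = (Face { id: 1 }, Face { id: 2 });
    assert!(should_forward(None, &a));      // no source: always forward
    assert!(should_forward(Some(&a), &b));  // different face: forward
    assert!(!should_forward(Some(&a), &a)); // never echo back to the source
}
```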
*sub_info, + }), + }, + res.expr(), + ), + ); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| { + o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) + }) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); + } + } + } } } @@ -141,9 +200,9 @@ fn propagate_sourced_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -173,109 +232,92 @@ fn propagate_sourced_subscription( } } -fn register_peer_subscription( +fn register_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - if !res_hat!(res).peer_subs.contains(&peer) { + if !res_hat!(res).linkstatepeer_subs.contains(&peer) { // Register peer subscription { - tracing::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_subs.insert(peer); + hat_mut!(tables).linkstatepeer_subs.insert(res.clone()); } // Propagate subscription to peers propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer); } - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face, send_declare); - } + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face, send_declare); } -fn declare_peer_subscription( +fn declare_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer, send_declare); + register_linkstatepeer_subscription(tables, face, res, sub_info, peer, send_declare); } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { - 
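
For client faces, the branch above only declares a subscriber when a registered interest matches it, and an `aggregate` interest is declared on the interest's own key expression instead of the subscription's. A toy model of that selection, with prefix matching standing in for real key-expression matching:

```rust
struct Interest { key: Option<&'static str>, subscribers: bool, aggregate: bool }

fn declared_key<'a>(res: &'a str, int: &'a Interest) -> Option<&'a str> {
    if !int.subscribers {
        return None;
    }
    // A None key acts as the "match everything" interest, like the
    // Option<Arc<Resource>> stored in remote_interests.
    let matches = int.key.map(|k| res.starts_with(k)).unwrap_or(true);
    match (matches, int.aggregate) {
        (false, _) => None,
        (true, true) => Some(int.key.unwrap_or(res)), // aggregate on the interest key
        (true, false) => Some(res),                   // declare the subscription itself
    }
}

fn main() {
    let agg = Interest { key: Some("a/"), subscribers: true, aggregate: true };
    assert_eq!(declared_key("a/b", &agg), Some("a/"));
    let plain = Interest { key: Some("a/"), subscribers: true, aggregate: false };
    assert_eq!(declared_key("a/b", &plain), Some("a/b"));
}
```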
res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + register_simple_subscription(tables, face, id, res, sub_info); let zid = tables.zid; - register_peer_subscription(tables, face, res, &propa_sub_info, zid, send_declare); + register_linkstatepeer_subscription(tables, face, res, sub_info, zid, send_declare); } #[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_subs + .linkstatepeer_subs .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -288,6 +330,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_simple_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn send_forget_sourced_subscription_to_net_children( tables: &Tables, @@ -301,20 +350,23 @@ fn send_forget_sourced_subscription_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send forget subscription {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -333,25 +385,56 @@ fn propagate_forget_simple_subscription( res: &Arc, send_declare: &mut SendDeclare, ) { - for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: 
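
`register_simple_subscription` above now goes through `entry(..).or_insert_with(|| Arc::new(SessionContext::new(..)))` instead of hand-building the whole context literal. The shape of that entry-API pattern, with toy types:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct SessionContext { subs: Option<&'static str> }

fn main() {
    let mut session_ctxs: HashMap<u32, SessionContext> = HashMap::new();
    let face_id = 7;
    // Creates the per-face context on first use, reuses it afterwards,
    // so registering a subscription is one uniform code path.
    let ctx = session_ctxs.entry(face_id).or_insert_with(SessionContext::default);
    ctx.subs = Some("reliable");
    assert!(session_ctxs[&face_id].subs.is_some());
}
```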
ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - face_hat_mut!(face).local_subs.remove(res); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_subs(&m, &face) || remote_linkstatepeer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } @@ -360,9 +443,9 @@ fn propagate_forget_sourced_subscription( tables: &Tables, res: &Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -394,146 +477,152 @@ fn propagate_forget_sourced_subscription( fn unregister_peer_subscription( tables: &mut Tables, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - tracing::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); - - if res_hat!(res).peer_subs.is_empty() { + res_hat_mut!(res) + .linkstatepeer_subs + .retain(|sub| sub != peer); + + if res_hat!(res).linkstatepeer_subs.is_empty() { hat_mut!(tables) - .peer_subs + .linkstatepeer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res, send_declare); - } + propagate_forget_simple_subscription(tables, res, send_declare); } } -fn undeclare_peer_subscription( +fn undeclare_linkstatepeer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - if res_hat!(res).peer_subs.contains(peer) { + if res_hat!(res).linkstatepeer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer, send_declare); propagate_forget_sourced_subscription(tables, res, face, peer); } } -fn forget_peer_subscription( +fn forget_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer, send_declare); + undeclare_linkstatepeer_subscription(tables, Some(face), res, peer, send_declare); } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - 
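
The second loop above garbage-collects aggregated declarations: a locally declared subscriber survives only while at least one matching resource still has a remote subscriber, and stale ones are undeclared by id (with `WireExprType::null()`, since the id alone identifies them). A toy version of that sweep, again with prefix matching as a stand-in:

```rust
use std::collections::HashMap;

fn main() {
    // locally declared key -> declaration id
    let mut local_subs: HashMap<&'static str, u32> = HashMap::from([("a/", 1), ("b/", 2)]);
    // keys that still have remote subscribers
    let remote = ["a/x"];
    let stale: Vec<&str> = local_subs
        .keys()
        .filter(|local| !remote.iter().any(|r| r.starts_with(*local)))
        .copied()
        .collect();
    for key in stale {
        if let Some(id) = local_subs.remove(key) {
            println!("send UndeclareSubscriber {{ id: {id} }} for {key}");
        }
    }
    assert!(local_subs.contains_key("a/") && !local_subs.contains_key("b/"));
}
```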
let mut client_subs = client_subs(res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone(), send_declare); - } - if client_subs.len() == 1 && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), + let mut simple_subs = simple_subs(res); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); + if simple_subs.is_empty() { + undeclare_linkstatepeer_subscription( + tables, + None, + res, + &tables.zid.clone(), + send_declare, ); + } - face_hat_mut!(face).local_subs.remove(res); + if simple_subs.len() == 1 && !linkstatepeer_subs { + let mut face = &mut simple_subs[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_subs(&m, face) + || remote_linkstatepeer_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }, + ), + }, + res.expr(), + ), + ); + } + } + } + } } } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, send_declare: &mut SendDeclare, -) { - undeclare_client_subscription(tables, face, res, send_declare); -} - -pub(super) fn pubsub_new_face( - tables: &mut Tables, - face: &mut Arc, - send_declare: &mut SendDeclare, -) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, - }; - - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - ), - ); - } +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_simple_subscription(tables, face, &mut res, send_declare); + Some(res) + } else { + None } } pub(super) fn 
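
With the interest protocol, forgetting a simple subscription above is keyed by the declaration id the subscriber originally supplied, and the resource is handed back to the caller (returning `None` for an unknown id lets the dispatcher report it). A sketch of that id-keyed forget:

```rust
use std::collections::HashMap;
use std::sync::Arc;

struct Resource { key: &'static str }

fn forget_simple_subscription(
    remote_subs: &mut HashMap<u32, Arc<Resource>>,
    id: u32,
) -> Option<Arc<Resource>> {
    let res = remote_subs.remove(&id)?;
    println!("undeclare simple subscription on {}", res.key);
    Some(res)
}

fn main() {
    let mut remote_subs = HashMap::from([(1, Arc::new(Resource { key: "a/b" }))]);
    assert!(forget_simple_subscription(&mut remote_subs, 1).is_some());
    assert!(forget_simple_subscription(&mut remote_subs, 1).is_none()); // unknown id
}
```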
pubsub_remove_node( tables: &mut Tables, - node: &ZenohId, + node: &ZenohIdProto, send_declare: &mut SendDeclare, ) { for mut res in hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_subs.contains(node)) .cloned() .collect::>>() { @@ -545,23 +634,28 @@ pub(super) fn pubsub_remove_node( } pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec]) { + let net = match hat!(tables).linkstatepeers_net.as_ref() { + Some(net) => net, + None => { + tracing::error!("Error accessing peers_net in pubsub_tree_change!"); + return; + } + }; // propagate subs to new children for (tree_sid, tree_children) in new_children.iter().enumerate() { if !tree_children.is_empty() { - let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; - let subs_res = &hat!(tables).peer_subs; + let subs_res = &hat!(tables).linkstatepeer_subs; for res in subs_res { - let subs = &res_hat!(res).peer_subs; + let subs = &res_hat!(res).linkstatepeer_subs; for sub in subs { if *sub == tree_id { let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers }; send_sourced_subscription_to_net_children( tables, @@ -583,36 +677,121 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_children: &[Vec, +pub(super) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, ) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).linkstatepeer_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_simple_subs(sub, face) || remote_linkstatepeer_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); + } + } else { + for sub in &hat!(tables).linkstatepeer_subs { + if sub.context.is_some() + && sub.matches(res) + && (remote_simple_subs(sub, face) || remote_linkstatepeer_subs(tables, sub)) { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, 
face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } - } + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } + } else { + for sub in &hat!(tables).linkstatepeer_subs { + if sub.context.is_some() + && (remote_simple_subs(sub, face) || remote_linkstatepeer_subs(tables, sub)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); + } + } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -621,6 +800,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -628,10 +808,10 @@ impl HatPubSubTrait for HatCode { ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) + declare_linkstatepeer_subscription(tables, face, res, sub_info, peer, send_declare) } } else { - declare_client_subscription(tables, face, res, sub_info, send_declare) + declare_simple_subscription(tables, face, id, res, sub_info, send_declare) } } @@ -639,23 +819,31 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer, send_declare); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_linkstatepeer_subscription(tables, face, &mut res, &peer, send_declare); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res, send_declare); + forget_simple_subscription(tables, face, id, send_declare) } } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)> { // Compute the list of known suscriptions (keys) hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() .map(|s| { ( @@ -664,7 +852,7 @@ impl HatPubSubTrait for HatCode { // sources of those subscriptions Sources { routers: vec![], - peers: Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()), + peers: Vec::from_iter(res_hat!(s).linkstatepeer_subs.iter().cloned()), clients: s .session_ctxs .values() @@ -686,6 +874,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + route: &mut Route, + expr: 
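
Declaration ids in `declare_sub_interest` above come from a per-face `AtomicU32` bumped with `fetch_add` whenever the interest also has future mode; the diff's own comment flags rollover and uniqueness handling as a TODO. A minimal sketch of that allocation:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

struct FaceHat { next_id: AtomicU32 }

fn main() {
    let face = FaceHat { next_id: AtomicU32::new(0) };
    // Each current+future reply allocates a fresh id so the peer can
    // later undeclare exactly this declaration.
    let a = face.next_id.fetch_add(1, Ordering::SeqCst);
    let b = face.next_id.fetch_add(1, Ordering::SeqCst);
    assert_eq!((a, b), (0, 1));
    // Current-only interests use id 0 and are never undeclared.
}
```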
&RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -714,7 +939,7 @@ impl HatPubSubTrait for HatCode { for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -725,24 +950,17 @@ impl HatPubSubTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if context.subs.is_some() + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -762,4 +980,73 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + #[zenoh_macros::unstable] + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let 
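
`insert_faces_for_subs` above walks, for every subscribing peer, the precomputed spanning-tree direction table: `directions[node]` gives the next hop towards that node on the tree rooted at `source`, and `entry(..).or_insert_with(..)` keeps the first route found per face. A toy version over plain indices:

```rust
use std::collections::HashMap;

fn main() {
    // Next hop per destination node, for one tree (None = unreachable).
    let directions: Vec<Option<usize>> = vec![None, Some(2), Some(2), Some(3)];
    let subs = [1usize, 3]; // nodes known to have subscribers
    let mut route: HashMap<usize, usize> = HashMap::new(); // next-hop face -> destination
    for sub in subs {
        if let Some(Some(next_hop)) = directions.get(sub) {
            // or_insert keeps the first entry, mirroring or_insert_with above.
            route.entry(*next_hop).or_insert(sub);
        }
    }
    assert_eq!(route.get(&2), Some(&1));
    assert_eq!(route.get(&3), Some(&3));
}
```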
mres = mres.upgrade().unwrap(); + + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).linkstatepeer_subs, + ); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index dfe729e4a3..6941466571 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -11,51 +11,56 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, ZenohIdProto, + }, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; - this.distance = std::cmp::min(this.distance, info.distance); - this -} +use super::{ + face_hat, face_hat_mut, get_peer, get_routes_entries, hat, hat_mut, network::Network, res_hat, + res_hat_mut, HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, + router::RoutesIndexes, + RoutingContext, +}; -#[cfg(not(feature = "complete_n"))] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = 
std::cmp::min(this.distance, info.distance); this } -fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -68,16 +73,17 @@ fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -110,10 +116,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -122,7 +125,7 @@ fn send_sourced_queryable_to_net_children( net: &Network, children: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -130,20 +133,24 @@ fn send_sourced_queryable_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send queryable {} on {}", res.expr(), someface); + if src_face + .as_ref() + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -167,24 +174,36 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + let current = face_hat!(dst_face).local_qabls.get(res); + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) + && (current.is_none() || current.unwrap().1 != info) && dst_face.whatami == WhatAmI::Client + && face_hat!(dst_face) + .remote_interests + .values() + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, &mut dst_face); + .insert(res.clone(), (id, info)); + let push_declaration = dst_face.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( Declare { - ext_qos: 
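
Queryable-info merging above simplifies once `complete` becomes a `bool`: completeness is OR-ed and distance takes the minimum. A toy fold in the style of `local_peer_qabl_info`, with a stand-in info type and default:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct QablInfo { complete: bool, distance: u16 }

fn merge(mut this: QablInfo, info: &QablInfo) -> QablInfo {
    this.complete = this.complete || info.complete;
    this.distance = this.distance.min(info.distance);
    this
}

fn main() {
    let infos = [
        QablInfo { complete: false, distance: 3 },
        QablInfo { complete: true, distance: 5 },
    ];
    let merged = infos
        .iter()
        .fold(None, |accu: Option<QablInfo>, info| {
            Some(accu.map(|a| merge(a, info)).unwrap_or(*info))
        })
        .unwrap_or(QablInfo { complete: false, distance: 0 }); // stand-in DEFAULT
    assert_eq!(merged, QablInfo { complete: true, distance: 3 });
}
```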
ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -199,11 +218,11 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -233,97 +252,89 @@ fn propagate_sourced_queryable( } } -fn register_peer_queryable( +fn register_linkstatepeer_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, + qabl_info: &QueryableInfoType, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - let current_info = res_hat!(res).peer_qabls.get(&peer); + let current_info = res_hat!(res).linkstatepeer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - tracing::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); + res_hat_mut!(res) + .linkstatepeer_qabls + .insert(peer, *qabl_info); + hat_mut!(tables).linkstatepeer_qabls.insert(res.clone()); } // Propagate queryable to peers propagate_sourced_queryable(tables, res, qabl_info, face.as_deref_mut(), &peer); } - if tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face, send_declare); - } + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face, send_declare); } -fn declare_peer_queryable( +fn declare_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, + qabl_info: &QueryableInfoType, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { let face = Some(face); - register_peer_queryable(tables, face, res, qabl_info, peer, send_declare); + register_linkstatepeer_queryable(tables, face, res, qabl_info, peer, send_declare); } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { let res = get_mut_unchecked(res); - tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: 
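
`propagate_simple_queryable` above reuses the already-advertised id when only the queryable info changed, so a peer sees an update of the same declaration rather than a second one, and nothing is sent when the info is unchanged. A sketch of that reuse logic:

```rust
use std::collections::HashMap;

fn main() {
    let mut next_id = 0u32;
    // key -> (declaration id, advertised distance)
    let mut local_qabls: HashMap<&'static str, (u32, u16)> = HashMap::new();
    for distance in [5u16, 3, 3] {
        let current = local_qabls.get("a/b").copied();
        if current.map(|(_, d)| d) == Some(distance) {
            continue; // info unchanged: nothing to send
        }
        // Reuse the existing id if any, otherwise allocate a fresh one.
        let id = current.map(|(id, _)| id).unwrap_or_else(|| {
            let id = next_id;
            next_id += 1;
            id
        });
        local_qabls.insert("a/b", (id, distance));
        println!("DeclareQueryable {{ id: {id}, distance: {distance} }}");
    }
    assert_eq!(local_qabls["a/b"], (0, 3)); // same id, updated info
}
```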
&QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, res, qabl_info); - + register_simple_queryable(tables, face, id, res, qabl_info); let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; - register_peer_queryable(tables, Some(face), res, &local_details, zid, send_declare); + register_linkstatepeer_queryable(tables, Some(face), res, &local_details, zid, send_declare); } #[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .keys() .any(|peer| peer != &tables.zid) } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -336,6 +347,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_simple_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_children( tables: &Tables, @@ -349,20 +367,23 @@ fn send_forget_sourced_queryable_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send forget queryable {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -381,26 +402,57 @@ fn propagate_forget_simple_queryable( res: &mut Arc, send_declare: &mut SendDeclare, ) { - for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - - face_hat_mut!(face).local_qabls.remove(res); + } + for res in face_hat!(&mut face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_qabls(&m, &face) + || remote_linkstatepeer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + 
interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } @@ -409,9 +461,9 @@ fn propagate_forget_sourced_queryable( tables: &mut Tables, res: &mut Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, ) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -440,154 +492,164 @@ fn propagate_forget_sourced_queryable( } } -fn unregister_peer_queryable( +fn unregister_linkstatepeer_queryable( tables: &mut Tables, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.remove(peer); + res_hat_mut!(res).linkstatepeer_qabls.remove(peer); - if res_hat!(res).peer_qabls.is_empty() { + if res_hat!(res).linkstatepeer_qabls.is_empty() { hat_mut!(tables) - .peer_qabls + .linkstatepeer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res, send_declare); - } + propagate_forget_simple_queryable(tables, res, send_declare); } } -fn undeclare_peer_queryable( +fn undeclare_linkstatepeer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer, send_declare); + if res_hat!(res).linkstatepeer_qabls.contains_key(peer) { + unregister_linkstatepeer_queryable(tables, res, peer, send_declare); propagate_forget_sourced_queryable(tables, res, face, peer); } } -fn forget_peer_queryable( +fn forget_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer, send_declare); + undeclare_linkstatepeer_queryable(tables, Some(face), res, peer, send_declare); } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - - let mut client_qabls = client_qabls(res); - let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone(), send_declare); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid, send_declare); - } + let mut simple_qabls = simple_qabls(res); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); - if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - 
let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), + if simple_qabls.is_empty() { + undeclare_linkstatepeer_queryable(tables, None, res, &tables.zid.clone(), send_declare); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_linkstatepeer_queryable( + tables, + None, + res, + &local_info, + tables.zid, + send_declare, ); - - face_hat_mut!(face).local_qabls.remove(res); } - } -} -fn forget_client_queryable( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - send_declare: &mut SendDeclare, -) { - undeclare_client_queryable(tables, face, res, send_declare); -} - -pub(super) fn queries_new_face( - tables: &mut Tables, - face: &mut Arc, - send_declare: &mut SendDeclare, -) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); + if simple_qabls.len() == 1 && !linkstatepeer_qabls { + let mut face = &mut simple_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), }), }, - qabl.expr(), + res.expr(), ), ); } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_qabls(&m, face) + || remote_linkstatepeer_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } } } } +fn forget_simple_queryable( + tables: &mut Tables, + face: &mut Arc, + id: QueryableId, + send_declare: &mut SendDeclare, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_simple_queryable(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } +} + pub(super) fn queries_remove_node( tables: &mut Tables, - node: &ZenohId, + node: &ZenohIdProto, send_declare: &mut SendDeclare, ) { let mut qabls = vec![]; - for res in hat!(tables).peer_qabls.iter() { - for qabl in res_hat!(res).peer_qabls.keys() { + for res in hat!(tables).linkstatepeer_qabls.iter() { + for qabl in res_hat!(res).linkstatepeer_qabls.keys() { if qabl == node { qabls.push(res.clone()); } } } for mut res in qabls { - 
unregister_peer_queryable(tables, &mut res, node, send_declare); + unregister_linkstatepeer_queryable(tables, &mut res, node, send_declare); update_matches_query_routes(tables, &res); Resource::clean(&mut res) @@ -595,18 +657,24 @@ pub(super) fn queries_remove_node( } pub(super) fn queries_tree_change(tables: &mut Tables, new_children: &[Vec]) { - // propagate qabls to new children + let net = match hat!(tables).linkstatepeers_net.as_ref() { + Some(net) => net, + None => { + tracing::error!("Error accessing peers_net in queries_tree_change!"); + return; + } + }; + // propagate qabls to new clildren for (tree_sid, tree_children) in new_children.iter().enumerate() { if !tree_children.is_empty() { - let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; - let qabls_res = &hat!(tables).peer_qabls; + let qabls_res = &hat!(tables).linkstatepeer_qabls; for res in qabls_res { - let qabls = &res_hat!(res).peer_qabls; + let qabls = &res_hat!(res).linkstatepeer_qabls; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_children( tables, @@ -634,7 +702,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -673,22 +741,149 @@ lazy_static::lazy_static! { static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } +pub(super) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).linkstatepeer_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (remote_simple_qabls(qabl, face) + || remote_linkstatepeer_qabls(tables, qabl)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); + } + } else { + for qabl in hat!(tables).linkstatepeer_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (remote_simple_qabls(qabl, face) + || remote_linkstatepeer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); 
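The aggregate branch above answers a current interest with a single declaration on the interest's own key expression, while the non-aggregated path emits one declaration per matching queryable; in both cases a fresh id is allocated only when the interest also covers future declarations (`mode.future()`), otherwise the reserved id 0 is used. A minimal, self-contained sketch of that decision logic; the `Declaration` type and `alloc_id` helper are illustrative, not zenoh APIs:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Stand-in for the per-face `next_id` counter.
static NEXT_ID: AtomicU32 = AtomicU32::new(1);

#[derive(Debug)]
struct Declaration {
    id: u32,
    key_expr: String,
}

/// Mirrors `mode.future()`: only interests that stay registered get a fresh
/// id from the counter; one-shot "current" replies use the reserved id 0.
fn alloc_id(future: bool) -> u32 {
    if future {
        NEXT_ID.fetch_add(1, Ordering::SeqCst)
    } else {
        0
    }
}

fn reply_current_interest(
    interest_key: &str,
    matching: &[&str],
    aggregate: bool,
    future: bool,
) -> Vec<Declaration> {
    if aggregate {
        // One declaration on the interest's own key expression, if anything matches.
        if matching.is_empty() {
            return Vec::new();
        }
        vec![Declaration {
            id: alloc_id(future),
            key_expr: interest_key.to_string(),
        }]
    } else {
        // One declaration per matching resource.
        matching
            .iter()
            .map(|k| Declaration {
                id: alloc_id(future),
                key_expr: k.to_string(),
            })
            .collect()
    }
}

fn main() {
    println!("{:?}", reply_current_interest("demo/**", &["demo/a", "demo/b"], true, true));
    println!("{:?}", reply_current_interest("demo/**", &["demo/a", "demo/b"], false, false));
}
```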
+ } + } + } + } else { + for qabl in hat!(tables).linkstatepeer_qabls.iter() { + if qabl.context.is_some() + && (remote_simple_qabls(qabl, face) || remote_linkstatepeer_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); + } + } + } + } +} + impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, send_declare: &mut SendDeclare, ) { if face.whatami != WhatAmI::Client { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare); + declare_linkstatepeer_queryable(tables, face, res, qabl_info, peer, send_declare); } } else { - declare_client_queryable(tables, face, res, qabl_info, send_declare); + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare); } } @@ -696,23 +891,31 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ) { + ) -> Option> { if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer, send_declare); + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_linkstatepeer_queryable(tables, face, &mut res, &peer, send_declare); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_queryable(tables, face, res, send_declare); + forget_simple_queryable(tables, face, id, send_declare) } } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)> { // Compute the list of known queryables (keys) hat!(tables) - .peer_qabls + .linkstatepeer_qabls .iter() .map(|s| { ( @@ -721,7 +924,7 @@ impl HatQueriesTrait for HatCode { // sources of those queryables Sources { routers: vec![], - peers: Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()), + peers: Vec::from_iter(res_hat!(s).linkstatepeer_qabls.keys().cloned()), clients: s .session_ctxs .values() @@ -772,7 +975,7 @@ impl HatQueriesTrait for HatCode { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -783,15 +986,12 @@ impl HatQueriesTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_qabls, + &res_hat!(mres).linkstatepeer_qabls, complete, ); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == 
WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { @@ -815,48 +1015,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/token.rs b/zenoh/src/net/routing/hat/linkstate_peer/token.rs new file mode 100644 index 0000000000..6e3ea08492 --- /dev/null +++ b/zenoh/src/net/routing/hat/linkstate_peer/token.rs @@ -0,0 +1,787 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
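The new `linkstate_peer/token.rs` that begins here gives liveliness tokens the same declare/undeclare plumbing that subscribers and queryables already have, keyed by per-face `TokenId`s so that undeclaration no longer needs a resolvable wire expression (`WireExprType::null()`). A toy sketch of that id-keyed bookkeeping, with illustrative names; the real code stores `Arc<Resource>` handles and propagates declarations over the network:

```rust
use std::collections::HashMap;

type TokenId = u32;

#[derive(Default)]
struct FaceTokens {
    // TokenId -> key expression the token was declared on.
    remote_tokens: HashMap<TokenId, String>,
}

impl FaceTokens {
    fn declare(&mut self, id: TokenId, key_expr: &str) {
        self.remote_tokens.insert(id, key_expr.to_string());
    }

    // Undeclaration is keyed by id alone, mirroring how `forget_simple_token`
    // looks up `remote_tokens` by the id carried in UndeclareToken.
    fn undeclare(&mut self, id: TokenId) -> Option<String> {
        self.remote_tokens.remove(&id)
    }
}

fn main() {
    let mut face = FaceTokens::default();
    face.declare(1, "group1/member/a");
    assert_eq!(face.undeclare(1).as_deref(), Some("group1/member/a"));
    assert_eq!(face.undeclare(1), None); // unknown ids are simply ignored
}
```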
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use petgraph::graph::NodeIndex; +use zenoh_protocol::{ + core::{WhatAmI, ZenohIdProto}, + network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareToken, UndeclareToken, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat, face_hat_mut, get_peer, hat, hat_mut, network::Network, res_hat, res_hat_mut, + HatCode, HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn send_sourced_token_to_net_clildren( + tables: &Tables, + net: &Network, + clildren: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: NodeId, +) { + for child in clildren { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareToken(DeclareToken { + id: 0, // Sourced tokens do not use ids + wire_expr: key_expr, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_token_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { + if (src_face.id != dst_face.id || dst_face.zid == tables.zid) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && dst_face.whatami == WhatAmI::Client + { + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: 
None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); + } + } + } + } +} + +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc, + src_face: &mut Arc, + send_declare: &mut SendDeclare, +) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); + } +} + +fn propagate_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, +) { + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_token_to_net_clildren( + tables, + net, + &net.trees[tree_sid.index()].children, + res, + src_face, + tree_sid.index() as NodeId, + ); + } else { + tracing::trace!( + "Propagating token {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_linkstatepeer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, + send_declare: &mut SendDeclare, +) { + if !res_hat!(res).linkstatepeer_tokens.contains(&peer) { + // Register peer liveliness + { + res_hat_mut!(res).linkstatepeer_tokens.insert(peer); + hat_mut!(tables).linkstatepeer_tokens.insert(res.clone()); + } + + // Propagate liveliness to peers + propagate_sourced_token(tables, res, Some(face), &peer); + } + + // Propagate liveliness to clients + propagate_simple_token(tables, res, face, send_declare); +} + +fn declare_linkstatepeer_token( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + peer: ZenohIdProto, + send_declare: &mut SendDeclare, +) { + register_linkstatepeer_token(tables, face, res, peer, send_declare); +} + +fn register_simple_token( + _tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_simple_token( + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + send_declare: &mut SendDeclare, +) { + register_simple_token(tables, face, id, res); + let zid = tables.zid; + register_linkstatepeer_token(tables, face, res, zid, send_declare); +} + +#[inline] +fn remote_linkstatepeer_tokens(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .linkstatepeer_tokens + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn simple_tokens(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn remote_simple_tokens(tables: &Tables, res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token) +} + +#[inline] +fn send_forget_sourced_token_to_net_clildren( + tables: &Tables, + net: &Network, + clildren: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + 
routing_context: Option, +) { + for child in clildren { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = someface.whatami != WhatAmI::Client; + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: 0, // Sourced tokens do not use ids + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + } + } + None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc, + send_declare: &mut SendDeclare, +) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_tokens(tables, &m, &face) + || remote_linkstatepeer_tokens(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } + } +} + +fn propagate_forget_sourced_token( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohIdProto, +) { + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_token_to_net_clildren( + tables, + net, + &net.trees[tree_sid.index()].children, + res, + src_face, + Some(tree_sid.index() as NodeId), + ); + } else { + tracing::trace!( + "Propagating forget token {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => tracing::error!( + "Error propagating forget token {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_linkstatepeer_token( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { + res_hat_mut!(res) + .linkstatepeer_tokens + .retain(|token| token != peer); + + if res_hat!(res).linkstatepeer_tokens.is_empty() { + hat_mut!(tables) + .linkstatepeer_tokens + .retain(|token| !Arc::ptr_eq(token, res)); + + propagate_forget_simple_token(tables, res, send_declare); + } +} + +fn undeclare_linkstatepeer_token( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohIdProto, + send_declare: &mut SendDeclare, +) { + if 
res_hat!(res).linkstatepeer_tokens.contains(peer) {
+        unregister_linkstatepeer_token(tables, res, peer, send_declare);
+        propagate_forget_sourced_token(tables, res, face, peer);
+    }
+}
+
+fn forget_linkstatepeer_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    peer: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    undeclare_linkstatepeer_token(tables, Some(face), res, peer, send_declare);
+}
+
+pub(super) fn undeclare_simple_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    send_declare: &mut SendDeclare,
+) {
+    if !face_hat_mut!(face)
+        .remote_tokens
+        .values()
+        .any(|s| *s == *res)
+    {
+        if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) {
+            get_mut_unchecked(ctx).token = false;
+        }
+
+        let mut simple_tokens = simple_tokens(res);
+        let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res);
+        if simple_tokens.is_empty() {
+            undeclare_linkstatepeer_token(tables, None, res, &tables.zid.clone(), send_declare);
+        }
+
+        if simple_tokens.len() == 1 && !linkstatepeer_tokens {
+            let mut face = &mut simple_tokens[0];
+            if face.whatami != WhatAmI::Client {
+                if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) {
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id,
+                                    ext_wire_expr: WireExprType::null(),
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+                for res in face_hat!(face)
+                    .local_tokens
+                    .keys()
+                    .cloned()
+                    .collect::<Vec<Arc<Resource>>>()
+                {
+                    if !res.context().matches.iter().any(|m| {
+                        m.upgrade().is_some_and(|m| {
+                            m.context.is_some()
+                                && (remote_simple_tokens(tables, &m, face)
+                                    || remote_linkstatepeer_tokens(tables, &m))
+                        })
+                    }) {
+                        if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) {
+                            send_declare(
+                                &face.primitives,
+                                RoutingContext::with_expr(
+                                    Declare {
+                                        interest_id: None,
+                                        ext_qos: ext::QoSType::DECLARE,
+                                        ext_tstamp: None,
+                                        ext_nodeid: ext::NodeIdType::DEFAULT,
+                                        body: DeclareBody::UndeclareToken(UndeclareToken {
+                                            id,
+                                            ext_wire_expr: WireExprType::null(),
+                                        }),
+                                    },
+                                    res.expr(),
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+fn forget_simple_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    id: TokenId,
+    send_declare: &mut SendDeclare,
+) -> Option<Arc<Resource>> {
+    if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) {
+        undeclare_simple_token(tables, face, &mut res, send_declare);
+        Some(res)
+    } else {
+        None
+    }
+}
+
+pub(super) fn token_remove_node(
+    tables: &mut Tables,
+    node: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    for mut res in hat!(tables)
+        .linkstatepeer_tokens
+        .iter()
+        .filter(|res| res_hat!(res).linkstatepeer_tokens.contains(node))
+        .cloned()
+        .collect::<Vec<Arc<Resource>>>()
+    {
+        unregister_linkstatepeer_token(tables, &mut res, node, send_declare);
+        Resource::clean(&mut res)
+    }
+}
+
+pub(super) fn token_tree_change(tables: &mut Tables, new_clildren: &[Vec<NodeIndex>]) {
+    let net = match hat!(tables).linkstatepeers_net.as_ref() {
+        Some(net) => net,
+        None => {
+            tracing::error!("Error accessing peers_net in token_tree_change!");
+            return;
+        }
+    };
+    // propagate tokens to new children
+    for (tree_sid, tree_clildren) in new_clildren.iter().enumerate() {
+        if !tree_clildren.is_empty() {
+            let tree_idx = NodeIndex::new(tree_sid);
+            if net.graph.contains_node(tree_idx) {
+                let tree_id = net.graph[tree_idx].zid;
+
+                let tokens_res = &hat!(tables).linkstatepeer_tokens;
+
+                for res in 
tokens_res { + let tokens = &res_hat!(res).linkstatepeer_tokens; + for token in tokens { + if *token == tree_id { + send_sourced_token_to_net_clildren( + tables, + net, + tree_clildren, + res, + None, + tree_sid as NodeId, + ); + } + } + } + } + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).linkstatepeer_tokens.iter().any(|token| { + token.context.is_some() + && token.matches(res) + && (remote_simple_tokens(tables, token, face) + || remote_linkstatepeer_tokens(tables, token)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); + } + } else { + for token in &hat!(tables).linkstatepeer_tokens { + if token.context.is_some() + && token.matches(res) + && (remote_simple_tokens(tables, token, face) + || remote_linkstatepeer_tokens(tables, token)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); + } + } + } + } else { + for token in &hat!(tables).linkstatepeer_tokens { + if token.context.is_some() + && (remote_simple_tokens(tables, token, face) + || remote_linkstatepeer_tokens(tables, token)) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + node_id: NodeId, + _interest_id: Option, + send_declare: &mut SendDeclare, + ) { + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(tables, face, node_id) { + declare_linkstatepeer_token(tables, face, res, peer, send_declare) + } + } else { + declare_simple_token(tables, face, id, res, send_declare) + } + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + node_id: NodeId, + send_declare: &mut 
SendDeclare, + ) -> Option> { + if face.whatami != WhatAmI::Client { + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_linkstatepeer_token(tables, face, &mut res, &peer, send_declare); + Some(res) + } else { + None + } + } else { + None + } + } else { + forget_simple_token(tables, face, id, send_declare) + } + } +} diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 99e2f175b6..17c71d4162 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,27 +17,34 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - dispatcher::{ - face::{Face, FaceState}, - tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, - }, - router::RoutesIndexes, - RoutingContext, -}; -use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; -use zenoh_buffers::ZBuf; -use zenoh_config::{unwrap_or_default, Config, WhatAmI, ZenohId}; + +use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ - core::WireExpr, + core::ZenohIdProto, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + declare::{ + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, + SubscriberId, TokenId, + }, + interest::{InterestId, InterestMode, InterestOptions}, Declare, Oam, }, }; use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +#[cfg(feature = "unstable")] +use {crate::key_expr::KeyExpr, std::collections::HashMap}; + +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, + }, + router::RoutesIndexes, + RoutingContext, +}; +use crate::net::runtime::Runtime; mod client; mod linkstate_peer; @@ -50,9 +57,9 @@ zconfigurable! 
{ #[derive(serde::Serialize)] pub(crate) struct Sources { - routers: Vec, - peers: Vec, - clients: Vec, + routers: Vec, + peers: Vec, + clients: Vec, } impl Sources { @@ -67,8 +74,10 @@ impl Sources { pub(crate) type SendDeclare<'a> = dyn FnMut(&Arc, RoutingContext) + 'a; - -pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} +pub(crate) trait HatTrait: + HatBaseTrait + HatInterestTrait + HatPubSubTrait + HatQueriesTrait + HatTokenTrait +{ +} pub(crate) trait HatBaseTrait { fn init(&self, tables: &mut Tables, runtime: Runtime); @@ -140,11 +149,29 @@ pub(crate) trait HatBaseTrait { ); } +pub(crate) trait HatInterestTrait { + #[allow(clippy::too_many_arguments)] + fn declare_interest( + &self, + tables: &mut Tables, + tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + send_declare: &mut SendDeclare, + ); + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId); +} + pub(crate) trait HatPubSubTrait { + #[allow(clippy::too_many_arguments)] fn declare_subscription( &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -154,10 +181,11 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ); + ) -> Option>; fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc, Sources)>; @@ -170,15 +198,24 @@ pub(crate) trait HatPubSubTrait { ) -> Arc; fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; + + #[zenoh_macros::unstable] + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap>; } pub(crate) trait HatQueriesTrait { + #[allow(clippy::too_many_arguments)] fn declare_queryable( &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, send_declare: &mut SendDeclare, ); @@ -186,10 +223,11 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ); + ) -> Option>; fn get_queryables(&self, tables: &Tables) -> Vec<(Arc, Sources)>; @@ -202,14 +240,6 @@ pub(crate) trait HatQueriesTrait { ) -> Arc; fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes; - - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)>; } pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box { @@ -225,3 +255,44 @@ pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box Box::new(router::HatCode {}), } } + +pub(crate) trait HatTokenTrait { + #[allow(clippy::too_many_arguments)] + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: &mut Arc, + node_id: NodeId, + interest_id: Option, + send_declare: &mut SendDeclare, + ); + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc, + id: TokenId, + res: Option>, + node_id: NodeId, + send_declare: &mut SendDeclare, + ) -> Option>; +} + +trait CurrentFutureTrait { + fn future(&self) -> bool; + fn current(&self) -> bool; +} + +impl CurrentFutureTrait for InterestMode { + #[inline] + fn future(&self) -> bool { + self == &InterestMode::Future || self == &InterestMode::CurrentFuture + } + + #[inline] + fn 
current(&self) -> bool { + self == &InterestMode::Current || self == &InterestMode::CurrentFuture + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 38a833d1c2..b3216c6b8c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -11,24 +11,29 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::runtime::Runtime; -use crate::runtime::WeakRuntime; -use petgraph::graph::NodeIndex; -use rand::Rng; use std::convert::TryInto; + +use petgraph::graph::NodeIndex; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + runtime::{Runtime, WeakRuntime}, +}; + #[derive(Clone)] struct Details { zid: bool, @@ -38,11 +43,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -53,8 +58,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -70,12 +75,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -101,7 +106,7 @@ pub(super) struct Network { impl Network { pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, router_peers_failover_brokering: bool, gossip: bool, @@ -139,14 +144,14 @@ impl Network { // } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) } #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -215,7 +220,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), + ext_qos: oam::ext::QoSType::OAM, ext_tstamp: None, }) .into()) @@ -266,7 +271,7 @@ impl Network { })) } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) { + pub(super) fn link_states(&mut self, link_states: 
Vec, src: ZenohIdProto) { tracing::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let strong_runtime = self.runtime.upgrade().unwrap(); @@ -328,7 +333,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -393,6 +398,11 @@ impl Network { if self.gossip { if let Some(idx) = idx { + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .add_peer_connector_zid(zid), + ); if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { self.send_on_links( vec![( @@ -418,12 +428,11 @@ impl Network { .await .is_none() { - // random backoff - let sleep_time = std::time::Duration::from_millis( - rand::thread_rng().gen_range(0..100), - ); - tokio::time::sleep(sleep_time).await; runtime.connect_peer(&zid, &locators).await; + runtime + .start_conditions() + .terminate_peer_connector_zid(zid) + .await; } }); } @@ -431,6 +440,11 @@ impl Network { } } } + zenoh_runtime::ZRuntime::Net.block_in_place( + strong_runtime + .start_conditions() + .terminate_peer_connector_zid(src), + ); } pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { @@ -537,7 +551,7 @@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); diff --git a/zenoh/src/net/routing/hat/p2p_peer/interests.rs b/zenoh/src/net/routing/hat/p2p_peer/interests.rs new file mode 100644 index 0000000000..2ed9e22840 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/interests.rs @@ -0,0 +1,241 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
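The interest handling in the new `p2p_peer/interests.rs` that begins here hinges on the `CurrentFutureTrait` helper added in `hat/mod.rs`: an interest may request the current state, future updates, or both, and `InterestMode::Final` closes it again (as used by `undeclare_interest`). A small self-contained sketch of that mode split; the enum variants mirror zenoh's `InterestMode`, everything else is illustrative:

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum InterestMode {
    Current,
    Future,
    CurrentFuture,
    Final,
}

trait CurrentFutureTrait {
    fn current(&self) -> bool;
    fn future(&self) -> bool;
}

impl CurrentFutureTrait for InterestMode {
    // current() => replay the matching declarations right away.
    fn current(&self) -> bool {
        matches!(self, InterestMode::Current | InterestMode::CurrentFuture)
    }
    // future() => keep the interest registered and feed it new declarations.
    fn future(&self) -> bool {
        matches!(self, InterestMode::Future | InterestMode::CurrentFuture)
    }
}

fn main() {
    for mode in [
        InterestMode::Current,
        InterestMode::Future,
        InterestMode::CurrentFuture,
        InterestMode::Final, // neither: it only tears the interest down
    ] {
        println!(
            "{mode:?}: replay_now={} keep_registered={}",
            mode.current(),
            mode.future()
        );
    }
}
```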
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, Interest, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat, face_hat_mut, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, +}; +use crate::net::routing::{ + dispatcher::{ + face::{FaceState, InterestState}, + interests::{CurrentInterest, CurrentInterestCleanup}, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, + RoutingContext, +}; + +pub(super) fn interests_new_face(tables: &mut Tables, face: &mut Arc) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Router { + for (res, options) in face_hat_mut!(&mut src_face).remote_interests.values() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(face).local_interests.insert( + id, + InterestState { + options: *options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: false, + }, + ); + let wire_expr = res + .as_ref() + .map(|res| Resource::decl_key(res, face, face.whatami != WhatAmI::Client)); + face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::CurrentFuture, + options: *options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + } + } + } +} + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + tables_ref: &Arc, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + options: InterestOptions, + send_declare: &mut SendDeclare, + ) { + if options.subscribers() { + declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + face_hat_mut!(face) + .remote_interests + .insert(id, (res.as_ref().map(|res| (*res).clone()), options)); + + let interest = Arc::new(CurrentInterest { + src_face: face.clone(), + src_interest_id: id, + }); + + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) + { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + get_mut_unchecked(dst_face).local_interests.insert( + id, + InterestState { + options, + res: res.as_ref().map(|res| (*res).clone()), + finalized: mode == InterestMode::Future, + }, + ); + if mode.current() { + let dst_face_mut = get_mut_unchecked(dst_face); + let cancellation_token = dst_face_mut.task_controller.get_cancellation_token(); + dst_face_mut + .pending_current_interests + .insert(id, (interest.clone(), cancellation_token)); + CurrentInterestCleanup::spawn_interest_clean_up_task(dst_face, tables_ref, id); + } + let wire_expr = res + .as_ref() + 
.map(|res| Resource::decl_key(res, dst_face, dst_face.whatami == WhatAmI::Client)); + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode, + options, + wire_expr, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + res.as_ref().map(|res| res.expr()).unwrap_or_default(), + )); + } + + if mode.current() { + if let Some(interest) = Arc::into_inner(interest) { + tracing::debug!( + "Propagate DeclareFinal {}:{}", + interest.src_face, + interest.src_interest_id + ); + send_declare( + &interest.src_face.primitives, + RoutingContext::new(Declare { + interest_id: Some(interest.src_interest_id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } + } + } + + fn undeclare_interest(&self, tables: &mut Tables, face: &mut Arc, id: InterestId) { + if let Some(interest) = face_hat_mut!(face).remote_interests.remove(&id) { + if !tables.faces.values().any(|f| { + f.whatami == WhatAmI::Client + && face_hat!(f) + .remote_interests + .values() + .any(|i| *i == interest) + }) { + for dst_face in tables + .faces + .values_mut() + .filter(|f| f.whatami == WhatAmI::Router) + { + for id in dst_face + .local_interests + .keys() + .cloned() + .collect::>() + { + let local_interest = dst_face.local_interests.get(&id).unwrap(); + if local_interest.res == interest.0 && local_interest.options == interest.1 + { + dst_face.primitives.send_interest(RoutingContext::with_expr( + Interest { + id, + mode: InterestMode::Final, + options: InterestOptions::empty(), + wire_expr: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + }, + local_interest + .res + .as_ref() + .map(|res| res.expr()) + .unwrap_or_default(), + )); + get_mut_unchecked(dst_face).local_interests.remove(&id); + } + } + } + } + } + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 89270ffe2c..21737326e4 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -17,24 +17,36 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{ - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, +use std::{ + any::Any, + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, +}; + +use token::{token_new_face, undeclare_simple_token}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::{ + ext::{NodeIdType, QoSType}, + queryable::ext::QueryableInfoType, + QueryableId, SubscriberId, TokenId, }, + interest::{InterestId, InterestOptions}, + oam::id::OAM_LINKSTATE, + Declare, DeclareBody, DeclareFinal, Oam, }, - runtime::Runtime, }; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::unicast::TransportUnicast; use self::{ gossip::Network, - pubsub::{pubsub_new_face, undeclare_client_subscription}, - queries::{queries_new_face, undeclare_client_queryable}, + interests::interests_new_face, + pubsub::{pubsub_new_face, undeclare_simple_subscription}, + queries::{queries_new_face, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -43,24 +55,22 @@ use super::{ }, HatBaseTrait, HatTrait, SendDeclare, }; -use std::{ - any::Any, - collections::{HashMap, HashSet}, - sync::Arc, -}; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::Oam; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::{Face, InterestState}, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + RoutingContext, + }, + runtime::Runtime, }; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::unicast::TransportUnicast; mod gossip; +mod interests; mod pubsub; mod queries; +mod token; macro_rules! 
hat_mut { ($t:expr) => { @@ -140,8 +150,10 @@ impl HatBaseTrait for HatCode { face: &mut Face, send_declare: &mut SendDeclare, ) -> ZResult<()> { + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state, send_declare); queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); Ok(()) } @@ -158,8 +170,34 @@ impl HatBaseTrait for HatCode { net.add_link(transport.clone()); } } + if face.state.whatami == WhatAmI::Peer { + get_mut_unchecked(&mut face.state).local_interests.insert( + 0, + InterestState { + options: InterestOptions::ALL, + res: None, + finalized: false, + }, + ); + } + + interests_new_face(tables, &mut face.state); pubsub_new_face(tables, &mut face.state, send_declare); queries_new_face(tables, &mut face.state, send_declare); + token_new_face(tables, &mut face.state, send_declare); + + if face.state.whatami == WhatAmI::Peer { + send_declare( + &face.state.primitives, + RoutingContext::new(Declare { + interest_id: Some(0), + ext_qos: QoSType::default(), + ext_tstamp: None, + ext_nodeid: NodeIdType::default(), + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } Ok(()) } @@ -172,6 +210,19 @@ impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; + + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); + for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -184,15 +235,9 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -212,15 +257,9 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_id, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -238,6 +277,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -245,11 +289,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -257,13 +297,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -368,19 +405,27 @@ impl HatContext { } struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_interests: HashMap>, InterestOptions)>, + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, + remote_qabls: HashMap>, } impl HatFace { fn new() -> Self { Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + remote_interests: HashMap::new(), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 175ee8f0ca..0dccf9ba3c 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -11,28 +11,39 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use zenoh_protocol::{ - core::{Reliability, WhatAmI}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI}, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode, InterestOptions}, }, }; use zenoh_sync::get_mut_unchecked; +use super::{face_hat, face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::{ + key_expr::KeyExpr, + net::routing::{ + dispatcher::{ + face::FaceState, + 
resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, + router::{update_data_routes_from, RoutesIndexes}, + RoutingContext, + }, +}; + #[inline] fn propagate_simple_subscription_to( _tables: &mut Tables, @@ -42,29 +53,72 @@ fn propagate_simple_subscription_to( src_face: &mut Arc<FaceState>, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + if (src_face.id != dst_face.id) + && !face_hat!(dst_face).local_subs.contains_key(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - ), - ); + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| { + o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) + }) + .cloned() + .collect::<Vec<(Option<Arc<Resource>>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); + } + } + } } } @@ -92,59 +146,45 @@ fn propagate_simple_subscription( } } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc<FaceState>, + id: SubscriberId, res: &mut Arc<Resource>, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => {
res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc<FaceState>, + id: SubscriberId, res: &mut Arc<Resource>, sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + register_simple_subscription(tables, face, id, res, sub_info); - propagate_simple_subscription(tables, res, &propa_sub_info, face, send_declare); + propagate_simple_subscription(tables, res, sub_info, face, send_declare); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until fixed #[cfg(not(windows))] @@ -153,11 +193,12 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // @TODO use proper SubscriberId wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -168,7 +209,7 @@ } #[inline] -fn client_subs(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { +fn simple_subs(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { res.session_ctxs .values() .filter_map(|ctx| { @@ -181,84 +222,152 @@ fn client_subs(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { .collect() } +#[inline] +fn remote_simple_subs(res: &Arc<Resource>, face: &Arc<FaceState>) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + fn propagate_forget_simple_subscription( tables: &mut Tables, res: &Arc<Resource>, send_declare: &mut SendDeclare, ) { - for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - face_hat_mut!(face).local_subs.remove(res); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_simple_subs(&m, &face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None,
ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc<FaceState>, res: &mut Arc<Resource>, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res, send_declare); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); + let mut simple_subs = simple_subs(res); + if simple_subs.is_empty() { + propagate_forget_simple_subscription(tables, res, send_declare); + } - face_hat_mut!(face).local_subs.remove(res); + if simple_subs.len() == 1 { + let mut face = &mut simple_subs[0]; + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_simple_subs(&m, face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } } } } -fn forget_client_subscription( +fn forget_simple_subscription( tables: &mut Tables, face: &mut Arc<FaceState>, - res: &mut Arc<Resource>, + id: SubscriberId, send_declare: &mut SendDeclare, -) { - undeclare_client_subscription(tables, face, res, send_declare); +) -> Option<Arc<Resource>> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_simple_subscription(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } } pub(super) fn pubsub_new_face( @@ -266,25 +375,164 @@ pub(super) fn pubsub_new_face( face: &mut Arc<FaceState>, send_declare: &mut SendDeclare, ) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO
compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, - }; - for src_face in tables - .faces - .values() - .cloned() - .collect::<Vec<Arc<FaceState>>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - send_declare, - ); + if face.whatami != WhatAmI::Client { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + for sub in face_hat!(src_face).remote_subs.values() { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + send_declare, + ); + } + } + } + // recompute routes + // TODO: disable data routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer connection. + update_data_routes_from(tables, &mut tables.root_res.clone()); +} + +pub(super) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: InterestId, + res: Option<&mut Arc<Resource>>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_subs + .values() + .any(|sub| sub.context.is_some() && sub.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + if sub.context.is_some() && sub.matches(res) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber( + DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }, + ), + }, + sub.expr(), + ), + ); + } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + if src_face.id != face.id { + for sub in face_hat!(src_face).remote_subs.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let
wire_expr = + Resource::decl_key(sub, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); + } + } } } } @@ -294,30 +542,32 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc<FaceState>, + id: SubscriberId, res: &mut Arc<Resource>, sub_info: &SubscriberInfo, _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_subscription(tables, face, res, sub_info, send_declare); + declare_simple_subscription(tables, face, id, res, sub_info, send_declare); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc<FaceState>, - res: &mut Arc<Resource>, + id: SubscriberId, + _res: Option<Arc<Resource>>, _node_id: NodeId, send_declare: &mut SendDeclare, - ) { - forget_client_subscription(tables, face, res, send_declare); + ) -> Option<Arc<Resource>> { + forget_simple_subscription(tables, face, id, send_declare) } fn get_subscriptions(&self, tables: &Tables) -> Vec<(Arc<Resource>, Sources)> { // Compute the list of known subscriptions (keys) let mut subs = HashMap::new(); for src_face in tables.faces.values() { - for sub in &face_hat!(src_face).remote_subs { + for sub in face_hat!(src_face).remote_subs.values() { // Insert the key in the list of known subscriptions let srcs = subs.entry(sub.clone()).or_insert_with(Sources::empty); // Append src_face as a subscription source in the proper list @@ -356,6 +606,67 @@ impl HatPubSubTrait for HatCode { return Arc::new(route); } }; + + if source_type == WhatAmI::Client { + for face in tables + .faces + .values() + .filter(|f| f.whatami == WhatAmI::Router) + { + if face.local_interests.values().any(|interest| { + interest.finalized + && interest.options.subscribers() + && interest + .res + .as_ref() + .map(|res| { + KeyExpr::try_from(res.expr()) + .and_then(|intres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| intres.includes(&putres)) + }) + .unwrap_or(false) + }) + .unwrap_or(true) + }) { + if face_hat!(face).remote_subs.values().any(|sub| { + KeyExpr::try_from(sub.expr()) + .and_then(|subres| { + KeyExpr::try_from(expr.full_expr()) + .map(|putres| subres.intersects(&putres)) + }) + .unwrap_or(false) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } else { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.insert( + face.id, + (face.clone(), key_expr.to_owned(), NodeId::default()), + ); + } + } + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() .and_then(|res| res.context.as_ref()) .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); for mres in matches.iter() { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode ==
Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if context.subs.is_some() + && (source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client) + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -400,4 +704,36 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } + + #[zenoh_macros::unstable] + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap<usize, Arc<FaceState>> { + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 6084164a80..2fd6d6fa81 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -11,48 +11,55 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut, get_routes_entries}; -use super::{HatCode, HatFace}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{WhatAmI, WireExpr}, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, + }, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, + }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; - this.distance = std::cmp::min(this.distance, info.distance); - this -} +use super::{face_hat,
face_hat_mut, get_routes_entries, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, + router::{update_query_routes_from, RoutesIndexes}, + RoutingContext, +}; -#[cfg(not(feature = "complete_n"))] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_qabl_info(_tables: &Tables, res: &Arc<Resource>, face: &Arc<FaceState>) -> QueryableInfo { +fn local_qabl_info( + _tables: &Tables, + res: &Arc<Resource>, + face: &Arc<FaceState>, +) -> QueryableInfoType { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -69,10 +76,61 @@ fn local_qabl_info(_tables: &Tables, res: &Arc<Resource>, face: &Arc<FaceState>) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) +} + +#[inline] +fn propagate_simple_queryable_to( + tables: &mut Tables, + dst_face: &mut Arc<FaceState>, + res: &Arc<Resource>, + src_face: &Option<&mut Arc<FaceState>>, + send_declare: &mut SendDeclare, +) { + let info = local_qabl_info(tables, res, dst_face); + let current = face_hat!(dst_face).local_qabls.get(res); + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) + && (current.is_none() || current.unwrap().1 != info) + && (dst_face.whatami != WhatAmI::Client + || face_hat!(dst_face) + .remote_interests + .values() + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true))) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + }) + .unwrap_or(true) + { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), (id, info)); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); + } } fn propagate_simple_queryable( @@ -81,80 +139,50 @@ fn propagate_simple_queryable( src_face: Option<&mut Arc<FaceState>>, send_declare: &mut SendDeclare, ) { - let faces = tables.faces.values().cloned(); + let faces = tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>(); for mut dst_face in faces { - let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client) - { - face_hat_mut!(&mut dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, &mut dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(),
ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - ), - ); - } + propagate_simple_queryable_to(tables, &mut dst_face, res, &src_face, send_declare); } } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc<FaceState>, + id: QueryableId, res: &mut Arc<Resource>, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { let res = get_mut_unchecked(res); - tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc<FaceState>, + id: QueryableId, res: &mut Arc<Resource>, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); propagate_simple_queryable(tables, res, Some(face), send_declare); } #[inline] -fn client_qabls(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { +fn simple_qabls(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { res.session_ctxs .values() .filter_map(|ctx| { @@ -167,139 +195,363 @@ fn client_qabls(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { .collect() } +#[inline] +fn remote_simple_qabls(res: &Arc<Resource>, face: &Arc<FaceState>) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + fn propagate_forget_simple_queryable( tables: &mut Tables, res: &mut Arc<Resource>, send_declare: &mut SendDeclare, ) { for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - - face_hat_mut!(face).local_qabls.remove(res); + } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_simple_qabls(&m, face)) + }) { + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } }
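The declare/undeclare paths in this patch all follow the same pattern: a declaration allocates a fresh per-face numeric id from the `next_id` counter, and the matching undeclaration references that id instead of resolving a wire expression. A minimal, self-contained sketch of that id lifecycle, using a hypothetical `FaceDeclarations` type with `String` keys as a stand-in for the real `Arc<Resource>` state:

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU32, Ordering},
};

type QueryableId = u32;

// Hypothetical stand-in for the per-face state kept in `HatFace`.
struct FaceDeclarations {
    next_id: AtomicU32,
    remote_qabls: HashMap<QueryableId, String>,
}

impl FaceDeclarations {
    // Declaring allocates a fresh per-face id and records the key expression under it.
    fn declare(&mut self, key_expr: &str) -> QueryableId {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
        self.remote_qabls.insert(id, key_expr.to_owned());
        id
    }

    // Undeclaring only needs the id; no reverse lookup by wire expression is required.
    fn undeclare(&mut self, id: QueryableId) -> Option<String> {
        self.remote_qabls.remove(&id)
    }
}

fn main() {
    let mut face = FaceDeclarations {
        next_id: AtomicU32::new(0),
        remote_qabls: HashMap::new(),
    };
    let id = face.declare("demo/example/**");
    assert_eq!(face.undeclare(id).as_deref(), Some("demo/example/**"));
    assert_eq!(face.undeclare(id), None); // a second undeclare is a no-op
}

This is why the undeclare messages in the hunks above can carry `WireExprType::null()`: the id alone identifies the declaration on the receiving face.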
-pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc<FaceState>, res: &mut Arc<Resource>, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res, send_declare); - } else { - propagate_simple_queryable(tables, res, None, send_declare); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(face).local_qabls.remove(res); + let mut simple_qabls = simple_qabls(res); + if simple_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res, send_declare); + } else { + propagate_simple_queryable(tables, res, None, send_declare); + } + if simple_qabls.len() == 1 { + let mut face = &mut simple_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && (remote_simple_qabls(&m, face))) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } } } } -fn forget_client_queryable( +fn forget_simple_queryable( tables: &mut Tables, face: &mut Arc<FaceState>, - res: &mut Arc<Resource>, + id: QueryableId, send_declare: &mut SendDeclare, -) { - undeclare_client_queryable(tables, face, res, send_declare); +) -> Option<Arc<Resource>> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_simple_queryable(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } } pub(super) fn queries_new_face( tables: &mut Tables, - _face: &mut Arc<FaceState>, + face: &mut Arc<FaceState>, send_declare: &mut SendDeclare, ) { - for face in tables - .faces - .values() - .cloned() - .collect::<Vec<Arc<FaceState>>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone()),
send_declare); + if face.whatami != WhatAmI::Client { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + for qabl in face_hat!(src_face).remote_qabls.values() { + propagate_simple_queryable_to( + tables, + face, + qabl, + &Some(&mut src_face.clone()), + send_declare, + ); + } } } + // recompute routes + // TODO: disable query routes and recompute them in parallel to avoid holding + // tables write lock for a long time on peer connection. + update_query_routes_from(tables, &mut tables.root_res.clone()); } lazy_static::lazy_static! { static ref EMPTY_ROUTE: Arc<QueryTargetQablSet> = Arc::new(Vec::new()); } +pub(super) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: InterestId, + res: Option<&mut Arc<Resource>>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + src_face.id != face.id + && face_hat!(src_face) + .remote_qabls + .values() + .any(|qabl| qabl.context.is_some() && qabl.matches(res)) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() && qabl.matches(res) { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); + } + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + if src_face.id != face.id { + for qabl in face_hat!(src_face).remote_qabls.values() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = + Resource::decl_key(qabl, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info,
+ }), + }, + qabl.expr(), + ), + ); + } + } + } + } + } +} + impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, face: &mut Arc<FaceState>, + id: QueryableId, res: &mut Arc<Resource>, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, _node_id: NodeId, send_declare: &mut SendDeclare, ) { - declare_client_queryable(tables, face, res, qabl_info, send_declare); + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc<FaceState>, - res: &mut Arc<Resource>, + id: QueryableId, + _res: Option<Arc<Resource>>, _node_id: NodeId, send_declare: &mut SendDeclare, - ) { - forget_client_queryable(tables, face, res, send_declare); + ) -> Option<Arc<Resource>> { + forget_simple_queryable(tables, face, id, send_declare) } fn get_queryables(&self, tables: &Tables) -> Vec<(Arc<Resource>, Sources)> { // Compute the list of known queryables (keys) let mut qabls = HashMap::new(); for src_face in tables.faces.values() { - for qabl in &face_hat!(src_face).remote_qabls { + for qabl in face_hat!(src_face).remote_qabls.values() { // Insert the key in the list of known queryables let srcs = qabls.entry(qabl.clone()).or_insert_with(Sources::empty); // Append src_face as a queryable source in the proper list @@ -338,6 +590,35 @@ impl HatQueriesTrait for HatCode { return EMPTY_ROUTE.clone(); } }; + + if source_type == WhatAmI::Client { + // TODO: BNestMatching: What if there is a local complete? + if let Some(face) = tables.faces.values().find(|f| f.whatami == WhatAmI::Router) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: f64::MAX, + }); + } + + for face in tables.faces.values().filter(|f| { + f.whatami == WhatAmI::Peer + && !f + .local_interests + .get(&0) + .map(|i| i.finalized) + .unwrap_or(true) + }) { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), NodeId::default()), + complete: 0, + distance: 0.5, + }); + } + } + let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res .as_ref() .and_then(|res| res.context.as_ref()) .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); for mres in matches.iter() { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { + if source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { @@ -376,44 +654,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc<Resource>, - suffix: &str, - face: &Arc<FaceState>, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix,
suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/p2p_peer/token.rs b/zenoh/src/net/routing/hat/p2p_peer/token.rs new file mode 100644 index 0000000000..866737f0df --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/token.rs @@ -0,0 +1,545 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +use std::sync::{atomic::Ordering, Arc}; + +use zenoh_config::WhatAmI; +use zenoh_protocol::network::{ + declare::{common::ext::WireExprType, TokenId}, + ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareToken, UndeclareToken, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{face_hat, face_hat_mut, HatCode, HatFace}; +use crate::net::routing::{ + dispatcher::{face::FaceState, tables::Tables}, + hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare}, + router::{NodeId, Resource, SessionContext}, + RoutingContext, +}; + +#[inline] +fn propagate_simple_token_to( + tables: &mut Tables, + dst_face: &mut Arc<FaceState>, + res: &Arc<Resource>, + src_face: &mut Arc<FaceState>, + send_declare: &mut SendDeclare, +) { + if (src_face.id != dst_face.id || dst_face.zid == tables.zid) + && !face_hat!(dst_face).local_tokens.contains_key(res) + && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) + { + if dst_face.whatami != WhatAmI::Client { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); + } else { + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::<Vec<(Option<Arc<Resource>>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_tokens.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id); + let key_expr = + Resource::decl_key(res, dst_face, dst_face.whatami != WhatAmI::Client); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None,
ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr: key_expr, + }), + }, + res.expr(), + ), + ); + } + } + } + } +} + +fn propagate_simple_token( + tables: &mut Tables, + res: &Arc<Resource>, + src_face: &mut Arc<FaceState>, + send_declare: &mut SendDeclare, +) { + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + propagate_simple_token_to(tables, &mut dst_face, res, src_face, send_declare); + } +} + +fn register_simple_token( + _tables: &mut Tables, + face: &mut Arc<FaceState>, + id: TokenId, + res: &mut Arc<Resource>, +) { + // Register liveliness + { + let res = get_mut_unchecked(res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => { + if !ctx.token { + get_mut_unchecked(ctx).token = true; + } + } + None => { + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).token = true; + } + } + } + face_hat_mut!(face).remote_tokens.insert(id, res.clone()); +} + +fn declare_simple_token( + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: TokenId, + res: &mut Arc<Resource>, + send_declare: &mut SendDeclare, +) { + register_simple_token(tables, face, id, res); + + propagate_simple_token(tables, res, face, send_declare); +} + +#[inline] +fn simple_tokens(res: &Arc<Resource>) -> Vec<Arc<FaceState>> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.token { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn remote_simple_tokens(tables: &Tables, res: &Arc<Resource>, face: &Arc<FaceState>) -> bool { + res.session_ctxs + .values() + .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token) +} + +fn propagate_forget_simple_token( + tables: &mut Tables, + res: &Arc<Resource>, + src_face: &Arc<FaceState>, + send_declare: &mut SendDeclare, +) { + for mut face in tables.faces.values().cloned() { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } else if src_face.id != face.id + && face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate() + }) + { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext.
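+ // The receiving face cannot know this freshly generated id, since it never
+ // received a matching DeclareToken; the ext_wire_expr carried below is what
+ // lets it identify which token the undeclaration refers to.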
+ send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade() + .is_some_and(|m| m.context.is_some() && remote_simple_tokens(tables, &m, &face)) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } else if face_hat!(face).remote_interests.values().any(|(r, o)| { + o.tokens() + && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true) + && !o.aggregate() + }) { + // Token has never been declared on this face. + // Send an Undeclare with a one shot generated id and a WireExpr ext. + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst), + ext_wire_expr: WireExprType { + wire_expr: Resource::get_best_key(&res, "", face.id), + }, + }), + }, + res.expr(), + ), + ); + } + } + } + } +} + +pub(super) fn undeclare_simple_token( + tables: &mut Tables, + face: &mut Arc<FaceState>, + res: &mut Arc<Resource>, + send_declare: &mut SendDeclare, +) { + if !face_hat_mut!(face) + .remote_tokens + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).token = false; + } + + let mut simple_tokens = simple_tokens(res); + if simple_tokens.is_empty() { + propagate_forget_simple_token(tables, res, face, send_declare); + } + + if simple_tokens.len() == 1 { + let mut face = &mut simple_tokens[0]; + if face.whatami != WhatAmI::Client { + if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + for res in face_hat!(face) + .local_tokens + .keys() + .cloned() + .collect::<Vec<Arc<Resource>>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() && remote_simple_tokens(tables, &m, face) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareToken(UndeclareToken { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } + } + } + } +} + +fn forget_simple_token( + tables: &mut Tables, + face: &mut Arc<FaceState>, + id:
TokenId, + res: Option<Arc<Resource>>, + send_declare: &mut SendDeclare, +) -> Option<Arc<Resource>> { + if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) { + undeclare_simple_token(tables, face, &mut res, send_declare); + Some(res) + } else if let Some(mut res) = res { + undeclare_simple_token(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } +} + +pub(super) fn token_new_face( + tables: &mut Tables, + face: &mut Arc<FaceState>, + send_declare: &mut SendDeclare, +) { + if face.whatami != WhatAmI::Client { + for mut src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + for token in face_hat!(src_face.clone()).remote_tokens.values() { + propagate_simple_token_to(tables, face, token, &mut src_face, send_declare); + } + } + } +} + +pub(crate) fn declare_token_interest( + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: InterestId, + res: Option<&mut Arc<Resource>>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() && face.whatami == WhatAmI::Client { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if tables.faces.values().any(|src_face| { + face_hat!(src_face) + .remote_tokens + .values() + .any(|token| token.context.is_some() && token.matches(res)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = Resource::decl_key(res, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + res.expr(), + ), + ); + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + if token.context.is_some() && token.matches(res) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { + id, + wire_expr, + }), + }, + token.expr(), + ), + ); + } + } + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::<Vec<Arc<FaceState>>>() + { + for token in face_hat!(src_face).remote_tokens.values() { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_tokens.insert(token.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(token, face, face.whatami != WhatAmI::Client); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }), + }, + token.expr(), + ), + ); + } + } + } + } +} + +impl HatTokenTrait for HatCode { + fn declare_token( + &self, + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: TokenId, + res: &mut Arc<Resource>, + _node_id: NodeId, + _interest_id: Option<InterestId>, + send_declare: &mut
SendDeclare, + ) { + declare_simple_token(tables, face, id, res, send_declare) + } + + fn undeclare_token( + &self, + tables: &mut Tables, + face: &mut Arc<FaceState>, + id: TokenId, + res: Option<Arc<Resource>>, + _node_id: NodeId, + send_declare: &mut SendDeclare, + ) -> Option<Arc<Resource>> { + forget_simple_token(tables, face, id, res, send_declare) + } +} diff --git a/zenoh/src/net/routing/hat/router/interests.rs b/zenoh/src/net/routing/hat/router/interests.rs new file mode 100644 index 0000000000..33bb3ddf6b --- /dev/null +++ b/zenoh/src/net/routing/hat/router/interests.rs @@ -0,0 +1,120 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::sync::Arc; + +use zenoh_protocol::{ + core::WhatAmI, + network::{ + declare::ext, + interest::{InterestId, InterestMode, InterestOptions}, + Declare, DeclareBody, DeclareFinal, + }, +}; +use zenoh_sync::get_mut_unchecked; + +use super::{ + face_hat_mut, hat, pubsub::declare_sub_interest, queries::declare_qabl_interest, + token::declare_token_interest, HatCode, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + resource::Resource, + tables::{Tables, TablesLock}, + }, + hat::{CurrentFutureTrait, HatInterestTrait, SendDeclare}, + RoutingContext, +}; + +impl HatInterestTrait for HatCode { + fn declare_interest( + &self, + tables: &mut Tables, + _tables_ref: &Arc<TablesLock>, + face: &mut Arc<FaceState>, + id: InterestId, + res: Option<&mut Arc<Resource>>, + mode: InterestMode, + mut options: InterestOptions, + send_declare: &mut SendDeclare, + ) { + if options.aggregate() && face.whatami == WhatAmI::Peer { + tracing::warn!( + "Received Interest with aggregate=true from peer {}.
Not supported!", + face.zid + ); + options -= InterestOptions::AGGREGATE; + } + if options.subscribers() { + declare_sub_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.queryables() { + declare_qabl_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if options.tokens() { + declare_token_interest( + tables, + face, + id, + res.as_ref().map(|r| (*r).clone()).as_mut(), + mode, + options.aggregate(), + send_declare, + ) + } + if mode.future() { + face_hat_mut!(face) + .remote_interests + .insert(id, (res.cloned(), options)); + } + if mode.current() { + send_declare( + &face.primitives, + RoutingContext::new(Declare { + interest_id: Some(id), + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareFinal(DeclareFinal), + }), + ); + } + } + + fn undeclare_interest(&self, _tables: &mut Tables, face: &mut Arc<FaceState>, id: InterestId) { + face_hat_mut!(face).remote_interests.remove(&id); + } +} + +#[inline] +pub(super) fn push_declaration_profile(tables: &Tables, face: &FaceState) -> bool { + face.whatami == WhatAmI::Client + || (face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer)) +} diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 407562425e..a2d3c66aa3 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -17,14 +17,34 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +use std::{ + any::Any, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + sync::{atomic::AtomicU32, Arc}, +}; + +use token::{token_linkstate_change, token_remove_node, undeclare_simple_token}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::{ + common::ZExtBody, + core::ZenohIdProto, + network::{ + declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId, TokenId}, + interest::{InterestId, InterestOptions}, + oam::id::OAM_LINKSTATE, + Oam, + }, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_task::TerminatableTask; +use zenoh_transport::unicast::TransportUnicast; + use self::{ network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, + pubsub::{pubsub_linkstate_change, pubsub_remove_node, undeclare_simple_subscription}, + queries::{queries_linkstate_change, queries_remove_node, undeclare_simple_queryable}, }; use super::{ super::dispatcher::{ @@ -33,39 +53,22 @@ }, HatBaseTrait, HatTrait, SendDeclare, }; -use crate::{ - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::{ - dispatcher::face::Face, - hat::TREES_COMPUTATION_DELAY_MS, - router::{ - compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, - }, - }, +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + hat::TREES_COMPUTATION_DELAY_MS, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, }, runtime::Runtime, }; -use std::{ - any::Any,
collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, - sync::Arc, -}; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, -}; -use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_task::TerminatableTask; -use zenoh_transport::unicast::TransportUnicast; +mod interests; mod network; mod pubsub; mod queries; +mod token; macro_rules! hat { ($t:expr) => { @@ -137,12 +140,17 @@ impl TreesComputationWorker { .as_mut() .unwrap() .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + _ => hat_mut!(tables) + .linkstatepeers_net + .as_mut() + .unwrap() + .compute_trees(), }; tracing::trace!("Compute routes"); pubsub::pubsub_tree_change(&mut tables, &new_children, net_type); queries::queries_tree_change(&mut tables, &new_children, net_type); + token::token_tree_change(&mut tables, &new_children, net_type); drop(tables); } } @@ -153,14 +161,16 @@ impl TreesComputationWorker { struct HatTables { router_subs: HashSet>, - peer_subs: HashSet>, + linkstatepeer_subs: HashSet>, + router_tokens: HashSet>, + linkstatepeer_tokens: HashSet>, router_qabls: HashSet>, - peer_qabls: HashSet>, + linkstatepeer_qabls: HashSet>, routers_net: Option, - peers_net: Option, - shared_nodes: Vec, + linkstatepeers_net: Option, + shared_nodes: Vec, routers_trees_worker: TreesComputationWorker, - peers_trees_worker: TreesComputationWorker, + linkstatepeers_trees_worker: TreesComputationWorker, router_peers_failover_brokering: bool, } @@ -168,14 +178,16 @@ impl HatTables { fn new(router_peers_failover_brokering: bool) -> Self { Self { router_subs: HashSet::new(), - peer_subs: HashSet::new(), + linkstatepeer_subs: HashSet::new(), router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), + linkstatepeer_qabls: HashSet::new(), + router_tokens: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), routers_net: None, - peers_net: None, + linkstatepeers_net: None, shared_nodes: vec![], routers_trees_worker: TreesComputationWorker::new(WhatAmI::Router), - peers_trees_worker: TreesComputationWorker::new(WhatAmI::Peer), + linkstatepeers_trees_worker: TreesComputationWorker::new(WhatAmI::Peer), router_peers_failover_brokering, } } @@ -184,7 +196,7 @@ impl HatTables { fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { match net_type { WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), + WhatAmI::Peer => self.linkstatepeers_net.as_ref(), _ => None, } } @@ -198,7 +210,7 @@ impl HatTables { .map(|net| net.full_linkstate) .unwrap_or(false), WhatAmI::Peer => self - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.full_linkstate) .unwrap_or(false), @@ -207,8 +219,8 @@ impl HatTables { } #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net + fn get_router_links(&self, peer: ZenohIdProto) -> impl Iterator + '_ { + self.linkstatepeers_net .as_ref() .unwrap() .get_links(peer) @@ -225,14 +237,14 @@ impl HatTables { #[inline] fn elect_router<'a>( &'a self, - self_zid: &'a ZenohId, + self_zid: &'a ZenohIdProto, key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { + mut routers: impl Iterator, + ) -> &'a ZenohIdProto { match routers.next() { None => self_zid, Some(router) => { - let hash = |r: &ZenohId| { + let hash = |r: &ZenohIdProto| { let mut hasher = DefaultHasher::new(); for b in key_expr.as_bytes() { 
hasher.write_u8(*b); @@ -257,16 +269,16 @@ impl HatTables { } #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + fn failover_brokering_to(source_links: &[ZenohIdProto], dest: ZenohIdProto) -> bool { // if source_links is empty then gossip is probably disabled in source peer !source_links.is_empty() && !source_links.contains(&dest) } #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + fn failover_brokering(&self, peer1: ZenohIdProto, peer2: ZenohIdProto) -> bool { self.router_peers_failover_brokering && self - .peers_net + .linkstatepeers_net .as_ref() .map(|net| { let links = net.get_links(peer1); @@ -284,7 +296,7 @@ impl HatTables { let _ = self.routers_trees_worker.tx.try_send(tables_ref); } WhatAmI::Peer => { - let _ = self.peers_trees_worker.tx.try_send(tables_ref); + let _ = self.linkstatepeers_trees_worker.tx.try_send(tables_ref); } _ => (), } @@ -305,9 +317,9 @@ impl HatBaseTrait for HatCode { WhatAmIMatcher::empty() }; - let router_full_linkstate = whatami == WhatAmI::Router; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_full_linkstate = true; + let peer_full_linkstate = + unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); @@ -325,7 +337,7 @@ impl HatBaseTrait for HatCode { )); } if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( + hat_mut!(tables).linkstatepeers_net = Some(Network::new( "[Peers network]".to_string(), tables.zid, runtime, @@ -339,7 +351,7 @@ impl HatBaseTrait for HatCode { if router_full_linkstate && peer_full_linkstate { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } } @@ -358,13 +370,12 @@ impl HatBaseTrait for HatCode { fn new_local_face( &self, - tables: &mut Tables, + _tables: &mut Tables, _tables_ref: &Arc, - face: &mut Face, - send_declare: &mut SendDeclare, + _face: &mut Face, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { - pubsub_new_face(tables, &mut face.state, send_declare); - queries_new_face(tables, &mut face.state, send_declare); + // Nothing to do Ok(()) } @@ -374,7 +385,7 @@ impl HatBaseTrait for HatCode { tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, - send_declare: &mut SendDeclare, + _send_declare: &mut SendDeclare, ) -> ZResult<()> { let link_id = match face.state.whatami { WhatAmI::Router => hat_mut!(tables) @@ -383,7 +394,7 @@ impl HatBaseTrait for HatCode { .unwrap() .add_link(transport.clone()), WhatAmI::Peer => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.add_link(transport.clone()) } else { 0 @@ -395,13 +406,11 @@ impl HatBaseTrait for HatCode { if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state, send_declare); - queries_new_face(tables, &mut face.state, send_declare); match face.state.whatami { WhatAmI::Router => { @@ -426,6 +435,19 @@ 
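elect_router above is how the hat decides which router is "master" for a key expression when routers and linkstate peers overlap: each candidate id is hashed together with the key expression and the largest digest wins, so every node that sees the same candidate set elects the same router with no coordination. A self-contained sketch of the same scheme, with u64 standing in for ZenohIdProto:

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

fn elect_router<'a>(
    self_zid: &'a u64,
    key_expr: &str,
    mut routers: impl Iterator<Item = &'a u64>,
) -> &'a u64 {
    let hash = |r: &u64| {
        let mut hasher = DefaultHasher::new();
        for b in key_expr.as_bytes() {
            hasher.write_u8(*b);
        }
        hasher.write_u64(*r);
        hasher.finish()
    };
    match routers.next() {
        // No candidates: the local node is responsible.
        None => self_zid,
        // Otherwise keep the candidate with the largest digest.
        Some(first) => routers.fold(first, |best, r| if hash(r) > hash(best) { r } else { best }),
    }
}

fn main() {
    let (a, b, me) = (1u64, 2u64, 0u64);
    let master = elect_router(&me, "demo/example/**", [&a, &b].into_iter());
    println!("elected master: {master}");
}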
impl HatBaseTrait for HatCode { let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); let face = get_mut_unchecked(face); + let hat_face = match face.hat.downcast_mut::() { + Some(hate_face) => hate_face, + None => { + tracing::error!("Error downcasting face hat in close_face!"); + return; + } + }; + + hat_face.remote_interests.clear(); + hat_face.local_subs.clear(); + hat_face.local_qabls.clear(); + hat_face.local_tokens.clear(); + for res in face.remote_mappings.values_mut() { get_mut_unchecked(res).session_ctxs.remove(&face.id); Resource::clean(res); @@ -438,15 +460,9 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { + for (_id, mut res) in hat_face.remote_subs.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_subscription(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -466,15 +482,9 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { + for (_, mut res) in hat_face.remote_qabls.drain() { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); + undeclare_simple_queryable(&mut wtables, &mut face_clone, &mut res, send_declare); if res.context.is_some() { for match_ in &res.context().matches { @@ -492,6 +502,11 @@ impl HatBaseTrait for HatCode { qabls_matches.push(res); } } + + for (_id, mut res) in hat_face.remote_tokens.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_simple_token(&mut wtables, &mut face_clone, &mut res, send_declare); + } drop(wtables); let mut matches_data_routes = vec![]; @@ -499,11 +514,7 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push(( - _match.clone(), - compute_data_routes(&rtables, &mut expr), - compute_matching_pulls(&rtables, &mut expr), - )); + matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -511,13 +522,10 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { + for (mut res, data_routes) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -569,12 +577,18 @@ impl HatBaseTrait for HatCode { WhatAmI::Router, send_declare, ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } @@ -582,7 +596,7 @@ impl HatBaseTrait for HatCode { .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } WhatAmI::Peer => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { let changes = net.link_states(list.link_states, zid); if hat!(tables).full_net(WhatAmI::Peer) { for (_, removed_node) in changes.removed_nodes { @@ -598,11 +612,17 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer, send_declare, ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); } hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); hat_mut!(tables) @@ -621,6 +641,12 @@ impl HatBaseTrait for HatCode { &updated_node.links, send_declare, ); + token_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + send_declare, + ); } } } @@ -650,7 +676,7 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_local_context(routing_context, face_hat!(face).link_id) @@ -691,12 +717,18 @@ impl HatBaseTrait for HatCode { WhatAmI::Router, send_declare, ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Router, + send_declare, + ); } if hat!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); } @@ -706,7 +738,7 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { for (_, removed_node) in hat_mut!(tables) - .peers_net + .linkstatepeers_net .as_mut() .unwrap() .remove_link(&zid) @@ -723,16 +755,22 @@ impl HatBaseTrait for HatCode { WhatAmI::Peer, send_declare, ); + token_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + send_declare, + ); } hat_mut!(tables).shared_nodes = shared_nodes( hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), + hat!(tables).linkstatepeers_net.as_ref().unwrap(), ); hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + } else if let Some(net) = hat_mut!(tables).linkstatepeers_net.as_mut() { net.remove_link(&zid); } } @@ -747,7 +785,7 @@ impl HatBaseTrait for HatCode { #[inline] fn 
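Note the defensive change in close_face above: the per-face HatFace is now recovered with a match that logs and returns instead of unwrap(), so a face carrying an unexpected hat type no longer panics the router during teardown. Reduced to its essentials over std::any::Any (the HatFace body and the log line are simplified assumptions):

use std::any::Any;

struct HatFace {
    // per-face hat state (the subs/qabls/tokens maps in the real code)
}

fn close_face(hat: &mut (dyn Any + Send + Sync)) {
    // Prefer a logged early return over unwrap(): an unexpected hat type
    // should not bring the whole router down while closing one face.
    let hat_face = match hat.downcast_mut::<HatFace>() {
        Some(hat_face) => hat_face,
        None => {
            eprintln!("Error downcasting face hat in close_face!");
            return;
        }
    };
    // ...drain remote_subs / remote_qabls / remote_tokens here...
    let _ = hat_face;
}

fn main() {
    let mut hat: Box<dyn Any + Send + Sync> = Box::new(HatFace {});
    close_face(hat.as_mut());
}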
ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() + || hat!(tables).linkstatepeers_net.is_none() || tables.zid == *hat!(tables).elect_router( &tables.zid, @@ -771,7 +809,7 @@ impl HatBaseTrait for HatCode { } { let dst_master = out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() + || hat!(tables).linkstatepeers_net.is_none() || tables.zid == *hat!(tables).elect_router( &tables.zid, @@ -796,7 +834,7 @@ impl HatBaseTrait for HatCode { .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), WhatAmI::Peer => hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .map(|net| net.dot()) .unwrap_or_else(|| "graph {}".to_string()), @@ -806,44 +844,56 @@ impl HatBaseTrait for HatCode { } struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, + router_subs: HashSet, + linkstatepeer_subs: HashSet, + router_qabls: HashMap, + linkstatepeer_qabls: HashMap, + router_tokens: HashSet, + linkstatepeer_tokens: HashSet, } impl HatContext { fn new() -> Self { Self { router_subs: HashSet::new(), - peer_subs: HashSet::new(), + linkstatepeer_subs: HashSet::new(), router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), + linkstatepeer_qabls: HashMap::new(), + router_tokens: HashSet::new(), + linkstatepeer_tokens: HashSet::new(), } } } struct HatFace { link_id: usize, - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, + next_id: AtomicU32, // @TODO: manage rollover and uniqueness + remote_interests: HashMap>, InterestOptions)>, + local_subs: HashMap, SubscriberId>, + remote_subs: HashMap>, + local_qabls: HashMap, (QueryableId, QueryableInfoType)>, + remote_qabls: HashMap>, + local_tokens: HashMap, TokenId>, + remote_tokens: HashMap>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - local_subs: HashSet::new(), - remote_subs: HashSet::new(), + next_id: AtomicU32::new(0), + remote_interests: HashMap::new(), + local_subs: HashMap::new(), + remote_subs: HashMap::new(), local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), + remote_qabls: HashMap::new(), + local_tokens: HashMap::new(), + remote_tokens: HashMap::new(), } } } -fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { +fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .routers_net .as_ref() @@ -870,9 +920,9 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option< } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .get_link(face_hat!(face).link_id) @@ -911,7 +961,7 @@ fn get_routes_entries(tables: &Tables) -> RoutesIndexes { .collect::>(); let peers_indexes = if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) - .peers_net + .linkstatepeers_net .as_ref() .unwrap() .graph diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index e8e3a56aaf..3bfdde49d1 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -11,25 +11,34 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use 
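The reworked HatFace above switches from resource sets to id-keyed maps: each face allocates declaration ids from an AtomicU32 and tracks local_subs/remote_subs (and qabls, tokens, interests) by those ids, which is what lets undeclarations travel without a key expression. A stripped-down sketch of that bookkeeping, with String in place of Arc<Resource>:

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU32, Ordering},
};

struct FaceState {
    next_id: AtomicU32, // rollover and uniqueness left aside, as in the diff's own TODO
    remote_subs: HashMap<u32, String>,
}

impl FaceState {
    fn alloc_local_id(&self) -> u32 {
        self.next_id.fetch_add(1, Ordering::SeqCst)
    }
    fn declare(&mut self, id: u32, key_expr: &str) {
        self.remote_subs.insert(id, key_expr.to_string());
    }
    // Undeclaration only needs the id; returning the resource lets callers
    // clean up matches afterwards, like the new undeclare_subscription.
    fn undeclare(&mut self, id: u32) -> Option<String> {
        self.remote_subs.remove(&id)
    }
}

fn main() {
    let mut face = FaceState { next_id: AtomicU32::new(0), remote_subs: HashMap::new() };
    let id = face.alloc_local_id();
    face.declare(id, "demo/example");
    assert_eq!(face.undeclare(id).as_deref(), Some("demo/example"));
}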
crate::net::runtime::Runtime; -use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; -use rand::Rng; use std::convert::TryInto; + +use petgraph::{ + graph::NodeIndex, + visit::{IntoNodeReferences, VisitMap, Visitable}, +}; +use rand::Rng; use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; +use zenoh_buffers::{ + writer::{DidntWrite, HasWriter}, + ZBuf, +}; use zenoh_codec::WCodec; use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_protocol::{ + common::ZExtBody, + core::{WhatAmI, WhatAmIMatcher, ZenohIdProto}, + network::{oam, oam::id::OAM_LINKSTATE, NetworkBody, NetworkMessage, Oam}, +}; use zenoh_transport::unicast::TransportUnicast; +use crate::net::{ + codec::Zenoh080Routing, + protocol::linkstate::{LinkState, LinkStateList}, + routing::dispatcher::tables::NodeId, + runtime::Runtime, +}; + #[derive(Clone)] struct Details { zid: bool, @@ -39,11 +48,11 @@ struct Details { #[derive(Clone)] pub(super) struct Node { - pub(super) zid: ZenohId, + pub(super) zid: ZenohIdProto, pub(super) whatami: Option, pub(super) locators: Option>, pub(super) sn: u64, - pub(super) links: Vec, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -54,8 +63,8 @@ impl std::fmt::Debug for Node { pub(super) struct Link { pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, + zid: ZenohIdProto, + mappings: VecMap, local_mappings: VecMap, } @@ -71,12 +80,12 @@ impl Link { } #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohIdProto) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohIdProto> { self.mappings.get((*psid).try_into().unwrap()) } @@ -123,7 +132,7 @@ impl Network { #[allow(clippy::too_many_arguments)] pub(super) fn new( name: String, - zid: ZenohId, + zid: ZenohIdProto, runtime: Runtime, full_linkstate: bool, router_peers_failover_brokering: bool, @@ -168,12 +177,12 @@ impl Network { } #[inline] - pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + pub(super) fn get_node(&self, zid: &ZenohIdProto) -> Option<&Node> { self.graph.node_weights().find(|weight| weight.zid == *zid) } #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohIdProto) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) @@ -185,7 +194,7 @@ impl Network { } #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohIdProto) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } @@ -275,7 +284,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), + ext_qos: oam::ext::QoSType::OAM, ext_tstamp: None, }) .into()) @@ -340,7 +349,11 @@ impl Network { self.graph.update_edge(idx1, idx2, weight); } - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + pub(super) fn link_states( + &mut self, + link_states: Vec, + src: ZenohIdProto, + ) -> Changes { tracing::trace!("{} Received from {} 
raw: {:?}", self.name, src, link_states); let graph = &self.graph; @@ -404,7 +417,7 @@ impl Network { let link_states = link_states .into_iter() .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links + let links: Vec = links .iter() .filter_map(|l| { if let Some(zid) = src_link.get_zid(l) { @@ -554,7 +567,7 @@ impl Network { } }, ) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); // Add/remove edges from graph let mut reintroduced_nodes = vec![]; @@ -606,7 +619,7 @@ impl Network { let link_states = link_states .into_iter() .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); + .collect::, NodeIndex, bool)>>(); if !self.autoconnect.is_empty() { // Connect discovered peers @@ -645,8 +658,8 @@ impl Network { #[allow(clippy::type_complexity)] // This is only used here if !link_states.is_empty() { let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, ) = link_states.into_iter().partition(|(_, _, new)| *new); let new_idxs = new_idxs .into_iter() @@ -810,7 +823,7 @@ impl Network { free_index } - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohIdProto) -> Vec<(NodeIndex, Node)> { tracing::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); @@ -991,7 +1004,7 @@ impl Network { } #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + pub(super) fn get_links(&self, node: ZenohIdProto) -> &[ZenohIdProto] { self.get_node(&node) .map(|node| &node.links[..]) .unwrap_or_default() @@ -999,7 +1012,7 @@ impl Network { } #[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { net1.graph .node_references() .filter_map(|(_, node1)| { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index b223050a42..cc0251a07a 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -11,31 +11,44 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::hat::{HatPubSubTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + sync::{atomic::Ordering, Arc}, +}; + use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{Reliability, WhatAmI, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, + core::{key_expr::OwnedKeyExpr, Reliability, WhatAmI, 
ZenohIdProto}, + network::{ + declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, SubscriberId, UndeclareSubscriber, + }, + interest::{InterestId, InterestMode, InterestOptions}, }, }; use zenoh_sync::get_mut_unchecked; +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode, + HatContext, HatFace, HatTables, +}; +#[cfg(feature = "unstable")] +use crate::key_expr::KeyExpr; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + pubsub::*, + resource::{NodeId, Resource, SessionContext}, + tables::{Route, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatPubSubTrait, SendDeclare, Sources}, + router::RoutesIndexes, + RoutingContext, +}; + #[inline] fn send_sourced_subscription_to_net_children( tables: &Tables, @@ -50,20 +63,23 @@ fn send_sourced_subscription_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send subscription {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = push_declaration_profile(tables, &someface); + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids wire_expr: key_expr, ext_info: *sub_info, }), @@ -88,9 +104,8 @@ fn propagate_simple_subscription_to( full_peer_net: bool, send_declare: &mut SendDeclare, ) { - if (src_face.id != dst_face.id - || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains(res) + if src_face.id != dst_face.id + && !face_hat!(dst_face).local_subs.contains_key(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client } else { @@ -100,24 +115,43 @@ fn propagate_simple_subscription_to( || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - ), - ); + let matching_interests = face_hat!(dst_face) + .remote_interests + .values() + .filter(|(r, o)| o.subscribers() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) + .cloned() + .collect::>, InterestOptions)>>(); + + for (int_res, options) in matching_interests { + let res = if options.aggregate() { + int_res.as_ref().unwrap_or(res) + } else { + res + }; + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + 
face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + let key_expr = + Resource::decl_key(res, dst_face, push_declaration_profile(tables, dst_face)); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + ), + ); + } + } } } @@ -152,7 +186,7 @@ fn propagate_sourced_subscription( res: &Arc, sub_info: &SubscriberInfo, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -190,17 +224,12 @@ fn register_router_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - router: ZenohId, + router: ZenohIdProto, send_declare: &mut SendDeclare, ) { if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { - tracing::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.insert(router); hat_mut!(tables).router_subs.insert(res.clone()); } @@ -210,7 +239,7 @@ fn register_router_subscription( } // Propagate subscription to peers if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, sub_info, tables.zid) + register_linkstatepeer_subscription(tables, face, res, sub_info, tables.zid) } // Propagate subscription to clients @@ -222,25 +251,24 @@ fn declare_router_subscription( face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - router: ZenohId, + router: ZenohIdProto, send_declare: &mut SendDeclare, ) { register_router_subscription(tables, face, res, sub_info, router, send_declare); } -fn register_peer_subscription( +fn register_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, ) { - if !res_hat!(res).peer_subs.contains(&peer) { + if !res_hat!(res).linkstatepeer_subs.contains(&peer) { // Register peer subscription { - tracing::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); + res_hat_mut!(res).linkstatepeer_subs.insert(peer); + hat_mut!(tables).linkstatepeer_subs.insert(res.clone()); } // Propagate subscription to peers @@ -248,74 +276,59 @@ fn register_peer_subscription( } } -fn declare_peer_subscription( +fn declare_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, sub_info: &SubscriberInfo, - peer: ZenohId, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { - register_peer_subscription(tables, face, res, sub_info, peer); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + register_linkstatepeer_subscription(tables, face, res, sub_info, peer); + let propa_sub_info = *sub_info; let zid = tables.zid; register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); } -fn register_client_subscription( +fn register_simple_subscription( _tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); - tracing::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull 
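propagate_simple_subscription_to above no longer pushes every subscription to every face: it walks the face's registered remote_interests, keeps those whose key expression matches (or that carry no restriction), and, when the interest is aggregated, declares on the interest's own key expression so many matching resources collapse into one declaration. A rough sketch of that selection, with string equality standing in for real key-expression matching and the local_subs dedup omitted:

use std::collections::HashMap;

#[derive(Clone)]
struct Interest {
    key_expr: Option<String>, // None: interested in everything
    aggregate: bool,
}

// Crude stand-in for key-expression matching (the real code intersects keyexprs).
fn key_matches(expr: &str, res: &str) -> bool {
    expr == "**" || expr == res
}

fn propagate_to(face_interests: &HashMap<u32, Interest>, res: &str) -> Vec<String> {
    let mut declared = Vec::new();
    for interest in face_interests.values() {
        let interested = interest
            .key_expr
            .as_deref()
            .map(|e| key_matches(e, res))
            .unwrap_or(true);
        if interested {
            // Aggregated interests are declared on the interest's expression.
            let target = if interest.aggregate {
                interest.key_expr.clone().unwrap_or_else(|| res.to_string())
            } else {
                res.to_string()
            };
            declared.push(target);
        }
    }
    declared
}

fn main() {
    let mut interests = HashMap::new();
    interests.insert(1u32, Interest { key_expr: Some("**".into()), aggregate: true });
    assert_eq!(propagate_to(&interests, "demo/example"), vec!["**".to_string()]);
}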
== info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { + Some(ctx) => { + if ctx.subs.is_none() { get_mut_unchecked(ctx).subs = Some(*sub_info); } - }, + } None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }), - ); + let ctx = res + .session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))); + get_mut_unchecked(ctx).subs = Some(*sub_info); } } } - face_hat_mut!(face).remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(id, res.clone()); } -fn declare_client_subscription( +fn declare_simple_subscription( tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, send_declare: &mut SendDeclare, ) { - register_client_subscription(tables, face, res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; + register_simple_subscription(tables, face, id, res, sub_info); let zid = tables.zid; - register_router_subscription(tables, face, res, &propa_sub_info, zid, send_declare); + register_router_subscription(tables, face, res, sub_info, zid, send_declare); } #[inline] @@ -328,16 +341,16 @@ fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { } #[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_subs + .linkstatepeer_subs .iter() .any(|peer| peer != &tables.zid) } #[inline] -fn client_subs(res: &Arc) -> Vec> { +fn simple_subs(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -350,6 +363,13 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_simple_subs(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.subs.is_some()) +} + #[inline] fn send_forget_sourced_subscription_to_net_children( tables: &Tables, @@ -363,20 +383,23 @@ fn send_forget_sourced_subscription_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send forget subscription {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = push_declaration_profile(tables, &someface); + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: 0, // Sourced subscriptions do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -395,25 +418,58 @@ fn propagate_forget_simple_subscription( res: &Arc, send_declare: &mut SendDeclare, ) { - for face in tables.faces.values_mut() { - if face_hat!(face).local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + for mut face in tables.faces.values().cloned() { + if let 
Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - face_hat_mut!(face).local_subs.remove(res); + } + for res in face_hat!(&mut face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_subs(&m, &face) + || remote_linkstatepeer_subs(tables, &m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } @@ -434,7 +490,7 @@ fn propagate_forget_simple_subscription_to_peers( .collect::>>() { if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) + && face_hat!(face).local_subs.contains_key(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() @@ -443,24 +499,24 @@ fn propagate_forget_simple_subscription_to_peers( && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(&mut face).local_subs.remove(res); + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } } } } @@ -470,7 +526,7 @@ fn propagate_forget_sourced_subscription( tables: &Tables, res: &Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -505,14 +561,9 @@ fn propagate_forget_sourced_subscription( fn unregister_router_subscription( tables: &mut Tables, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - tracing::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); res_hat_mut!(res).router_subs.retain(|sub| sub != router); if res_hat!(res).router_subs.is_empty() { @@ -521,7 +572,7 @@ fn unregister_router_subscription( .retain(|sub| !Arc::ptr_eq(sub, res)); if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + 
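Undeclarations above now carry only the id allocated at declaration time together with WireExprType::null(), instead of re-encoding the key expression. A tiny sketch of the forget path under that scheme (UndeclareSubscriber here is a simplified stand-in for the protocol struct):

use std::collections::HashMap;

struct UndeclareSubscriber {
    id: u32,
    wire_expr: Option<String>, // None plays the role of WireExprType::null()
}

fn forget(local_subs: &mut HashMap<String, u32>, res: &str) -> Option<UndeclareSubscriber> {
    // Remove the local declaration and, if one existed, emit an undeclare
    // that identifies it purely by id.
    local_subs
        .remove(res)
        .map(|id| UndeclareSubscriber { id, wire_expr: None })
}

fn main() {
    let mut local_subs = HashMap::from([("demo/example".to_string(), 4u32)]);
    let msg = forget(&mut local_subs, "demo/example").unwrap();
    assert!(msg.id == 4 && msg.wire_expr.is_none());
}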
undeclare_linkstatepeer_subscription(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_subscription(tables, res, send_declare); } @@ -533,7 +584,7 @@ fn undeclare_router_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { if res_hat!(res).router_subs.contains(router) { @@ -546,179 +597,147 @@ fn forget_router_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { undeclare_router_subscription(tables, Some(face), res, router, send_declare); } -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - tracing::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohIdProto) { + res_hat_mut!(res) + .linkstatepeer_subs + .retain(|sub| sub != peer); - if res_hat!(res).peer_subs.is_empty() { + if res_hat!(res).linkstatepeer_subs.is_empty() { hat_mut!(tables) - .peer_subs + .linkstatepeer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); } } -fn undeclare_peer_subscription( +fn undeclare_linkstatepeer_subscription( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { - if res_hat!(res).peer_subs.contains(peer) { + if res_hat!(res).linkstatepeer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); } } -fn forget_peer_subscription( +fn forget_linkstatepeer_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_subscription(tables, Some(face), res, peer); - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, res); + undeclare_linkstatepeer_subscription(tables, Some(face), res, peer); + let simple_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); let zid = tables.zid; - if !client_subs && !peer_subs { + if !simple_subs && !linkstatepeer_subs { undeclare_router_subscription(tables, None, res, &zid, send_declare); } } -pub(super) fn undeclare_client_subscription( +pub(super) fn undeclare_simple_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - face_hat_mut!(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone(), send_declare); - } else { - propagate_forget_simple_subscription_to_peers(tables, res, send_declare); - } - if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; - if face_hat!(face).local_subs.contains(res) - && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - 
RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(face).local_subs.remove(res); + if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; } - } -} -fn forget_client_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - send_declare: &mut SendDeclare, -) { - undeclare_client_subscription(tables, face, res, send_declare); -} - -pub(super) fn pubsub_new_face( - tables: &mut Tables, - face: &mut Arc, - send_declare: &mut SendDeclare, -) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, - }; - - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - ), - ); + let mut simple_subs = simple_subs(res); + let router_subs = remote_router_subs(tables, res); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, res); + if simple_subs.is_empty() && !linkstatepeer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone(), send_declare); + } else { + propagate_forget_simple_subscription_to_peers(tables, res, send_declare); } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - })) - { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); + + if simple_subs.len() == 1 && !router_subs && !linkstatepeer_subs { + let mut face = &mut simple_subs[0]; + if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - wire_expr: key_expr, - ext_info: sub_info, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), }), }, - sub.expr(), + res.expr(), ), ); } + for res in face_hat!(face) + .local_subs + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_subs(&m, face) + || remote_linkstatepeer_subs(tables, 
&m) + || remote_router_subs(tables, &m)) + }) + }) { + if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } + } } } } +fn forget_simple_subscription( + tables: &mut Tables, + face: &mut Arc, + id: SubscriberId, + send_declare: &mut SendDeclare, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { + undeclare_simple_subscription(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } +} + pub(super) fn pubsub_remove_node( tables: &mut Tables, - node: &ZenohId, + node: &ZenohIdProto, net_type: WhatAmI, send_declare: &mut SendDeclare, ) { @@ -739,16 +758,16 @@ pub(super) fn pubsub_remove_node( } WhatAmI::Peer => { for mut res in hat!(tables) - .peer_subs + .linkstatepeer_subs .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) + .filter(|res| res_hat!(res).linkstatepeer_subs.contains(node)) .cloned() .collect::>>() { unregister_peer_subscription(tables, &mut res, node); - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { + let simple_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let linkstatepeer_subs = remote_linkstatepeer_subs(tables, &res); + if !simple_subs && !linkstatepeer_subs { undeclare_router_subscription( tables, None, @@ -771,29 +790,34 @@ pub(super) fn pubsub_tree_change( new_children: &[Vec], net_type: WhatAmI, ) { + let net = match hat!(tables).get_net(net_type) { + Some(net) => net, + None => { + tracing::error!("Error accessing net in pubsub_tree_change!"); + return; + } + }; // propagate subs to new children for (tree_sid, tree_children) in new_children.iter().enumerate() { if !tree_children.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let subs_res = match net_type { WhatAmI::Router => &hat!(tables).router_subs, - _ => &hat!(tables).peer_subs, + _ => &hat!(tables).linkstatepeer_subs, }; for res in subs_res { let subs = match net_type { WhatAmI::Router => &res_hat!(res).router_subs, - _ => &res_hat!(res).peer_subs, + _ => &res_hat!(res).linkstatepeer_subs, }; for sub in subs { if *sub == tree_id { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; send_sourced_subscription_to_net_children( tables, @@ -817,15 +841,15 @@ pub(super) fn pubsub_tree_change( pub(super) fn pubsub_linkstate_change( tables: &mut Tables, - zid: &ZenohId, - links: &[ZenohId], + zid: &ZenohIdProto, + links: &[ZenohIdProto], send_declare: &mut SendDeclare, ) { if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { let to_forget = face_hat!(src_face) .local_subs - .iter() + .keys() .filter(|res| { let client_subs = res .session_ctxs @@ -835,52 +859,59 @@ pub(super) fn pubsub_linkstate_change( && !client_subs && !res.session_ctxs.values().any(|ctx| { ctx.face.whatami == WhatAmI::Peer - && src_face.zid != 
ctx.face.zid + && src_face.id != ctx.face.id && HatTables::failover_brokering_to(links, ctx.face.zid) }) }) .cloned() .collect::>>(); for res in to_forget { - let wire_expr = Resource::get_best_key(&res, "", src_face.id); - send_declare( - &src_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(&mut src_face).local_subs.remove(&res); + if let Some(id) = face_hat_mut!(&mut src_face).local_subs.remove(&res) { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id, + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); + } } - for dst_face in tables.faces.values_mut() { - if HatTables::failover_brokering_to(links, dst_face.zid) { - for res in &face_hat!(src_face).remote_subs { - if !face_hat!(dst_face).local_subs.contains(res) { - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); + for mut dst_face in tables.faces.values().cloned() { + if src_face.id != dst_face.id + && HatTables::failover_brokering_to(links, dst_face.zid) + { + for res in face_hat!(src_face).remote_subs.values() { + if !face_hat!(dst_face).local_subs.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(&mut dst_face) + .local_subs + .insert(res.clone(), id); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers - mode: Mode::Push, }; send_declare( &dst_face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id, wire_expr: key_expr, ext_info: sub_info, }), @@ -896,36 +927,148 @@ pub(super) fn pubsub_linkstate_change( } } -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - subs: &HashSet, +pub(crate) fn declare_sub_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, ) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + }; + if let Some(res) = res.as_ref() { + if aggregate { + 
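pubsub_linkstate_change above re-evaluates failover brokering whenever a peer's gossip links change, forgetting subscriptions on faces the links now cover directly and declaring them on faces they no longer reach. The reachability test is the small failover_brokering_to predicate; standalone, with u64 ids:

// Can traffic for `dest` be brokered through a peer whose gossip advertises
// `source_links`? An empty link list usually means gossip is disabled on the
// source peer, in which case nothing can be concluded and the answer is no.
fn failover_brokering_to(source_links: &[u64], dest: u64) -> bool {
    !source_links.is_empty() && !source_links.contains(&dest)
}

fn main() {
    assert!(!failover_brokering_to(&[], 7));     // gossip off: no brokering
    assert!(!failover_brokering_to(&[7, 8], 7)); // direct link exists: not needed
    assert!(failover_brokering_to(&[8, 9], 7));  // reachable only via the broker
}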
if hat!(tables).router_subs.iter().any(|sub| { + sub.context.is_some() + && sub.matches(res) + && (remote_simple_subs(sub, face) + || remote_linkstatepeer_subs(tables, sub) + || remote_router_subs(tables, sub)) + }) { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert((*res).clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(res, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + res.expr(), + ), + ); + } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && sub.matches(res) + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub) + .linkstatepeer_subs + .iter() + .any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } - } + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(sub, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); } } } + } else { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || res_hat!(sub) + .linkstatepeer_subs + .iter() + .any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) + { + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face).local_subs.insert(sub.clone(), id); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(sub, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id, + wire_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + ), + ); + } + } } - } else { - tracing::trace!("Tree for node sid:{} not yet ready", source); } } @@ -934,6 +1077,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, + id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -948,13 +1092,20 @@ impl HatPubSubTrait 
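In declare_sub_interest above, a declaration id is allocated and pinned in local_subs only when the interest also covers future declarations; a purely current interest gets a one-shot declaration with id 0, correlated on the other side through the interest_id field instead. Sketched below (starting the counter at 1 here is an arbitrary choice just to keep the two cases visibly distinct):

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU32, Ordering},
};

struct Face {
    next_id: AtomicU32,
    local_subs: HashMap<String, u32>,
}

// Only a current+future interest pins the declaration under a fresh id; a
// purely current interest is answered with a one-shot declaration (id 0).
fn declaration_id(face: &mut Face, res: &str, future: bool) -> u32 {
    if future {
        let id = face.next_id.fetch_add(1, Ordering::SeqCst);
        face.local_subs.insert(res.to_string(), id);
        id
    } else {
        0
    }
}

fn main() {
    let mut face = Face { next_id: AtomicU32::new(1), local_subs: HashMap::new() };
    assert_eq!(declaration_id(&mut face, "demo/example", false), 0); // one-shot reply
    assert_eq!(declaration_id(&mut face, "demo/example", true), 1);  // pinned under a fresh id
}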
for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_subscription(tables, face, res, sub_info, peer, send_declare) + declare_linkstatepeer_subscription( + tables, + face, + res, + sub_info, + peer, + send_declare, + ) } } else { - declare_client_subscription(tables, face, res, sub_info, send_declare) + declare_simple_subscription(tables, face, id, res, sub_info, send_declare) } } - _ => declare_client_subscription(tables, face, res, sub_info, send_declare), + _ => declare_simple_subscription(tables, face, id, res, sub_info, send_declare), } } @@ -962,26 +1113,47 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: SubscriberId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ) { + ) -> Option> { match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, res, &router, send_declare) + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_subscription(tables, face, &mut res, &router, send_declare); + Some(res) + } else { + None + } + } else { + None } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, res, &peer, send_declare) + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_linkstatepeer_subscription( + tables, + face, + &mut res, + &peer, + send_declare, + ); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_subscription(tables, face, res, send_declare) + forget_simple_subscription(tables, face, id, send_declare) } } - _ => forget_client_subscription(tables, face, res, send_declare), + _ => forget_simple_subscription(tables, face, id, send_declare), } } @@ -998,7 +1170,7 @@ impl HatPubSubTrait for HatCode { Sources { routers: Vec::from_iter(res_hat!(s).router_subs.iter().cloned()), peers: if hat!(tables).full_net(WhatAmI::Peer) { - Vec::from_iter(res_hat!(s).peer_subs.iter().cloned()) + Vec::from_iter(res_hat!(s).linkstatepeer_subs.iter().cloned()) } else { s.session_ctxs .values() @@ -1029,6 +1201,43 @@ impl HatPubSubTrait for HatCode { source: NodeId, source_type: WhatAmI, ) -> Arc { + #[inline] + fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, + ) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = + net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = Resource::get_best_key( + expr.prefix, + expr.suffix, + face.id, + ); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + let mut route = HashMap::new(); let key_expr = expr.full_expr(); if key_expr.ends_with('/') { @@ -1078,7 +1287,7 @@ impl HatPubSubTrait for HatCode { } if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = 
hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -1089,20 +1298,17 @@ impl HatPubSubTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_subs, + &res_hat!(mres).linkstatepeer_subs, ); } if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if context.face.whatami != WhatAmI::Router && subinfo.mode == Mode::Push { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); - } + if context.subs.is_some() && context.face.whatami != WhatAmI::Router { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -1123,4 +1329,92 @@ impl HatPubSubTrait for HatCode { fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } + + #[zenoh_macros::unstable] + fn get_matching_subscriptions( + &self, + tables: &Tables, + key_expr: &KeyExpr<'_>, + ) -> HashMap> { + #[inline] + fn insert_faces_for_subs( + route: &mut HashMap>, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, + ) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| face.clone()); + } + } + } + } + } + } + } else { + tracing::trace!("Tree for node sid:{} not yet ready", source); + } + } + + let mut matching_subscriptions = HashMap::new(); + if key_expr.ends_with('/') { + return matching_subscriptions; + } + tracing::trace!("get_matching_subscriptions({})", key_expr,); + + let res = Resource::get_resource(&tables.root_res, key_expr); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, key_expr))); + + let master = !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + + if master { + let net = hat!(tables).routers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).router_subs, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); + insert_faces_for_subs( + &mut matching_subscriptions, + tables, + net, + net.idx.index(), + &res_hat!(mres).linkstatepeer_subs, + ); + } + + if master { + for (sid, context) in &mres.session_ctxs { + if context.subs.is_some() && context.face.whatami != WhatAmI::Router { + matching_subscriptions + .entry(*sid) + .or_insert_with(|| context.face.clone()); + } + } + } + } + matching_subscriptions + } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index ac7840fbe8..f45a260288 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -11,55 +11,61 @@ // 
Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::Tables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::hat::{HatQueriesTrait, SendDeclare, Sources}; -use crate::net::routing::router::RoutesIndexes; -use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{atomic::Ordering, Arc}, +}; + use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use zenoh_buffers::ZBuf; -use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{WhatAmI, WireExpr, ZenohId}, - network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + WhatAmI, ZenohIdProto, + }, + network::{ + declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, + DeclareBody, DeclareQueryable, QueryableId, UndeclareQueryable, + }, + interest::{InterestId, InterestMode}, }, }; use zenoh_sync::get_mut_unchecked; -#[cfg(feature = "complete_n")] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; - this.distance = std::cmp::min(this.distance, info.distance); - this -} +use super::{ + face_hat, face_hat_mut, get_peer, get_router, get_routes_entries, hat, hat_mut, + interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode, + HatContext, HatFace, HatTables, +}; +use crate::net::routing::{ + dispatcher::{ + face::FaceState, + queries::*, + resource::{NodeId, Resource, SessionContext}, + tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr, Tables}, + }, + hat::{CurrentFutureTrait, HatQueriesTrait, SendDeclare, Sources}, + router::RoutesIndexes, + RoutingContext, +}; -#[cfg(not(feature = "complete_n"))] #[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); +fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { + this.complete = this.complete || info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if hat!(tables).full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|_| { res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -87,13 +93,10 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> 
QueryableInfo { +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { let info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -123,13 +126,14 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { +fn local_qabl_info( + tables: &Tables, + res: &Arc, + face: &Arc, +) -> QueryableInfoType { let mut info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -149,7 +153,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) }; if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { info = res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .iter() .fold(info, |accu, (zid, info)| { if *zid != tables.zid { @@ -181,10 +185,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) accu } }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) + .unwrap_or(QueryableInfoType::DEFAULT) } #[inline] @@ -193,7 +194,7 @@ fn send_sourced_queryable_to_net_children( net: &Network, children: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -201,20 +202,24 @@ fn send_sourced_queryable_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send queryable {} on {}", res.expr(), someface); + if src_face + .as_ref() + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = push_declaration_profile(tables, &someface); + let key_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids wire_expr: key_expr, ext_info: *qabl_info, }), @@ -239,33 +244,48 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = face_hat!(dst_face).local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) + let current = face_hat!(dst_face).local_qabls.get(res); + if src_face + .as_ref() + .map(|src_face| dst_face.id != src_face.id) + .unwrap_or(true) + && (current.is_none() || current.unwrap().1 != info) + && face_hat!(dst_face) + .remote_interests + .values() + .any(|(r, o)| o.queryables() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true)) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables) - .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) + && src_face + .as_ref() + .map(|src_face| { + src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || 
hat!(tables).failover_brokering(src_face.zid, dst_face.zid) + }) + .unwrap_or(true) } { + let id = current + .map(|c| c.0) + .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, &mut dst_face); + .insert(res.clone(), (id, info)); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); send_declare( &dst_face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id, wire_expr: key_expr, ext_info: info, }), @@ -280,9 +300,9 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, src_face: Option<&mut Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -319,19 +339,14 @@ fn register_router_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, + qabl_info: &QueryableInfoType, + router: ZenohIdProto, send_declare: &mut SendDeclare, ) { let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { - tracing::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); res_hat_mut!(res).router_qabls.insert(router, *qabl_info); hat_mut!(tables).router_qabls.insert(res.clone()); } @@ -351,7 +366,13 @@ fn register_router_queryable( // Propagate queryable to peers if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) + register_linkstatepeer_queryable( + tables, + face.as_deref_mut(), + res, + &local_info, + tables.zid, + ) } } @@ -363,27 +384,28 @@ fn declare_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, + qabl_info: &QueryableInfoType, + router: ZenohIdProto, send_declare: &mut SendDeclare, ) { register_router_queryable(tables, Some(face), res, qabl_info, router, send_declare); } -fn register_peer_queryable( +fn register_linkstatepeer_queryable( tables: &mut Tables, face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, + qabl_info: &QueryableInfoType, + peer: ZenohIdProto, ) { - let current_info = res_hat!(res).peer_qabls.get(&peer); + let current_info = res_hat!(res).linkstatepeer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { - tracing::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); + res_hat_mut!(res) + .linkstatepeer_qabls + .insert(peer, *qabl_info); + hat_mut!(tables).linkstatepeer_qabls.insert(res.clone()); } // Propagate queryable to peers @@ -391,56 +413,50 @@ fn register_peer_queryable( } } -fn declare_peer_queryable( +fn declare_linkstatepeer_queryable( tables: &mut Tables, 
face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, + qabl_info: &QueryableInfoType, + peer: ZenohIdProto, send_declare: &mut SendDeclare, ) { let mut face = Some(face); - register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); + register_linkstatepeer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); let local_info = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, face, res, &local_info, zid, send_declare); } -fn register_client_queryable( +fn register_simple_queryable( _tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, ) { // Register queryable { let res = get_mut_unchecked(res); - tracing::debug!("Register queryable {} (face: {})", res.expr(), face,); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - last_values: HashMap::new(), - in_interceptor_cache: None, - e_interceptor_cache: None, - }) - })) + get_mut_unchecked( + res.session_ctxs + .entry(face.id) + .or_insert_with(|| Arc::new(SessionContext::new(face.clone()))), + ) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(id, res.clone()); } -fn declare_client_queryable( +fn declare_simple_queryable( tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, send_declare: &mut SendDeclare, ) { - register_client_queryable(tables, face, res, qabl_info); + register_simple_queryable(tables, face, id, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, Some(face), res, &local_details, zid, send_declare); @@ -456,16 +472,16 @@ fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { } #[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { +fn remote_linkstatepeer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res_hat!(res) - .peer_qabls + .linkstatepeer_qabls .keys() .any(|peer| peer != &tables.zid) } #[inline] -fn client_qabls(res: &Arc) -> Vec> { +fn simple_qabls(res: &Arc) -> Vec> { res.session_ctxs .values() .filter_map(|ctx| { @@ -478,6 +494,13 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } +#[inline] +fn remote_simple_qabls(res: &Arc, face: &Arc) -> bool { + res.session_ctxs + .values() + .any(|ctx| ctx.face.id != face.id && ctx.qabl.is_some()) +} + #[inline] fn send_forget_sourced_queryable_to_net_children( tables: &Tables, @@ -491,20 +514,23 @@ fn send_forget_sourced_queryable_to_net_children( if net.graph.contains_node(*child) { match tables.get_face(&net.graph[*child].zid).cloned() { Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - tracing::debug!("Send forget queryable {} on {}", res.expr(), someface); + if src_face + .map(|src_face| someface.id != src_face.id) + .unwrap_or(true) + { + let push_declaration = push_declaration_profile(tables, &someface); + let wire_expr = Resource::decl_key(res, &mut someface, push_declaration); someface.primitives.send_declare(RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, ext_nodeid: 
ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: 0, // Sourced queryables do not use ids ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -523,26 +549,58 @@ fn propagate_forget_simple_queryable( res: &mut Arc, send_declare: &mut SendDeclare, ) { - for face in tables.faces.values_mut() { - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); + for mut face in tables.faces.values().cloned() { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, + id, + ext_wire_expr: WireExprType::null(), }), }, res.expr(), ), ); - - face_hat_mut!(face).local_qabls.remove(res); + } + for res in face_hat!(&mut face) + .local_qabls + .keys() + .cloned() + .collect::>>() + { + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_qabls(&m, &face) + || remote_linkstatepeer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } @@ -572,24 +630,24 @@ fn propagate_forget_simple_queryable_to_peers( && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(&mut face).local_qabls.remove(res); + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } } } } @@ -599,7 +657,7 @@ fn propagate_forget_sourced_queryable( tables: &mut Tables, res: &mut Arc, src_face: Option<&Arc>, - source: &ZenohId, + source: &ZenohIdProto, net_type: WhatAmI, ) { let net = hat!(tables).get_net(net_type).unwrap(); @@ -634,14 +692,9 @@ fn propagate_forget_sourced_queryable( fn unregister_router_queryable( tables: &mut Tables, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - tracing::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); 
res_hat_mut!(res).router_qabls.remove(router); if res_hat!(res).router_qabls.is_empty() { @@ -650,7 +703,7 @@ fn unregister_router_queryable( .retain(|qabl| !Arc::ptr_eq(qabl, res)); if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + undeclare_linkstatepeer_queryable(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_queryable(tables, res, send_declare); } @@ -662,7 +715,7 @@ fn undeclare_router_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { if res_hat!(res).router_qabls.contains_key(router) { @@ -675,48 +728,51 @@ fn forget_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - router: &ZenohId, + router: &ZenohIdProto, send_declare: &mut SendDeclare, ) { undeclare_router_queryable(tables, Some(face), res, router, send_declare); } -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - tracing::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.remove(peer); +fn unregister_linkstatepeer_queryable( + tables: &mut Tables, + res: &mut Arc, + peer: &ZenohIdProto, +) { + res_hat_mut!(res).linkstatepeer_qabls.remove(peer); - if res_hat!(res).peer_qabls.is_empty() { + if res_hat!(res).linkstatepeer_qabls.is_empty() { hat_mut!(tables) - .peer_qabls + .linkstatepeer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); } } -fn undeclare_peer_queryable( +fn undeclare_linkstatepeer_queryable( tables: &mut Tables, face: Option<&Arc>, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, ) { - if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); + if res_hat!(res).linkstatepeer_qabls.contains_key(peer) { + unregister_linkstatepeer_queryable(tables, res, peer); propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); } } -fn forget_peer_queryable( +fn forget_linkstatepeer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - peer: &ZenohId, + peer: &ZenohIdProto, send_declare: &mut SendDeclare, ) { - undeclare_peer_queryable(tables, Some(face), res, peer); + undeclare_linkstatepeer_queryable(tables, Some(face), res, peer); - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, res); + let simple_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); let zid = tables.zid; - if !client_qabls && !peer_qabls { + if !simple_qabls && !linkstatepeer_qabls { undeclare_router_queryable(tables, None, res, &zid, send_declare); } else { let local_info = local_router_qabl_info(tables, res); @@ -724,133 +780,108 @@ fn forget_peer_queryable( } } -pub(super) fn undeclare_client_queryable( +pub(super) fn undeclare_simple_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, send_declare: &mut SendDeclare, ) { - tracing::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - face_hat_mut!(face).remote_qabls.remove(res); + if !face_hat_mut!(face) + .remote_qabls + .values() + .any(|s| *s == *res) + { + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; } - } - let mut client_qabls = client_qabls(res); - let 
router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut simple_qabls = simple_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, res); - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone(), send_declare); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid, send_declare); - propagate_forget_simple_queryable_to_peers(tables, res, send_declare); - } - - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; - if face_hat!(face).local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - ), - ); - - face_hat_mut!(face).local_qabls.remove(res); + if simple_qabls.is_empty() && !linkstatepeer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone(), send_declare); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid, send_declare); + propagate_forget_simple_queryable_to_peers(tables, res, send_declare); } - } -} -fn forget_client_queryable( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - send_declare: &mut SendDeclare, -) { - undeclare_client_queryable(tables, face, res, send_declare); -} - -pub(super) fn queries_new_face( - tables: &mut Tables, - face: &mut Arc, - send_declare: &mut SendDeclare, -) { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); + if simple_qabls.len() == 1 && !router_qabls && !linkstatepeer_qabls { + let mut face = &mut simple_qabls[0]; + if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { send_declare( &face.primitives, RoutingContext::with_expr( Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), }), }, - qabl.expr(), + res.expr(), ), ); } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - })) + for res in face_hat!(face) + .local_qabls + .keys() + .cloned() + .collect::>>() { - let info = local_qabl_info(tables, qabl, face); - 
face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - send_declare( - &face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - ), - ); + if !res.context().matches.iter().any(|m| { + m.upgrade().is_some_and(|m| { + m.context.is_some() + && (remote_simple_qabls(&m, face) + || remote_linkstatepeer_qabls(tables, &m) + || remote_router_qabls(tables, &m)) + }) + }) { + if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(&res) { + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType::null(), + }), + }, + res.expr(), + ), + ); + } + } } } } } +fn forget_simple_queryable( + tables: &mut Tables, + face: &mut Arc, + id: QueryableId, + send_declare: &mut SendDeclare, +) -> Option> { + if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { + undeclare_simple_queryable(tables, face, &mut res, send_declare); + Some(res) + } else { + None + } +} + pub(super) fn queries_remove_node( tables: &mut Tables, - node: &ZenohId, + node: &ZenohIdProto, net_type: WhatAmI, send_declare: &mut SendDeclare, ) { @@ -881,11 +912,11 @@ pub(super) fn queries_remove_node( } } for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); + unregister_linkstatepeer_queryable(tables, &mut res, node); - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { + let simple_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let linkstatepeer_qabls = remote_linkstatepeer_qabls(tables, &res); + if !simple_qabls && !linkstatepeer_qabls { undeclare_router_queryable( tables, None, @@ -915,87 +946,82 @@ pub(super) fn queries_remove_node( pub(super) fn queries_linkstate_change( tables: &mut Tables, - zid: &ZenohId, - links: &[ZenohId], + zid: &ZenohIdProto, + links: &[ZenohIdProto], send_declare: &mut SendDeclare, ) { - if let Some(src_face) = tables.get_face(zid) { + if let Some(mut src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in &face_hat!(src_face).remote_qabls { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in get_mut_unchecked(&mut res.clone()) + let to_forget = face_hat!(src_face) + .local_qabls + .keys() + .filter(|res| { + let client_qabls = res .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_qabls.contains_key(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && 
HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - ), - ); - - face_hat_mut!(dst_face).local_qabls.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - face_hat_mut!(dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, dst_face); - send_declare( - &dst_face.primitives, - RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - ), - ); - } + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); + !remote_router_qabls(tables, res) + && !client_qabls + && !res.session_ctxs.values().any(|ctx| { + ctx.face.whatami == WhatAmI::Peer + && src_face.id != ctx.face.id + && HatTables::failover_brokering_to(links, ctx.face.zid) + }) + }) + .cloned() + .collect::>>(); + for res in to_forget { + if let Some((id, _)) = face_hat_mut!(&mut src_face).local_qabls.remove(&res) { + let wire_expr = Resource::get_best_key(&res, "", src_face.id); + send_declare( + &src_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id, + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + ), + ); + } + } + + for mut dst_face in tables.faces.values().cloned() { + if src_face.id != dst_face.id + && HatTables::failover_brokering_to(links, dst_face.zid) + { + for res in face_hat!(src_face).remote_qabls.values() { + if !face_hat!(dst_face).local_qabls.contains_key(res) { + let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); + let info = local_qabl_info(tables, res, &dst_face); + face_hat_mut!(&mut dst_face) + .local_qabls + .insert(res.clone(), (id, info)); + let push_declaration = push_declaration_profile(tables, &dst_face); + let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration); + send_declare( + &dst_face.primitives, + RoutingContext::with_expr( + Declare { + interest_id: None, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); } } } @@ -1009,23 +1035,29 @@ pub(super) fn queries_tree_change( new_children: &[Vec], net_type: WhatAmI, ) { + let net = match hat!(tables).get_net(net_type) { + Some(net) => net, + None => { + tracing::error!("Error accessing net in queries_tree_change!"); + return; + } + }; // propagate qabls to new children for (tree_sid, tree_children) in new_children.iter().enumerate() { if !tree_children.is_empty() { - let net = 
hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let qabls_res = match net_type { WhatAmI::Router => &hat!(tables).router_qabls, - _ => &hat!(tables).peer_qabls, + _ => &hat!(tables).linkstatepeer_qabls, }; for res in qabls_res { let qabls = match net_type { WhatAmI::Router => &res_hat!(res).router_qabls, - _ => &res_hat!(res).peer_qabls, + _ => &res_hat!(res).linkstatepeer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_children( @@ -1054,7 +1086,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -1093,13 +1125,165 @@ lazy_static::lazy_static! { static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } +pub(crate) fn declare_qabl_interest( + tables: &mut Tables, + face: &mut Arc, + id: InterestId, + res: Option<&mut Arc>, + mode: InterestMode, + aggregate: bool, + send_declare: &mut SendDeclare, +) { + if mode.current() { + let interest_id = (!mode.future()).then_some(id); + if let Some(res) = res.as_ref() { + if aggregate { + if hat!(tables).router_qabls.iter().any(|qabl| { + qabl.context.is_some() + && qabl.matches(res) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl) + .linkstatepeer_qabls + .keys() + .any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.face.id != face.id + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + }) { + let info = local_qabl_info(tables, res, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert((*res).clone(), (id, info)); + id + } else { + 0 + }; + let wire_expr = + Resource::decl_key(res, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr, + ext_info: info, + }), + }, + res.expr(), + ), + ); + } + } else { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && qabl.matches(res) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || res_hat!(qabl) + .linkstatepeer_qabls + .keys() + .any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(s.face.zid, face.zid)) + })) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = + Resource::decl_key(qabl, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); + } + } + } + } else { + 
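+            // Unrestricted interest (no key expression): declare to this face every
+            // router queryable that has at least one remote origin (another simple
+            // face, a link-state peer, or another router).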
for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (remote_simple_qabls(qabl, face) + || remote_linkstatepeer_qabls(tables, qabl) + || remote_router_qabls(tables, qabl)) + { + let info = local_qabl_info(tables, qabl, face); + let id = if mode.future() { + let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); + face_hat_mut!(face) + .local_qabls + .insert(qabl.clone(), (id, info)); + id + } else { + 0 + }; + let key_expr = + Resource::decl_key(qabl, face, push_declaration_profile(tables, face)); + send_declare( + &face.primitives, + RoutingContext::with_expr( + Declare { + interest_id, + ext_qos: ext::QoSType::DECLARE, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::DEFAULT, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id, + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + ), + ); + } + } + } + } +} + impl HatQueriesTrait for HatCode { fn declare_queryable( &self, tables: &mut Tables, face: &mut Arc, + id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfo, + qabl_info: &QueryableInfoType, node_id: NodeId, send_declare: &mut SendDeclare, ) { @@ -1112,13 +1296,20 @@ impl HatQueriesTrait for HatCode { WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(tables, face, node_id) { - declare_peer_queryable(tables, face, res, qabl_info, peer, send_declare) + declare_linkstatepeer_queryable( + tables, + face, + res, + qabl_info, + peer, + send_declare, + ) } } else { - declare_client_queryable(tables, face, res, qabl_info, send_declare) + declare_simple_queryable(tables, face, id, res, qabl_info, send_declare) } } - _ => declare_client_queryable(tables, face, res, qabl_info, send_declare), + _ => declare_simple_queryable(tables, face, id, res, qabl_info, send_declare), } } @@ -1126,26 +1317,47 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - res: &mut Arc, + id: QueryableId, + res: Option>, node_id: NodeId, send_declare: &mut SendDeclare, - ) { + ) -> Option> { match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_queryable(tables, face, res, &router, send_declare) + if let Some(mut res) = res { + if let Some(router) = get_router(tables, face, node_id) { + forget_router_queryable(tables, face, &mut res, &router, send_declare); + Some(res) + } else { + None + } + } else { + None } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, res, &peer, send_declare) + if let Some(mut res) = res { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_linkstatepeer_queryable( + tables, + face, + &mut res, + &peer, + send_declare, + ); + Some(res) + } else { + None + } + } else { + None } } else { - forget_client_queryable(tables, face, res, send_declare) + forget_simple_queryable(tables, face, id, send_declare) } } - _ => forget_client_queryable(tables, face, res, send_declare), + _ => forget_simple_queryable(tables, face, id, send_declare), } } @@ -1162,7 +1374,7 @@ impl HatQueriesTrait for HatCode { Sources { routers: Vec::from_iter(res_hat!(s).router_qabls.keys().cloned()), peers: if hat!(tables).full_net(WhatAmI::Peer) { - Vec::from_iter(res_hat!(s).peer_qabls.keys().cloned()) + Vec::from_iter(res_hat!(s).linkstatepeer_qabls.keys().cloned()) } else { s.session_ctxs .values() @@ -1243,7 +1455,7 @@ impl HatQueriesTrait for HatCode { } if (master || source_type != WhatAmI::Router) && 
hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); + let net = hat!(tables).linkstatepeers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as NodeId, @@ -1254,7 +1466,7 @@ impl HatQueriesTrait for HatCode { tables, net, peer_source, - &res_hat!(mres).peer_qabls, + &res_hat!(mres).linkstatepeer_qabls, complete, ); } @@ -1286,48 +1498,6 @@ impl HatQueriesTrait for HatCode { Arc::new(route) } - #[inline] - fn compute_local_replies( - &self, - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, - ) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - tracing::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result - } - fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } diff --git a/zenoh/src/net/routing/hat/router/token.rs b/zenoh/src/net/routing/hat/router/token.rs new file mode 100644 index 0000000000..f94a4d12d4 --- /dev/null +++ b/zenoh/src/net/routing/hat/router/token.rs @@ -0,0 +1,1167 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+//
+// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+//
+// Contributors:
+//   ZettaScale Zenoh Team,
+//
+
+use std::sync::{atomic::Ordering, Arc};
+
+use petgraph::graph::NodeIndex;
+use zenoh_protocol::{
+    core::{WhatAmI, ZenohIdProto},
+    network::{
+        declare::{common::ext::WireExprType, TokenId},
+        ext,
+        interest::{InterestId, InterestMode, InterestOptions},
+        Declare, DeclareBody, DeclareToken, UndeclareToken,
+    },
+};
+use zenoh_sync::get_mut_unchecked;
+
+use super::{
+    face_hat, face_hat_mut, get_peer, get_router, hat, hat_mut,
+    interests::push_declaration_profile, network::Network, res_hat, res_hat_mut, HatCode,
+    HatContext, HatFace, HatTables,
+};
+use crate::net::routing::{
+    dispatcher::{face::FaceState, tables::Tables},
+    hat::{CurrentFutureTrait, HatTokenTrait, SendDeclare},
+    router::{NodeId, Resource, SessionContext},
+    RoutingContext,
+};
+
+#[inline]
+fn send_sourced_token_to_net_children(
+    tables: &Tables,
+    net: &Network,
+    children: &[NodeIndex],
+    res: &Arc<Resource>,
+    src_face: Option<&Arc<FaceState>>,
+    routing_context: NodeId,
+) {
+    for child in children {
+        if net.graph.contains_node(*child) {
+            match tables.get_face(&net.graph[*child].zid).cloned() {
+                Some(mut someface) => {
+                    if src_face
+                        .map(|src_face| someface.id != src_face.id)
+                        .unwrap_or(true)
+                    {
+                        let push_declaration = push_declaration_profile(tables, &someface);
+                        let key_expr = Resource::decl_key(res, &mut someface, push_declaration);
+
+                        someface.primitives.send_declare(RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType {
+                                    node_id: routing_context,
+                                },
+                                body: DeclareBody::DeclareToken(DeclareToken {
+                                    id: 0, // Sourced tokens do not use ids
+                                    wire_expr: key_expr,
+                                }),
+                            },
+                            res.expr(),
+                        ));
+                    }
+                }
+                None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid),
+            }
+        }
+    }
+}
+
+#[inline]
+fn propagate_simple_token_to(
+    tables: &mut Tables,
+    dst_face: &mut Arc<FaceState>,
+    res: &Arc<Resource>,
+    src_face: &mut Arc<FaceState>,
+    full_peer_net: bool,
+    send_declare: &mut SendDeclare,
+) {
+    if (src_face.id != dst_face.id || dst_face.zid == tables.zid)
+        && !face_hat!(dst_face).local_tokens.contains_key(res)
+        && if full_peer_net {
+            dst_face.whatami == WhatAmI::Client
+        } else {
+            dst_face.whatami != WhatAmI::Router
+                && (src_face.whatami != WhatAmI::Peer
+                    || dst_face.whatami != WhatAmI::Peer
+                    || hat!(tables).failover_brokering(src_face.zid, dst_face.zid))
+        }
+    {
+        let matching_interests = face_hat!(dst_face)
+            .remote_interests
+            .values()
+            .filter(|(r, o)| o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true))
+            .cloned()
+            .collect::<Vec<(Option<Arc<Resource>>, InterestOptions)>>();
+
+        for (int_res, options) in matching_interests {
+            let res = if options.aggregate() {
+                int_res.as_ref().unwrap_or(res)
+            } else {
+                res
+            };
+            if !face_hat!(dst_face).local_tokens.contains_key(res) {
+                let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst);
+                face_hat_mut!(dst_face).local_tokens.insert(res.clone(), id);
+                let key_expr =
+                    Resource::decl_key(res, dst_face, push_declaration_profile(tables, dst_face));
+                send_declare(
+                    &dst_face.primitives,
+                    RoutingContext::with_expr(
+                        Declare {
+                            interest_id: None,
+                            ext_qos: ext::QoSType::DECLARE,
+                            ext_tstamp: None,
+                            ext_nodeid: ext::NodeIdType::DEFAULT,
+                            body: DeclareBody::DeclareToken(DeclareToken {
+                                id,
+                                wire_expr: key_expr,
+                            }),
+                        },
+                        res.expr(),
+                    ),
+                );
+            }
+        }
+    }
+}
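The `options.aggregate()` branch in `propagate_simple_token_to` above decides which key expression a declaration is emitted under: an aggregating interest is declared under its own key expression, a plain interest under the concrete resource expression. A minimal standalone sketch of that rule, using illustrative types rather than zenoh's own:

    // Illustrative model, not zenoh's API.
    struct Interest {
        key: Option<String>, // None = unrestricted interest
        aggregate: bool,
    }

    // Mirrors the rule above: an aggregating interest hides the concrete
    // resource behind its own key expression.
    fn declared_expr<'a>(interest: &'a Interest, matching_res: &'a str) -> &'a str {
        if interest.aggregate {
            interest.key.as_deref().unwrap_or(matching_res)
        } else {
            matching_res
        }
    }

    fn main() {
        let agg = Interest { key: Some("demo/**".to_string()), aggregate: true };
        assert_eq!(declared_expr(&agg, "demo/a/b"), "demo/**");
        let plain = Interest { key: Some("demo/**".to_string()), aggregate: false };
        assert_eq!(declared_expr(&plain, "demo/a/b"), "demo/a/b");
    }

One consequence visible in the function above: with aggregation, many matching resources collapse onto a single `local_tokens` entry, so only one declaration crosses the face.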
+
+fn propagate_simple_token(
+    tables: &mut Tables,
+    res: &Arc<Resource>,
+    src_face: &mut Arc<FaceState>,
+    send_declare: &mut SendDeclare,
+) {
+    let full_peer_net = hat!(tables).full_net(WhatAmI::Peer);
+    for mut dst_face in tables
+        .faces
+        .values()
+        .cloned()
+        .collect::<Vec<Arc<FaceState>>>()
+    {
+        propagate_simple_token_to(
+            tables,
+            &mut dst_face,
+            res,
+            src_face,
+            full_peer_net,
+            send_declare,
+        );
+    }
+}
+
+fn propagate_sourced_token(
+    tables: &Tables,
+    res: &Arc<Resource>,
+    src_face: Option<&Arc<FaceState>>,
+    source: &ZenohIdProto,
+    net_type: WhatAmI,
+) {
+    let net = hat!(tables).get_net(net_type).unwrap();
+    match net.get_idx(source) {
+        Some(tree_sid) => {
+            if net.trees.len() > tree_sid.index() {
+                send_sourced_token_to_net_children(
+                    tables,
+                    net,
+                    &net.trees[tree_sid.index()].children,
+                    res,
+                    src_face,
+                    tree_sid.index() as NodeId,
+                );
+            } else {
+                tracing::trace!(
+                    "Propagating liveliness {}: tree for node {} sid:{} not yet ready",
+                    res.expr(),
+                    tree_sid.index(),
+                    source
+                );
+            }
+        }
+        None => tracing::error!(
+            "Error propagating token {}: cannot get index of {}!",
+            res.expr(),
+            source
+        ),
+    }
+}
+
+fn register_router_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    router: ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    if !res_hat!(res).router_tokens.contains(&router) {
+        // Register router liveliness
+        {
+            res_hat_mut!(res).router_tokens.insert(router);
+            hat_mut!(tables).router_tokens.insert(res.clone());
+        }
+
+        // Propagate liveliness to routers
+        propagate_sourced_token(tables, res, Some(face), &router, WhatAmI::Router);
+    }
+    // Propagate liveliness to peers
+    if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer {
+        register_linkstatepeer_token(tables, face, res, tables.zid)
+    }
+
+    // Propagate liveliness to clients
+    propagate_simple_token(tables, res, face, send_declare);
+}
+
+fn declare_router_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    router: ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    register_router_token(tables, face, res, router, send_declare);
+}
+
+fn register_linkstatepeer_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    peer: ZenohIdProto,
+) {
+    if !res_hat!(res).linkstatepeer_tokens.contains(&peer) {
+        // Register peer liveliness
+        {
+            res_hat_mut!(res).linkstatepeer_tokens.insert(peer);
+            hat_mut!(tables).linkstatepeer_tokens.insert(res.clone());
+        }
+
+        // Propagate liveliness to peers
+        propagate_sourced_token(tables, res, Some(face), &peer, WhatAmI::Peer);
+    }
+}
+
+fn declare_linkstatepeer_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    peer: ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    register_linkstatepeer_token(tables, face, res, peer);
+    let zid = tables.zid;
+    register_router_token(tables, face, res, zid, send_declare);
+}
+
+fn register_simple_token(
+    _tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    id: TokenId,
+    res: &mut Arc<Resource>,
+) {
+    // Register liveliness
+    {
+        let res = get_mut_unchecked(res);
+        match res.session_ctxs.get_mut(&face.id) {
+            Some(ctx) => {
+                if !ctx.token {
+                    get_mut_unchecked(ctx).token = true;
+                }
+            }
+            None => {
+                let ctx = res
+                    .session_ctxs
+                    .entry(face.id)
+                    .or_insert_with(|| Arc::new(SessionContext::new(face.clone())));
+                get_mut_unchecked(ctx).token = true;
+            }
+        }
+    }
+    face_hat_mut!(face).remote_tokens.insert(id, res.clone());
+}
+
+fn declare_simple_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    id: TokenId,
+    res: &mut Arc<Resource>,
+    send_declare: &mut SendDeclare,
+) {
+    register_simple_token(tables, face, id, res);
+    let zid = tables.zid;
+    register_router_token(tables, face, res, zid, send_declare);
+}
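Both `register_simple_token` above and the interest handlers elsewhere in this PR rely on the same id scheme: each face owns an atomic counter (`next_id`) and every locally scoped declaration draws a fresh id from it with `fetch_add`. A standalone sketch of that allocation pattern, with an illustrative stand-in type and assuming the ids are `u32`:

    use std::sync::{
        atomic::{AtomicU32, Ordering},
        Arc,
    };
    use std::thread;

    // Illustrative stand-in for the per-face state holding `next_id`.
    struct FaceCounters {
        next_id: AtomicU32,
    }

    fn main() {
        let face = Arc::new(FaceCounters { next_id: AtomicU32::new(0) });
        // fetch_add returns the previous value, so concurrent callers
        // always obtain distinct ids without extra locking.
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let face = face.clone();
                thread::spawn(move || face.next_id.fetch_add(1, Ordering::SeqCst))
            })
            .collect();
        let mut ids: Vec<u32> = handles.into_iter().map(|h| h.join().unwrap()).collect();
        ids.sort_unstable();
        ids.dedup();
        assert_eq!(ids.len(), 4); // all distinct
    }

Because the counter is per face, an id only identifies a declaration within that face's scope; sourced (router-to-router) declarations keep `id: 0`, as the comments above note.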
+
+#[inline]
+fn remote_router_tokens(tables: &Tables, res: &Arc<Resource>) -> bool {
+    res.context.is_some()
+        && res_hat!(res)
+            .router_tokens
+            .iter()
+            .any(|peer| peer != &tables.zid)
+}
+
+#[inline]
+fn remote_linkstatepeer_tokens(tables: &Tables, res: &Arc<Resource>) -> bool {
+    res.context.is_some()
+        && res_hat!(res)
+            .linkstatepeer_tokens
+            .iter()
+            .any(|peer| peer != &tables.zid)
+}
+
+#[inline]
+fn simple_tokens(res: &Arc<Resource>) -> Vec<Arc<FaceState>> {
+    res.session_ctxs
+        .values()
+        .filter_map(|ctx| {
+            if ctx.token {
+                Some(ctx.face.clone())
+            } else {
+                None
+            }
+        })
+        .collect()
+}
+
+#[inline]
+fn remote_simple_tokens(tables: &Tables, res: &Arc<Resource>, face: &Arc<FaceState>) -> bool {
+    res.session_ctxs
+        .values()
+        .any(|ctx| (ctx.face.id != face.id || face.zid == tables.zid) && ctx.token)
+}
+
+#[inline]
+fn send_forget_sourced_token_to_net_children(
+    tables: &Tables,
+    net: &Network,
+    children: &[NodeIndex],
+    res: &Arc<Resource>,
+    src_face: Option<&Arc<FaceState>>,
+    routing_context: Option<NodeId>,
+) {
+    for child in children {
+        if net.graph.contains_node(*child) {
+            match tables.get_face(&net.graph[*child].zid).cloned() {
+                Some(mut someface) => {
+                    if src_face
+                        .map(|src_face| someface.id != src_face.id)
+                        .unwrap_or(true)
+                    {
+                        let push_declaration = push_declaration_profile(tables, &someface);
+                        let wire_expr = Resource::decl_key(res, &mut someface, push_declaration);
+
+                        someface.primitives.send_declare(RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType {
+                                    node_id: routing_context.unwrap_or(0),
+                                },
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id: 0, // Sourced tokens do not use ids
+                                    ext_wire_expr: WireExprType { wire_expr },
+                                }),
+                            },
+                            res.expr(),
+                        ));
+                    }
+                }
+                None => tracing::trace!("Unable to find face for zid {}", net.graph[*child].zid),
+            }
+        }
+    }
+}
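The next function distinguishes two undeclaration paths: a face that previously received the declaration is sent an `UndeclareToken` carrying the id it already knows, while a face that only registered a matching, non-aggregated interest never saw a declaration and therefore gets a one-shot undeclaration with a freshly generated id plus an explicit wire expression. A standalone sketch of that dispatch, under illustrative types (not zenoh's API):

    use std::collections::HashMap;

    enum Undeclare {
        KnownId(u32),
        OneShot { id: u32, wire_expr: String },
    }

    fn undeclare_for_face(
        local_tokens: &mut HashMap<String, u32>, // key expr -> id previously declared to this face
        has_matching_interest: bool,             // matching, non-aggregated interest registered
        next_id: &mut u32,
        key: &str,
    ) -> Option<Undeclare> {
        if let Some(id) = local_tokens.remove(key) {
            Some(Undeclare::KnownId(id))
        } else if has_matching_interest {
            let id = *next_id; // the face never saw a declaration, so no id is shared
            *next_id += 1;
            Some(Undeclare::OneShot { id, wire_expr: key.to_string() })
        } else {
            None // nothing to retract for this face
        }
    }

    fn main() {
        let mut local = HashMap::from([("demo/a".to_string(), 7)]);
        let mut next_id = 100;
        assert!(matches!(
            undeclare_for_face(&mut local, false, &mut next_id, "demo/a"),
            Some(Undeclare::KnownId(7))
        ));
        assert!(matches!(
            undeclare_for_face(&mut local, true, &mut next_id, "demo/b"),
            Some(Undeclare::OneShot { id: 100, .. })
        ));
    }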
+
+fn propagate_forget_simple_token(
+    tables: &mut Tables,
+    res: &Arc<Resource>,
+    src_face: Option<&Arc<FaceState>>,
+    send_declare: &mut SendDeclare,
+) {
+    for mut face in tables.faces.values().cloned() {
+        if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) {
+            send_declare(
+                &face.primitives,
+                RoutingContext::with_expr(
+                    Declare {
+                        interest_id: None,
+                        ext_qos: ext::QoSType::DECLARE,
+                        ext_tstamp: None,
+                        ext_nodeid: ext::NodeIdType::DEFAULT,
+                        body: DeclareBody::UndeclareToken(UndeclareToken {
+                            id,
+                            ext_wire_expr: WireExprType::null(),
+                        }),
+                    },
+                    res.expr(),
+                ),
+            );
+        // NOTE(fuzzypixelz): We need to check that `face` is not the source Face of the token
+        // undeclaration, otherwise the undeclaration would be duplicated at the source Face. In
+        // cases where we don't have access to a Face because we didn't receive an undeclaration,
+        // we default to true.
+        } else if src_face.map_or(true, |src_face| src_face.id != face.id)
+            && face_hat!(face).remote_interests.values().any(|(r, o)| {
+                o.tokens() && r.as_ref().map(|r| r.matches(res)).unwrap_or(true) && !o.aggregate()
+            })
+        {
+            // Token has never been declared on this face.
+            // Send an Undeclare with a one-shot generated id and a WireExpr ext.
+            send_declare(
+                &face.primitives,
+                RoutingContext::with_expr(
+                    Declare {
+                        interest_id: None,
+                        ext_qos: ext::QoSType::DECLARE,
+                        ext_tstamp: None,
+                        ext_nodeid: ext::NodeIdType::DEFAULT,
+                        body: DeclareBody::UndeclareToken(UndeclareToken {
+                            id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst),
+                            ext_wire_expr: WireExprType {
+                                wire_expr: Resource::get_best_key(res, "", face.id),
+                            },
+                        }),
+                    },
+                    res.expr(),
+                ),
+            );
+        }
+        for res in face_hat!(&mut face)
+            .local_tokens
+            .keys()
+            .cloned()
+            .collect::<Vec<Arc<Resource>>>()
+        {
+            if !res.context().matches.iter().any(|m| {
+                m.upgrade().is_some_and(|m| {
+                    m.context.is_some()
+                        && (remote_simple_tokens(tables, &m, &face)
+                            || remote_linkstatepeer_tokens(tables, &m)
+                            || remote_router_tokens(tables, &m))
+                })
+            }) {
+                if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) {
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id,
+                                    ext_wire_expr: WireExprType::null(),
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                } else if face_hat!(face).remote_interests.values().any(|(r, o)| {
+                    o.tokens()
+                        && r.as_ref().map(|r| r.matches(&res)).unwrap_or(true)
+                        && !o.aggregate()
+                }) {
+                    // Token has never been declared on this face.
+                    // Send an Undeclare with a one-shot generated id and a WireExpr ext.
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id: face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst),
+                                    ext_wire_expr: WireExprType {
+                                        wire_expr: Resource::get_best_key(&res, "", face.id),
+                                    },
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+            }
+        }
+    }
+}
+
+fn propagate_forget_simple_token_to_peers(
+    tables: &mut Tables,
+    res: &Arc<Resource>,
+    send_declare: &mut SendDeclare,
+) {
+    if !hat!(tables).full_net(WhatAmI::Peer)
+        && res_hat!(res).router_tokens.len() == 1
+        && res_hat!(res).router_tokens.contains(&tables.zid)
+    {
+        for mut face in tables
+            .faces
+            .values()
+            .cloned()
+            .collect::<Vec<Arc<FaceState>>>()
+        {
+            if face.whatami == WhatAmI::Peer
+                && face_hat!(face).local_tokens.contains_key(res)
+                && !res.session_ctxs.values().any(|s| {
+                    face.zid != s.face.zid
+                        && s.token
+                        && (s.face.whatami == WhatAmI::Client
+                            || (s.face.whatami == WhatAmI::Peer
+                                && hat!(tables).failover_brokering(s.face.zid, face.zid)))
+                })
+            {
+                if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(res) {
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id,
+                                    ext_wire_expr: WireExprType::null(),
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+            }
+        }
+    }
+}
+
+fn propagate_forget_sourced_token(
+    tables: &Tables,
+    res: &Arc<Resource>,
+    src_face: Option<&Arc<FaceState>>,
+    source: &ZenohIdProto,
+    net_type: WhatAmI,
+) {
+    let net = hat!(tables).get_net(net_type).unwrap();
+    match net.get_idx(source) {
+        Some(tree_sid) => {
+            if net.trees.len() > tree_sid.index() {
+                send_forget_sourced_token_to_net_children(
+                    tables,
+                    net,
+                    &net.trees[tree_sid.index()].children,
+                    res,
+                    src_face,
+                    Some(tree_sid.index() as NodeId),
+                );
+            } else {
+                tracing::trace!(
+                    "Propagating forget token {}: tree for node {} sid:{} not yet ready",
+fn unregister_router_token(
+    tables: &mut Tables,
+    face: Option<&Arc<FaceState>>,
+    res: &mut Arc<Resource>,
+    router: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    res_hat_mut!(res)
+        .router_tokens
+        .retain(|token| token != router);
+
+    if res_hat!(res).router_tokens.is_empty() {
+        hat_mut!(tables)
+            .router_tokens
+            .retain(|token| !Arc::ptr_eq(token, res));
+
+        if hat_mut!(tables).full_net(WhatAmI::Peer) {
+            undeclare_linkstatepeer_token(tables, None, res, &tables.zid.clone());
+        }
+        propagate_forget_simple_token(tables, res, face, send_declare);
+    }
+
+    propagate_forget_simple_token_to_peers(tables, res, send_declare);
+}
+
+fn undeclare_router_token(
+    tables: &mut Tables,
+    face: Option<&Arc<FaceState>>,
+    res: &mut Arc<Resource>,
+    router: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    if res_hat!(res).router_tokens.contains(router) {
+        unregister_router_token(tables, face, res, router, send_declare);
+        propagate_forget_sourced_token(tables, res, face, router, WhatAmI::Router);
+    }
+}
+
+fn forget_router_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    router: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    undeclare_router_token(tables, Some(face), res, router, send_declare);
+}
+
+fn unregister_linkstatepeer_token(
+    tables: &mut Tables,
+    res: &mut Arc<Resource>,
+    peer: &ZenohIdProto,
+) {
+    res_hat_mut!(res)
+        .linkstatepeer_tokens
+        .retain(|token| token != peer);
+
+    if res_hat!(res).linkstatepeer_tokens.is_empty() {
+        hat_mut!(tables)
+            .linkstatepeer_tokens
+            .retain(|token| !Arc::ptr_eq(token, res));
+    }
+}
+
+fn undeclare_linkstatepeer_token(
+    tables: &mut Tables,
+    face: Option<&Arc<FaceState>>,
+    res: &mut Arc<Resource>,
+    peer: &ZenohIdProto,
+) {
+    if res_hat!(res).linkstatepeer_tokens.contains(peer) {
+        unregister_linkstatepeer_token(tables, res, peer);
+        propagate_forget_sourced_token(tables, res, face, peer, WhatAmI::Peer);
+    }
+}
+
+fn forget_linkstatepeer_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    peer: &ZenohIdProto,
+    send_declare: &mut SendDeclare,
+) {
+    undeclare_linkstatepeer_token(tables, Some(face), res, peer);
+    let simple_tokens = res.session_ctxs.values().any(|ctx| ctx.token);
+    let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res);
+    let zid = tables.zid;
+    if !simple_tokens && !linkstatepeer_tokens {
+        undeclare_router_token(tables, None, res, &zid, send_declare);
+    }
+}
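Note that `unregister_router_token` drops the resource from the HAT-wide token list by pointer identity, not by value equality. A short standalone sketch of the `Arc::ptr_eq` + `retain` idiom it relies on:

```rust
use std::sync::Arc;

// Sketch: remove an entry from a list of Arcs by pointer identity.
// Two Arcs with equal contents but distinct allocations are not conflated.
fn main() {
    let target = Arc::new(String::from("demo/key"));
    let twin = Arc::new(String::from("demo/key")); // equal value, different allocation
    let mut tokens = vec![target.clone(), twin.clone()];
    tokens.retain(|t| !Arc::ptr_eq(t, &target));
    assert_eq!(tokens.len(), 1); // only the pointer-identical entry was removed
    assert!(Arc::ptr_eq(&tokens[0], &twin)); // the equal-by-value twin survives
}
```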
+pub(super) fn undeclare_simple_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    res: &mut Arc<Resource>,
+    send_declare: &mut SendDeclare,
+) {
+    if !face_hat_mut!(face)
+        .remote_tokens
+        .values()
+        .any(|s| *s == *res)
+    {
+        if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) {
+            get_mut_unchecked(ctx).token = false;
+        }
+
+        let mut simple_tokens = simple_tokens(res);
+        let router_tokens = remote_router_tokens(tables, res);
+        let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, res);
+        if simple_tokens.is_empty() && !linkstatepeer_tokens {
+            undeclare_router_token(tables, Some(face), res, &tables.zid.clone(), send_declare);
+        } else {
+            propagate_forget_simple_token_to_peers(tables, res, send_declare);
+        }
+
+        if simple_tokens.len() == 1 && !router_tokens && !linkstatepeer_tokens {
+            let mut face = &mut simple_tokens[0];
+            if face.whatami != WhatAmI::Client {
+                if let Some(id) = face_hat_mut!(face).local_tokens.remove(res) {
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id,
+                                    ext_wire_expr: WireExprType::null(),
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+                for res in face_hat!(face)
+                    .local_tokens
+                    .keys()
+                    .cloned()
+                    .collect::<Vec<Arc<Resource>>>()
+                {
+                    if !res.context().matches.iter().any(|m| {
+                        m.upgrade().is_some_and(|m| {
+                            m.context.is_some()
+                                && (remote_simple_tokens(tables, &m, face)
+                                    || remote_linkstatepeer_tokens(tables, &m)
+                                    || remote_router_tokens(tables, &m))
+                        })
+                    }) {
+                        if let Some(id) = face_hat_mut!(&mut face).local_tokens.remove(&res) {
+                            send_declare(
+                                &face.primitives,
+                                RoutingContext::with_expr(
+                                    Declare {
+                                        interest_id: None,
+                                        ext_qos: ext::QoSType::DECLARE,
+                                        ext_tstamp: None,
+                                        ext_nodeid: ext::NodeIdType::DEFAULT,
+                                        body: DeclareBody::UndeclareToken(UndeclareToken {
+                                            id,
+                                            ext_wire_expr: WireExprType::null(),
+                                        }),
+                                    },
+                                    res.expr(),
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+fn forget_simple_token(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    id: TokenId,
+    send_declare: &mut SendDeclare,
+) -> Option<Arc<Resource>> {
+    if let Some(mut res) = face_hat_mut!(face).remote_tokens.remove(&id) {
+        undeclare_simple_token(tables, face, &mut res, send_declare);
+        Some(res)
+    } else {
+        None
+    }
+}
+
+pub(super) fn token_remove_node(
+    tables: &mut Tables,
+    node: &ZenohIdProto,
+    net_type: WhatAmI,
+    send_declare: &mut SendDeclare,
+) {
+    match net_type {
+        WhatAmI::Router => {
+            for mut res in hat!(tables)
+                .router_tokens
+                .iter()
+                .filter(|res| res_hat!(res).router_tokens.contains(node))
+                .cloned()
+                .collect::<Vec<Arc<Resource>>>()
+            {
+                unregister_router_token(tables, None, &mut res, node, send_declare);
+                Resource::clean(&mut res)
+            }
+        }
+        WhatAmI::Peer => {
+            for mut res in hat!(tables)
+                .linkstatepeer_tokens
+                .iter()
+                .filter(|res| res_hat!(res).linkstatepeer_tokens.contains(node))
+                .cloned()
+                .collect::<Vec<Arc<Resource>>>()
+            {
+                unregister_linkstatepeer_token(tables, &mut res, node);
+                let simple_tokens = res.session_ctxs.values().any(|ctx| ctx.token);
+                let linkstatepeer_tokens = remote_linkstatepeer_tokens(tables, &res);
+                if !simple_tokens && !linkstatepeer_tokens {
+                    undeclare_router_token(
+                        tables,
+                        None,
+                        &mut res,
+                        &tables.zid.clone(),
+                        send_declare,
+                    );
+                }
+                Resource::clean(&mut res)
+            }
+        }
+        _ => (),
+    }
+}
+
+pub(super) fn token_tree_change(
+    tables: &mut Tables,
+    new_clildren: &[Vec<NodeIndex>],
+    net_type: WhatAmI,
+) {
+    let net = match hat!(tables).get_net(net_type) {
+        Some(net) => net,
+        None => {
+            tracing::error!("Error accessing net in token_tree_change!");
+            return;
+        }
+    };
+    // propagate tokens to new children
+    for (tree_sid, tree_clildren) in new_clildren.iter().enumerate() {
+        if !tree_clildren.is_empty() {
+            let tree_idx = NodeIndex::new(tree_sid);
+            if net.graph.contains_node(tree_idx) {
+                let tree_id = net.graph[tree_idx].zid;
+
+                let tokens_res = match net_type {
+                    WhatAmI::Router => &hat!(tables).router_tokens,
+                    _ => &hat!(tables).linkstatepeer_tokens,
+                };
+
+                for res in tokens_res {
+                    let tokens = match net_type {
+                        WhatAmI::Router => &res_hat!(res).router_tokens,
+                        _ => &res_hat!(res).linkstatepeer_tokens,
+                    };
+                    for token in tokens {
+                        if *token == tree_id {
+                            send_sourced_token_to_net_clildren(
+                                tables,
+                                net,
+                                tree_clildren,
+                                res,
+                                None,
+                                tree_sid as NodeId,
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
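The `matches` lists walked above hold `Weak` references, so each candidate is revived with `Weak::upgrade` and already-dropped entries are skipped without any explicit cleanup. A self-contained sketch of that pattern:

```rust
use std::sync::{Arc, Weak};

// Sketch: scan a list of weak references, transparently ignoring entries
// whose underlying resource has already been freed.
fn any_live_match(matches: &[Weak<String>], prefix: &str) -> bool {
    matches
        .iter()
        .any(|m| m.upgrade().is_some_and(|m| m.starts_with(prefix)))
}

fn main() {
    let live = Arc::new(String::from("demo/tokens/a"));
    let expired = Weak::new(); // stands in for a match whose resource was dropped
    assert!(any_live_match(&[expired, Arc::downgrade(&live)], "demo"));
}
```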
+pub(super) fn token_linkstate_change(
+    tables: &mut Tables,
+    zid: &ZenohIdProto,
+    links: &[ZenohIdProto],
+    send_declare: &mut SendDeclare,
+) {
+    if let Some(mut src_face) = tables.get_face(zid).cloned() {
+        if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer {
+            let to_forget = face_hat!(src_face)
+                .local_tokens
+                .keys()
+                .filter(|res| {
+                    let client_tokens = res
+                        .session_ctxs
+                        .values()
+                        .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.token);
+                    !remote_router_tokens(tables, res)
+                        && !client_tokens
+                        && !res.session_ctxs.values().any(|ctx| {
+                            ctx.face.whatami == WhatAmI::Peer
+                                && src_face.id != ctx.face.id
+                                && HatTables::failover_brokering_to(links, ctx.face.zid)
+                        })
+                })
+                .cloned()
+                .collect::<Vec<Arc<Resource>>>();
+            for res in to_forget {
+                if let Some(id) = face_hat_mut!(&mut src_face).local_tokens.remove(&res) {
+                    let wire_expr = Resource::get_best_key(&res, "", src_face.id);
+                    send_declare(
+                        &src_face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id: None,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::default(),
+                                body: DeclareBody::UndeclareToken(UndeclareToken {
+                                    id,
+                                    ext_wire_expr: WireExprType { wire_expr },
+                                }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+            }
+
+            for mut dst_face in tables.faces.values().cloned() {
+                if src_face.id != dst_face.id
+                    && HatTables::failover_brokering_to(links, dst_face.zid)
+                {
+                    for res in face_hat!(src_face).remote_tokens.values() {
+                        if !face_hat!(dst_face).local_tokens.contains_key(res) {
+                            let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst);
+                            face_hat_mut!(&mut dst_face)
+                                .local_tokens
+                                .insert(res.clone(), id);
+                            let push_declaration = push_declaration_profile(tables, &dst_face);
+                            let key_expr = Resource::decl_key(res, &mut dst_face, push_declaration);
+                            send_declare(
+                                &dst_face.primitives,
+                                RoutingContext::with_expr(
+                                    Declare {
+                                        interest_id: None,
+                                        ext_qos: ext::QoSType::DECLARE,
+                                        ext_tstamp: None,
+                                        ext_nodeid: ext::NodeIdType::default(),
+                                        body: DeclareBody::DeclareToken(DeclareToken {
+                                            id,
+                                            wire_expr: key_expr,
+                                        }),
+                                    },
+                                    res.expr(),
+                                ),
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+pub(crate) fn declare_token_interest(
+    tables: &mut Tables,
+    face: &mut Arc<FaceState>,
+    id: InterestId,
+    res: Option<&mut Arc<Resource>>,
+    mode: InterestMode,
+    aggregate: bool,
+    send_declare: &mut SendDeclare,
+) {
+    if mode.current()
+        && (face.whatami == WhatAmI::Client
+            || (face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer)))
+    {
+        let interest_id = (!mode.future()).then_some(id);
+        if let Some(res) = res.as_ref() {
+            if aggregate {
+                if hat!(tables).router_tokens.iter().any(|token| {
+                    token.context.is_some()
+                        && token.matches(res)
+                        && (remote_simple_tokens(tables, token, face)
+                            || remote_linkstatepeer_tokens(tables, token)
+                            || remote_router_tokens(tables, token))
+                }) {
+                    let id = if mode.future() {
+                        let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst);
+                        face_hat_mut!(face).local_tokens.insert((*res).clone(), id);
+                        id
+                    } else {
+                        0
+                    };
+                    let wire_expr =
+                        Resource::decl_key(res, face, push_declaration_profile(tables, face));
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }),
+                            },
+                            res.expr(),
+                        ),
+                    );
+                }
+            } else {
+                for token in &hat!(tables).router_tokens {
+                    if token.context.is_some()
+                        && token.matches(res)
+                        && (res_hat!(token)
+                            .router_tokens
+                            .iter()
+                            .any(|r| *r != tables.zid)
+                            || res_hat!(token)
+                                .linkstatepeer_tokens
+                                .iter()
+                                .any(|r| *r != tables.zid)
+                            || token.session_ctxs.values().any(|s| {
+                                s.face.id != face.id
+                                    && s.token
+                                    && (s.face.whatami == WhatAmI::Client
+                                        || face.whatami == WhatAmI::Client
+                                        || (s.face.whatami == WhatAmI::Peer
+                                            && hat!(tables)
+                                                .failover_brokering(s.face.zid, face.zid)))
+                            }))
+                    {
+                        let id = if mode.future() {
+                            let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst);
+                            face_hat_mut!(face).local_tokens.insert(token.clone(), id);
+                            id
+                        } else {
+                            0
+                        };
+                        let wire_expr =
+                            Resource::decl_key(token, face, push_declaration_profile(tables, face));
+                        send_declare(
+                            &face.primitives,
+                            RoutingContext::with_expr(
+                                Declare {
+                                    interest_id,
+                                    ext_qos: ext::QoSType::DECLARE,
+                                    ext_tstamp: None,
+                                    ext_nodeid: ext::NodeIdType::DEFAULT,
+                                    body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }),
+                                },
+                                token.expr(),
+                            ),
+                        );
+                    }
+                }
+            }
+        } else {
+            for token in &hat!(tables).router_tokens {
+                if token.context.is_some()
+                    && (res_hat!(token)
+                        .router_tokens
+                        .iter()
+                        .any(|r| *r != tables.zid)
+                        || res_hat!(token)
+                            .linkstatepeer_tokens
+                            .iter()
+                            .any(|r| *r != tables.zid)
+                        || token.session_ctxs.values().any(|s| {
+                            s.token
+                                && (s.face.whatami != WhatAmI::Peer
+                                    || face.whatami != WhatAmI::Peer
+                                    || hat!(tables).failover_brokering(s.face.zid, face.zid))
+                        }))
+                {
+                    let id = if mode.future() {
+                        let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst);
+                        face_hat_mut!(face).local_tokens.insert(token.clone(), id);
+                        id
+                    } else {
+                        0
+                    };
+                    let wire_expr =
+                        Resource::decl_key(token, face, push_declaration_profile(tables, face));
+                    send_declare(
+                        &face.primitives,
+                        RoutingContext::with_expr(
+                            Declare {
+                                interest_id,
+                                ext_qos: ext::QoSType::DECLARE,
+                                ext_tstamp: None,
+                                ext_nodeid: ext::NodeIdType::DEFAULT,
+                                body: DeclareBody::DeclareToken(DeclareToken { id, wire_expr }),
+                            },
+                            token.expr(),
+                        ),
+                    );
+                }
+            }
+        }
+    }
+}
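In `declare_token_interest`, a fresh per-face id is allocated only when the interest also covers future declarations; current-only interests use the reserved id `0`, since no later `UndeclareToken` will ever refer to them. A trimmed sketch of that id policy (a plain `u32` stands in for the atomic counter, and `future` mirrors `InterestMode::future()`):

```rust
// Sketch: interests in future declarations get a fresh id that later
// undeclarations can reference; one-shot (current-only) interests get 0.
fn token_id(future: bool, next_id: &mut u32) -> u32 {
    if future {
        *next_id += 1; // stands in for next_id.fetch_add(1, Ordering::SeqCst)
        *next_id
    } else {
        0
    }
}

fn main() {
    let mut next_id = 0;
    assert_eq!(token_id(false, &mut next_id), 0); // current-only interest
    assert_eq!(token_id(true, &mut next_id), 1); // future interest gets a real id
    assert_eq!(token_id(true, &mut next_id), 2);
}
```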
+impl HatTokenTrait for HatCode {
+    fn declare_token(
+        &self,
+        tables: &mut Tables,
+        face: &mut Arc<FaceState>,
+        id: TokenId,
+        res: &mut Arc<Resource>,
+        node_id: NodeId,
+        _interest_id: Option<InterestId>,
+        send_declare: &mut SendDeclare,
+    ) {
+        match face.whatami {
+            WhatAmI::Router => {
+                if let Some(router) = get_router(tables, face, node_id) {
+                    declare_router_token(tables, face, res, router, send_declare)
+                }
+            }
+            WhatAmI::Peer => {
+                if hat!(tables).full_net(WhatAmI::Peer) {
+                    if let Some(peer) = get_peer(tables, face, node_id) {
+                        declare_linkstatepeer_token(tables, face, res, peer, send_declare)
+                    }
+                } else {
+                    declare_simple_token(tables, face, id, res, send_declare)
+                }
+            }
+            _ => declare_simple_token(tables, face, id, res, send_declare),
+        }
+    }
+
+    fn undeclare_token(
+        &self,
+        tables: &mut Tables,
+        face: &mut Arc<FaceState>,
+        id: TokenId,
+        res: Option<Arc<Resource>>,
+        node_id: NodeId,
+        send_declare: &mut SendDeclare,
+    ) -> Option<Arc<Resource>> {
+        match face.whatami {
+            WhatAmI::Router => {
+                if let Some(mut res) = res {
+                    if let Some(router) = get_router(tables, face, node_id) {
+                        forget_router_token(tables, face, &mut res, &router, send_declare);
+                        Some(res)
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            }
+            WhatAmI::Peer => {
+                if hat!(tables).full_net(WhatAmI::Peer) {
+                    if let Some(mut res) = res {
+                        if let Some(peer) = get_peer(tables, face, node_id) {
+                            forget_linkstatepeer_token(tables, face, &mut res, &peer, send_declare);
+                            Some(res)
+                        } else {
+                            None
+                        }
+                    } else {
+                        None
+                    }
+                } else {
+                    forget_simple_token(tables, face, id, send_declare)
+                }
+            }
+            _ => forget_simple_token(tables, face, id, send_declare),
+        }
+    }
+}
diff --git
a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 2615719132..839f18bd07 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,38 +18,50 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use super::{ - authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, - InterceptorFactoryTrait, InterceptorTrait, +use std::{any::Any, collections::HashSet, iter, sync::Arc}; + +use itertools::Itertools; +use zenoh_config::{ + AclConfig, AclMessage, CertCommonName, InterceptorFlow, Interface, Permission, Username, }; -use crate::net::routing::RoutingContext; -use crate::KeyExpr; -use std::any::Any; -use std::sync::Arc; -use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; use zenoh_protocol::{ - network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, + core::ZenohIdProto, + network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use zenoh_transport::{ + multicast::TransportMulticast, + unicast::{authentication::AuthId, TransportUnicast}, +}; + +use super::{ + authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + InterceptorFactoryTrait, InterceptorTrait, +}; +use crate::{ + api::key_expr::KeyExpr, + net::routing::{interceptor::authorization::SubjectQuery, RoutingContext}, +}; pub struct AclEnforcer { enforcer: Arc, } -#[derive(Clone, Debug)] -pub struct Interface { +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AuthSubject { id: usize, name: String, } + struct EgressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, - zid: ZenohId, + subject: Vec, + zid: ZenohIdProto, } + struct IngressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, - zid: ZenohId, + subject: Vec, + zid: ZenohIdProto, } pub(crate) fn acl_interceptor_factories( @@ -80,54 +92,112 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - match transport.get_zid() { - Ok(zid) => { - let mut interface_list: Vec = Vec::new(); - match transport.get_links() { - Ok(links) => { - for link in links { - let enforcer = self.enforcer.clone(); - for face in link.interfaces { - let subject = &Subject::Interface(face.clone()); - if let Some(val) = enforcer.subject_map.get(subject) { - interface_list.push(Interface { - id: *val, - name: face, - }); - } - } - } - } - Err(e) => { - tracing::error!("Couldn't get interface list with error: {}", e); + let auth_ids = match transport.get_auth_ids() { + Ok(auth_ids) => auth_ids, + Err(err) => { + tracing::error!("Couldn't get Transport Auth IDs: {}", err); + return (None, None); + } + }; + + let mut cert_common_names = Vec::new(); + let mut username = None; + + for auth_id in auth_ids { + match auth_id { + AuthId::CertCommonName(value) => { + cert_common_names.push(Some(CertCommonName(value))); + } + AuthId::Username(value) => { + if username.is_some() { + tracing::error!("Transport should not report more than one username"); return (None, None); } + username = Some(Username(value)); } - let ingress_interceptor = Box::new(IngressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), - zid, - }); - let 
egress_interceptor = Box::new(EgressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), - zid, + AuthId::None => {} + } + } + if cert_common_names.is_empty() { + cert_common_names.push(None); + } + + let links = match transport.get_links() { + Ok(links) => links, + Err(err) => { + tracing::error!("Couldn't get Transport links: {}", err); + return (None, None); + } + }; + let mut interfaces = links + .into_iter() + .flat_map(|link| { + link.interfaces + .into_iter() + .map(|interface| Some(Interface(interface))) + }) + .collect::>(); + if interfaces.is_empty() { + interfaces.push(None); + } else if interfaces.len() > 1 { + tracing::warn!("Transport returned multiple network interfaces, current ACL logic might incorrectly apply filters in this case!"); + } + + let mut auth_subjects = HashSet::new(); + + for ((username, interface), cert_common_name) in iter::once(username) + .cartesian_product(interfaces.into_iter()) + .cartesian_product(cert_common_names.into_iter()) + { + let query = SubjectQuery { + interface, + cert_common_name, + username, + }; + + if let Some(entry) = self.enforcer.subject_store.query(&query) { + auth_subjects.insert(AuthSubject { + id: entry.id, + name: format!("{query}"), }); - match ( - self.enforcer.interface_enabled.ingress, - self.enforcer.interface_enabled.egress, - ) { - (true, true) => (Some(ingress_interceptor), Some(egress_interceptor)), - (true, false) => (Some(ingress_interceptor), None), - (false, true) => (None, Some(egress_interceptor)), - (false, false) => (None, None), - } } - Err(e) => { - tracing::error!("Failed to get zid with error :{}", e); - (None, None) + } + + let zid = match transport.get_zid() { + Ok(zid) => zid, + Err(err) => { + tracing::error!("Couldn't get Transport zid: {}", err); + return (None, None); } + }; + // FIXME: Investigate if `AuthSubject` can have duplicates above and try to avoid this conversion + let auth_subjects = auth_subjects.into_iter().collect::>(); + if auth_subjects.is_empty() { + tracing::info!( + "{zid} did not match any configured ACL subject. Default permission `{:?}` will be applied on all messages", + self.enforcer.default_permission + ); } + let ingress_interceptor = Box::new(IngressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + zid, + subject: auth_subjects.clone(), + }); + let egress_interceptor = Box::new(EgressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + zid, + subject: auth_subjects, + }); + ( + self.enforcer + .interface_enabled + .ingress + .then_some(ingress_interceptor), + self.enforcer + .interface_enabled + .egress + .then_some(egress_interceptor), + ) } fn new_transport_multicast( @@ -165,19 +235,36 @@ impl InterceptorTrait for IngressAclEnforcer { .or_else(|| ctx.full_expr()); match &ctx.msg.body { + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(AclMessage::Query, "Query (ingress)", key_expr?) == Permission::Deny + { + return None; + } + } + NetworkBody::Response(Response { .. }) => { + if self.action(AclMessage::Reply, "Reply (ingress)", key_expr?) == Permission::Deny + { + return None; + } + } NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (ingress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Put, "Put (ingress)", key_expr?) == Permission::Deny { return None; } } - NetworkBody::Request(Request { - payload: RequestBody::Query(_), + NetworkBody::Push(Push { + payload: PushBody::Del(_), .. 
}) => { - if self.action(Action::Get, "Get (ingress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Delete, "Delete (ingress)", key_expr?) + == Permission::Deny + { return None; } } @@ -186,7 +273,7 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) => { if self.action( - Action::DeclareSubscriber, + AclMessage::DeclareSubscriber, "Declare Subscriber (ingress)", key_expr?, ) == Permission::Deny @@ -199,7 +286,7 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) => { if self.action( - Action::DeclareQueryable, + AclMessage::DeclareQueryable, "Declare Queryable (ingress)", key_expr?, ) == Permission::Deny @@ -207,7 +294,38 @@ impl InterceptorTrait for IngressAclEnforcer { return None; } } - _ => {} + // Unfiltered Declare messages + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareFinal(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareToken(_), + .. + }) => {} + // Unfiltered Undeclare messages + NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareToken(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareQueryable(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareSubscriber(_), + .. + }) => {} + // Unfiltered remaining message types + NetworkBody::Interest(_) | NetworkBody::OAM(_) | NetworkBody::ResponseFinal(_) => {} } Some(ctx) } @@ -217,6 +335,7 @@ impl InterceptorTrait for EgressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { Some(Box::new(key_expr.to_string())) } + fn intercept( &self, ctx: RoutingContext, @@ -233,19 +352,33 @@ impl InterceptorTrait for EgressAclEnforcer { .or_else(|| ctx.full_expr()); match &ctx.msg.body { + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(AclMessage::Query, "Query (egress)", key_expr?) == Permission::Deny { + return None; + } + } + NetworkBody::Response(Response { .. }) => { + if self.action(AclMessage::Reply, "Reply (egress)", key_expr?) == Permission::Deny { + return None; + } + } NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put (egress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Put, "Put (egress)", key_expr?) == Permission::Deny { return None; } } - NetworkBody::Request(Request { - payload: RequestBody::Query(_), + NetworkBody::Push(Push { + payload: PushBody::Del(_), .. }) => { - if self.action(Action::Get, "Get (egress)", key_expr?) == Permission::Deny { + if self.action(AclMessage::Delete, "Delete (egress)", key_expr?) == Permission::Deny + { return None; } } @@ -254,7 +387,7 @@ impl InterceptorTrait for EgressAclEnforcer { .. }) => { if self.action( - Action::DeclareSubscriber, + AclMessage::DeclareSubscriber, "Declare Subscriber (egress)", key_expr?, ) == Permission::Deny @@ -267,7 +400,7 @@ impl InterceptorTrait for EgressAclEnforcer { .. }) => { if self.action( - Action::DeclareQueryable, + AclMessage::DeclareQueryable, "Declare Queryable (egress)", key_expr?, ) == Permission::Deny @@ -275,22 +408,53 @@ impl InterceptorTrait for EgressAclEnforcer { return None; } } - _ => {} + // Unfiltered Declare messages + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareFinal(_), + .. 
+ }) + | NetworkBody::Declare(Declare { + body: DeclareBody::DeclareToken(_), + .. + }) => {} + // Unfiltered Undeclare messages + NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareKeyExpr(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareToken(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareQueryable(_), + .. + }) + | NetworkBody::Declare(Declare { + body: DeclareBody::UndeclareSubscriber(_), + .. + }) => {} + // Unfiltered remaining message types + NetworkBody::Interest(_) | NetworkBody::OAM(_) | NetworkBody::ResponseFinal(_) => {} } Some(ctx) } } pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; - fn interface_list(&self) -> Vec; - fn zid(&self) -> ZenohId; + fn zid(&self) -> ZenohIdProto; fn flow(&self) -> InterceptorFlow; - fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { + fn authn_ids(&self) -> Vec; + fn action(&self, action: AclMessage, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); + let authn_ids: Vec = self.authn_ids(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; - for subject in &interface_list { + for subject in &authn_ids { match policy_enforcer.policy_decision_point(subject.id, self.flow(), action, key_expr) { Ok(Permission::Allow) => { tracing::trace!( @@ -337,16 +501,17 @@ impl AclActionMethods for EgressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - - fn zid(&self) -> ZenohId { + fn zid(&self) -> ZenohIdProto { self.zid } + fn flow(&self) -> InterceptorFlow { InterceptorFlow::Egress } + + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } impl AclActionMethods for IngressAclEnforcer { @@ -354,14 +519,15 @@ impl AclActionMethods for IngressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { - self.interface_list.clone() - } - - fn zid(&self) -> ZenohId { + fn zid(&self) -> ZenohIdProto { self.zid } + fn flow(&self) -> InterceptorFlow { InterceptorFlow::Ingress } + + fn authn_ids(&self) -> Vec { + self.subject.clone() + } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index a39d15b3c3..a7446382d1 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -17,20 +17,146 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html)
-use ahash::RandomState;
 use std::collections::HashMap;
-use std::net::Ipv4Addr;
+
+use ahash::RandomState;
+use itertools::Itertools;
 use zenoh_config::{
-    AclConfig, AclConfigRules, Action, InterceptorFlow, Permission, PolicyRule, Subject,
+    AclConfig, AclConfigPolicyEntry, AclConfigRule, AclConfigSubjects, AclMessage, CertCommonName,
+    InterceptorFlow, Interface, Permission, PolicyRule, Username,
+};
+use zenoh_keyexpr::{
+    keyexpr,
+    keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree},
 };
-use zenoh_keyexpr::keyexpr;
-use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree};
 use zenoh_result::ZResult;
-use zenoh_util::net::get_interface_names_by_addr;
 
 type PolicyForSubject = FlowPolicy;
 type PolicyMap = HashMap<usize, PolicyForSubject>;
-type SubjectMap = HashMap<Subject, usize>;
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct Subject {
+    pub(crate) interface: SubjectProperty<Interface>,
+    pub(crate) cert_common_name: SubjectProperty<CertCommonName>,
+    pub(crate) username: SubjectProperty<Username>,
+}
+
+impl Subject {
+    fn matches(&self, query: &SubjectQuery) -> bool {
+        self.interface.matches(query.interface.as_ref())
+            && self.username.matches(query.username.as_ref())
+            && self
+                .cert_common_name
+                .matches(query.cert_common_name.as_ref())
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) enum SubjectProperty<T> {
+    Wildcard,
+    Exactly(T),
+}
+
+impl<T: PartialEq> SubjectProperty<T> {
+    fn matches(&self, other: Option<&T>) -> bool {
+        match (self, other) {
+            (SubjectProperty::Wildcard, None) => true,
+            // NOTE: This match arm is the reason why `SubjectProperty<T>` cannot simply be `Option<T>`
+            (SubjectProperty::Wildcard, Some(_)) => true,
+            (SubjectProperty::Exactly(_), None) => false,
+            (SubjectProperty::Exactly(lhs), Some(rhs)) => lhs == rhs,
+        }
+    }
+}
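A `Wildcard` property therefore matches whether or not the transport reports that attribute at all, which plain `Option<T>` equality could not express. A standalone usage sketch (inlining a trimmed copy of the enum so the snippet compiles on its own):

```rust
// Trimmed copy of SubjectProperty for illustration: Wildcard accepts both
// present and absent values, Exactly(v) accepts only Some(v).
enum SubjectProperty<T> {
    Wildcard,
    Exactly(T),
}

impl<T: PartialEq> SubjectProperty<T> {
    fn matches(&self, other: Option<&T>) -> bool {
        match (self, other) {
            (SubjectProperty::Wildcard, _) => true,
            (SubjectProperty::Exactly(_), None) => false,
            (SubjectProperty::Exactly(lhs), Some(rhs)) => lhs == rhs,
        }
    }
}

fn main() {
    let any_interface: SubjectProperty<&str> = SubjectProperty::Wildcard;
    let only_eth0 = SubjectProperty::Exactly("eth0");
    assert!(any_interface.matches(None)); // wildcard: no interface reported
    assert!(any_interface.matches(Some(&"lo"))); // wildcard: any interface
    assert!(only_eth0.matches(Some(&"eth0")));
    assert!(!only_eth0.matches(None)); // an exact property requires a value
}
```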
+#[derive(Debug)]
+pub(crate) struct SubjectQuery {
+    pub(crate) interface: Option<Interface>,
+    pub(crate) cert_common_name: Option<CertCommonName>,
+    pub(crate) username: Option<Username>,
+}
+
+impl std::fmt::Display for SubjectQuery {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let subject_names = [
+            self.interface.as_ref().map(|face| format!("{face}")),
+            self.cert_common_name.as_ref().map(|ccn| format!("{ccn}")),
+            self.username.as_ref().map(|username| format!("{username}")),
+        ];
+        write!(
+            f,
+            "{}",
+            subject_names
+                .iter()
+                .filter_map(|v| v.as_ref())
+                .cloned()
+                .collect::<Vec<String>>()
+                .join("+")
+        )
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct SubjectEntry {
+    pub(crate) subject: Subject,
+    pub(crate) id: usize,
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct SubjectStore {
+    inner: Vec<SubjectEntry>,
+}
+
+impl SubjectStore {
+    pub(crate) fn query(&self, query: &SubjectQuery) -> Option<&SubjectEntry> {
+        // FIXME: Can this search be better than linear?
+        self.inner.iter().find(|entry| entry.subject.matches(query))
+    }
+}
+
+impl Default for SubjectStore {
+    fn default() -> Self {
+        SubjectMapBuilder::new().build()
+    }
+}
+
+pub(crate) struct SubjectMapBuilder {
+    builder: HashMap<Subject, usize>,
+    id_counter: usize,
+}
+
+impl SubjectMapBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            // FIXME: Capacity can be calculated from the length of subject properties in configuration
+            builder: HashMap::new(),
+            id_counter: 0,
+        }
+    }
+
+    pub(crate) fn build(self) -> SubjectStore {
+        SubjectStore {
+            inner: self
+                .builder
+                .into_iter()
+                .map(|(subject, id)| SubjectEntry { subject, id })
+                .collect(),
+        }
+    }
+
+    /// Assumes subject contains at most one instance of each Subject variant
+    pub(crate) fn insert_or_get(&mut self, subject: Subject) -> usize {
+        match self.builder.get(&subject).copied() {
+            Some(id) => id,
+            None => {
+                self.id_counter += 1;
+                self.builder.insert(subject, self.id_counter);
+                self.id_counter
+            }
+        }
+    }
+}
+
 type KeTreeRule = KeBoxTree<bool>;
 
 #[derive(Default)]
@@ -56,27 +182,33 @@ impl PermissionPolicy {
 }
 #[derive(Default)]
 struct ActionPolicy {
-    get: PermissionPolicy,
+    query: PermissionPolicy,
     put: PermissionPolicy,
+    delete: PermissionPolicy,
     declare_subscriber: PermissionPolicy,
     declare_queryable: PermissionPolicy,
+    reply: PermissionPolicy,
 }
 
 impl ActionPolicy {
-    fn action(&self, action: Action) -> &PermissionPolicy {
+    fn action(&self, action: AclMessage) -> &PermissionPolicy {
         match action {
-            Action::Get => &self.get,
-            Action::Put => &self.put,
-            Action::DeclareSubscriber => &self.declare_subscriber,
-            Action::DeclareQueryable => &self.declare_queryable,
+            AclMessage::Query => &self.query,
+            AclMessage::Reply => &self.reply,
+            AclMessage::Put => &self.put,
+            AclMessage::Delete => &self.delete,
+            AclMessage::DeclareSubscriber => &self.declare_subscriber,
+            AclMessage::DeclareQueryable => &self.declare_queryable,
         }
     }
-    fn action_mut(&mut self, action: Action) -> &mut PermissionPolicy {
+    fn action_mut(&mut self, action: AclMessage) -> &mut PermissionPolicy {
         match action {
-            Action::Get => &mut self.get,
-            Action::Put => &mut self.put,
-            Action::DeclareSubscriber => &mut self.declare_subscriber,
-            Action::DeclareQueryable => &mut self.declare_queryable,
+            AclMessage::Query => &mut self.query,
+            AclMessage::Reply => &mut self.reply,
+            AclMessage::Put => &mut self.put,
+            AclMessage::Delete => &mut self.delete,
+            AclMessage::DeclareSubscriber => &mut self.declare_subscriber,
+            AclMessage::DeclareQueryable => &mut self.declare_queryable,
         }
     }
 }
@@ -111,14 +243,14 @@ pub struct InterfaceEnabled {
 pub struct PolicyEnforcer {
     pub(crate) acl_enabled: bool,
     pub(crate) default_permission: Permission,
-    pub(crate) subject_map: SubjectMap,
+    pub(crate) subject_store: SubjectStore,
     pub(crate) policy_map: PolicyMap,
     pub(crate) interface_enabled: InterfaceEnabled,
 }
 
 #[derive(Debug, Clone)]
 pub struct PolicyInformation {
-    subject_map: SubjectMap,
+    subject_map: SubjectStore,
     policy_rules: Vec<PolicyRule>,
 }
 
@@ -127,7 +259,7 @@ impl PolicyEnforcer {
         PolicyEnforcer {
             acl_enabled: true,
             default_permission: Permission::Deny,
-            subject_map: SubjectMap::default(),
+            subject_store: SubjectStore::default(),
             policy_map: PolicyMap::default(),
             interface_enabled: InterfaceEnabled::default(),
         }
@@ -141,11 +273,23 @@
         self.acl_enabled = mut_acl_config.enabled;
         self.default_permission = mut_acl_config.default_permission;
         if self.acl_enabled {
-            if let Some(mut rules) = mut_acl_config.rules {
-                if rules.is_empty() {
-                    tracing::warn!("Access
control rules are empty in config file"); + if let (Some(mut rules), Some(mut subjects), Some(policies)) = ( + mut_acl_config.rules, + mut_acl_config.subjects, + mut_acl_config.policies, + ) { + if rules.is_empty() || subjects.is_empty() || policies.is_empty() { + rules.is_empty().then(|| { + tracing::warn!("Access control rules list is empty in config file") + }); + subjects.is_empty().then(|| { + tracing::warn!("Access control subjects list is empty in config file") + }); + policies.is_empty().then(|| { + tracing::warn!("Access control policies list is empty in config file") + }); self.policy_map = PolicyMap::default(); - self.subject_map = SubjectMap::default(); + self.subject_store = SubjectStore::default(); if self.default_permission == Permission::Deny { self.interface_enabled = InterfaceEnabled { ingress: true, @@ -154,63 +298,71 @@ impl PolicyEnforcer { } } else { // check for undefined values in rules and initialize them to defaults - for (rule_offset, rule) in rules.iter_mut().enumerate() { - match rule.interfaces { - Some(_) => (), - None => { - tracing::warn!("ACL config interfaces list is empty. Applying rule #{} to all network interfaces", rule_offset); - if let Ok(all_interfaces) = - get_interface_names_by_addr(Ipv4Addr::UNSPECIFIED.into()) - { - rule.interfaces = Some(all_interfaces); - } - } + for rule in rules.iter_mut() { + if rule.id.trim().is_empty() { + bail!("Found empty rule id in rules list"); } - match rule.flows { - Some(_) => (), - None => { - tracing::warn!("ACL config flows list is empty. Applying rule #{} to both Ingress and Egress flows", rule_offset); - rule.flows = Some( - [InterceptorFlow::Ingress, InterceptorFlow::Egress].into(), - ); - } + if rule.flows.is_none() { + tracing::warn!("Rule '{}' flows list is not set. 
Setting it to both Ingress and Egress", rule.id); + rule.flows = + Some([InterceptorFlow::Ingress, InterceptorFlow::Egress].into()); } } - let policy_information = self.policy_information_point(&rules)?; - let subject_map = policy_information.subject_map; - let mut main_policy: PolicyMap = PolicyMap::default(); + // check for undefined values in subjects and initialize them to defaults + for subject in subjects.iter_mut() { + if subject.id.trim().is_empty() { + bail!("Found empty subject id in subjects list"); + } + + if subject + .cert_common_names + .as_ref() + .is_some_and(Vec::is_empty) + { + bail!("Subject property `cert_common_names` cannot be empty"); + } + + if subject.usernames.as_ref().is_some_and(Vec::is_empty) { + bail!("Subject property `usernames` cannot be empty"); + } + if subject.interfaces.as_ref().is_some_and(Vec::is_empty) { + bail!("Subject property `interfaces` cannot be empty"); + } + } + let policy_information = + self.policy_information_point(subjects, rules, policies)?; + + let mut main_policy: PolicyMap = PolicyMap::default(); for rule in policy_information.policy_rules { - if let Some(index) = subject_map.get(&rule.subject) { - let single_policy = main_policy.entry(*index).or_default(); - single_policy - .flow_mut(rule.flow) - .action_mut(rule.action) - .permission_mut(rule.permission) - .insert(keyexpr::new(&rule.key_expr)?, true); - - if self.default_permission == Permission::Deny { - self.interface_enabled = InterfaceEnabled { - ingress: true, - egress: true, - }; - } else { - match rule.flow { - InterceptorFlow::Ingress => { - self.interface_enabled.ingress = true; - } - InterceptorFlow::Egress => { - self.interface_enabled.egress = true; - } + let subject_policy = main_policy.entry(rule.subject_id).or_default(); + subject_policy + .flow_mut(rule.flow) + .action_mut(rule.message) + .permission_mut(rule.permission) + .insert(keyexpr::new(&rule.key_expr)?, true); + + if self.default_permission == Permission::Deny { + self.interface_enabled = InterfaceEnabled { + ingress: true, + egress: true, + }; + } else { + match rule.flow { + InterceptorFlow::Ingress => { + self.interface_enabled.ingress = true; + } + InterceptorFlow::Egress => { + self.interface_enabled.egress = true; } } - }; + } } self.policy_map = main_policy; - self.subject_map = subject_map; + self.subject_store = policy_information.subject_map; } } else { - tracing::warn!("Access control rules are empty in config file"); + bail!("All ACL rules/subjects/policies config lists must be provided"); } } Ok(()) @@ -221,17 +373,27 @@ impl PolicyEnforcer { */ pub fn policy_information_point( &self, - config_rule_set: &Vec, + subjects: Vec, + rules: Vec, + policies: Vec, ) -> ZResult { let mut policy_rules: Vec = Vec::new(); - for config_rule in config_rule_set { - // config validation - let mut validation_err = String::new(); - if config_rule.interfaces.as_ref().unwrap().is_empty() { - validation_err.push_str("ACL config interfaces list is empty. "); + let mut rule_map = HashMap::new(); + let mut subject_id_map = HashMap::>::new(); + let mut subject_map_builder = SubjectMapBuilder::new(); + + // validate rules config and insert them in hashmaps + for config_rule in rules { + if rule_map.contains_key(&config_rule.id) { + bail!( + "Rule id must be unique: id '{}' is repeated", + config_rule.id + ); } - if config_rule.actions.is_empty() { - validation_err.push_str("ACL config actions list is empty. 
"); + // Config validation + let mut validation_err = String::new(); + if config_rule.messages.is_empty() { + validation_err.push_str("ACL config messages list is empty. "); } if config_rule.flows.as_ref().unwrap().is_empty() { validation_err.push_str("ACL config flows list is empty. "); @@ -240,62 +402,186 @@ impl PolicyEnforcer { validation_err.push_str("ACL config key_exprs list is empty. "); } if !validation_err.is_empty() { - bail!("{}", validation_err); + bail!("Rule '{}' is malformed: {}", config_rule.id, validation_err); } - for subject in config_rule.interfaces.as_ref().unwrap() { - if subject.trim().is_empty() { - bail!("found an empty interface value in interfaces list"); + for key_expr in config_rule.key_exprs.iter() { + if key_expr.trim().is_empty() { + bail!("Found empty key expression in rule '{}'", config_rule.id); } - for flow in config_rule.flows.as_ref().unwrap() { - for action in &config_rule.actions { - for key_expr in &config_rule.key_exprs { - if key_expr.trim().is_empty() { - bail!("found an empty key-expression value in key_exprs list"); + } + rule_map.insert(config_rule.id.clone(), config_rule); + } + + for config_subject in subjects.into_iter() { + if subject_id_map.contains_key(&config_subject.id) { + bail!( + "Subject id must be unique: id '{}' is repeated", + config_subject.id + ); + } + // validate subject config fields + if config_subject + .interfaces + .as_ref() + .is_some_and(|interfaces| interfaces.iter().any(|face| face.0.trim().is_empty())) + { + bail!( + "Found empty interface value in subject '{}'", + config_subject.id + ); + } + if config_subject + .cert_common_names + .as_ref() + .is_some_and(|cert_common_names| { + cert_common_names.iter().any(|ccn| ccn.0.trim().is_empty()) + }) + { + bail!( + "Found empty cert_common_name value in subject '{}'", + config_subject.id + ); + } + if config_subject.usernames.as_ref().is_some_and(|usernames| { + usernames + .iter() + .any(|username| username.0.trim().is_empty()) + }) { + bail!( + "Found empty username value in subject '{}'", + config_subject.id + ); + } + // Map properties to SubjectProperty type + // FIXME: Unnecessary .collect() because of different iterator types + let interfaces = config_subject + .interfaces + .map(|interfaces| { + interfaces + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); + // FIXME: Unnecessary .collect() because of different iterator types + let cert_common_names = config_subject + .cert_common_names + .map(|cert_common_names| { + cert_common_names + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); + // FIXME: Unnecessary .collect() because of different iterator types + let usernames = config_subject + .usernames + .map(|usernames| { + usernames + .into_iter() + .map(SubjectProperty::Exactly) + .collect::>() + }) + .unwrap_or(vec![SubjectProperty::Wildcard]); + + // create ACL subject combinations + let subject_combination_ids = interfaces + .into_iter() + .cartesian_product(cert_common_names) + .cartesian_product(usernames) + .map(|((interface, cert_common_name), username)| { + let subject = Subject { + interface, + cert_common_name, + username, + }; + subject_map_builder.insert_or_get(subject) + }) + .collect(); + subject_id_map.insert(config_subject.id.clone(), subject_combination_ids); + } + // finally, handle policy content + for (entry_id, entry) in policies.iter().enumerate() { + // validate policy config lists + if entry.rules.is_empty() 
|| entry.subjects.is_empty() { + bail!( + "Policy #{} is malformed: empty subjects or rules list", + entry_id + ); + } + for subject_config_id in &entry.subjects { + if subject_config_id.trim().is_empty() { + bail!("Found empty subject id in policy #{}", entry_id) + } + if !subject_id_map.contains_key(subject_config_id) { + bail!( + "Subject '{}' in policy #{} does not exist in subjects list", + subject_config_id, + entry_id + ) + } + } + // Create PolicyRules + for rule_id in &entry.rules { + if rule_id.trim().is_empty() { + bail!("Found empty rule id in policy #{}", entry_id) + } + let rule = rule_map.get(rule_id).ok_or(zerror!( + "Rule '{}' in policy #{} does not exist in rules list", + rule_id, + entry_id + ))?; + for subject_config_id in &entry.subjects { + let subject_combination_ids = subject_id_map + .get(subject_config_id) + .expect("config subject id should exist in subject_id_map"); + for subject_id in subject_combination_ids { + for flow in rule + .flows + .as_ref() + .expect("flows list should be defined in rule") + { + for message in &rule.messages { + for key_expr in &rule.key_exprs { + policy_rules.push(PolicyRule { + subject_id: *subject_id, + key_expr: key_expr.clone(), + message: *message, + permission: rule.permission, + flow: *flow, + }); + } } - policy_rules.push(PolicyRule { - subject: Subject::Interface(subject.clone()), - key_expr: key_expr.clone(), - action: *action, - permission: config_rule.permission, - flow: *flow, - }) } } } } } - let mut subject_map = SubjectMap::default(); - let mut counter = 1; - //starting at 1 since 0 is the init value and should not match anything - for rule in policy_rules.iter() { - if !subject_map.contains_key(&rule.subject) { - subject_map.insert(rule.subject.clone(), counter); - counter += 1; - } - } Ok(PolicyInformation { - subject_map, + subject_map: subject_map_builder.build(), policy_rules, }) } - /* - checks each msg against the ACL ruleset for allow/deny - */ - + /** + * Check each msg against the ACL ruleset for allow/deny + */ pub fn policy_decision_point( &self, subject: usize, flow: InterceptorFlow, - action: Action, + message: AclMessage, key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; + if policy_map.is_empty() { + return Ok(self.default_permission); + } match policy_map.get(&subject) { Some(single_policy) => { let deny_result = single_policy .flow(flow) - .action(action) + .action(message) .deny .nodes_including(keyexpr::new(&key_expr)?) .count(); @@ -307,7 +593,7 @@ impl PolicyEnforcer { } else { let allow_result = single_policy .flow(flow) - .action(action) + .action(message) .allow .nodes_including(keyexpr::new(&key_expr)?) .count(); diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 34c59ac07d..c8881341e0 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -18,17 +18,21 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use crate::net::routing::interceptor::*; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; use zenoh_core::zlock; -use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; -use zenoh_keyexpr::keyexpr_tree::{support::UnknownWildness, KeBoxTree}; -use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut}; +use zenoh_keyexpr::keyexpr_tree::{ + impls::KeyedSetProvider, support::UnknownWildness, IKeyExprTree, IKeyExprTreeMut, KeBoxTree, +}; use zenoh_protocol::network::NetworkBody; use zenoh_result::ZResult; +use crate::net::routing::interceptor::*; + pub(crate) fn downsampling_interceptor_factories( config: &Vec, ) -> ZResult> { @@ -122,11 +126,12 @@ pub(crate) struct DownsamplingInterceptor { impl InterceptorTrait for DownsamplingInterceptor { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { let ke_id = zlock!(self.ke_id); - if let Some(id) = ke_id.weight_at(&key_expr.clone()) { - Some(Box::new(Some(*id))) - } else { - Some(Box::new(None::)) + if let Some(node) = ke_id.intersecting_keys(key_expr).next() { + if let Some(id) = ke_id.weight_at(&node) { + return Some(Box::new(Some(*id))); + } } + Some(Box::new(None::)) } fn intercept( @@ -184,6 +189,11 @@ impl DownsamplingInterceptor { latest_message_timestamp, }, ); + tracing::debug!( + "New downsampler rule enabled: key_expr={:?}, threshold={:?}", + rule.key_expr, + threshold + ); } Self { ke_id: Arc::new(Mutex::new(ke_id)), diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index d7a5fc63f3..ba0209de2d 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -22,8 +22,6 @@ mod access_control; use access_control::acl_interceptor_factories; mod authorization; -use super::RoutingContext; -use crate::KeyExpr; use std::any::Any; use zenoh_config::Config; @@ -31,6 +29,9 @@ use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; +use super::RoutingContext; +use crate::api::key_expr::KeyExpr; + pub mod downsampling; use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; @@ -159,7 +160,6 @@ impl InterceptorTrait for ComputeOnMiss { } #[allow(dead_code)] - pub(crate) struct IngressMsgLogger {} impl InterceptorTrait for IngressMsgLogger { diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index afc49003f8..f4a5e4c2da 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -24,15 +24,14 @@ pub mod router; use std::{cell::OnceCell, sync::Arc}; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; +use zenoh_protocol::{ + core::{key_expr::OwnedKeyExpr, WireExpr}, + network::NetworkMessage, +}; use self::{dispatcher::face::Face, router::Resource}; - use super::runtime; -pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; - pub(crate) struct RoutingContext { pub(crate) msg: Msg, pub(crate) inface: OnceCell, @@ -100,13 +99,13 @@ impl RoutingContext { impl RoutingContext { #[inline] pub(crate) fn wire_expr(&self) -> Option<&WireExpr> { - use zenoh_protocol::network::DeclareBody; - use zenoh_protocol::network::NetworkBody; + use zenoh_protocol::network::{DeclareBody, NetworkBody}; match 
&self.msg.body { NetworkBody::Push(m) => Some(&m.wire_expr), NetworkBody::Request(m) => Some(&m.wire_expr), NetworkBody::Response(m) => Some(&m.wire_expr), NetworkBody::ResponseFinal(_) => None, + NetworkBody::Interest(m) => m.wire_expr.as_ref(), NetworkBody::Declare(m) => match &m.body { DeclareBody::DeclareKeyExpr(m) => Some(&m.wire_expr), DeclareBody::UndeclareKeyExpr(_) => None, @@ -116,9 +115,7 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => Some(&m.wire_expr), - DeclareBody::FinalInterest(_) => None, - DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareFinal(_) => None, }, NetworkBody::OAM(_) => None, } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index d8a5ee4526..cd525189d3 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -11,33 +11,33 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::dispatcher::face::{Face, FaceState}; -pub use super::dispatcher::pubsub::*; -pub use super::dispatcher::queries::*; -pub use super::dispatcher::resource::*; -use super::dispatcher::tables::Tables; -use super::dispatcher::tables::TablesLock; -use super::hat; -use super::interceptor::EgressInterceptor; -use super::interceptor::InterceptorsChain; -use super::runtime::Runtime; -use crate::net::primitives::DeMux; -use crate::net::primitives::DummyPrimitives; -use crate::net::primitives::EPrimitives; -use crate::net::primitives::McastMux; -use crate::net::primitives::Mux; -use crate::net::routing::interceptor::IngressInterceptor; -use std::str::FromStr; -use std::sync::Arc; -use std::sync::{Mutex, RwLock}; +use std::{ + str::FromStr, + sync::{Arc, Mutex, RwLock}, +}; + use uhlc::HLC; use zenoh_config::Config; -use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_transport::multicast::TransportMulticast; -use zenoh_transport::unicast::TransportUnicast; -use zenoh_transport::TransportPeer; +use zenoh_protocol::core::{WhatAmI, ZenohIdProto}; // use zenoh_collections::Timer; use zenoh_result::ZResult; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast, TransportPeer}; + +pub(crate) use super::dispatcher::token::*; +pub use super::dispatcher::{pubsub::*, queries::*, resource::*}; +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{Tables, TablesLock}, + }, + hat, + interceptor::{EgressInterceptor, InterceptorsChain}, + runtime::Runtime, +}; +use crate::net::{ + primitives::{DeMux, DummyPrimitives, EPrimitives, McastMux, Mux}, + routing::interceptor::IngressInterceptor, +}; pub struct Router { // whatami: WhatAmI, @@ -46,7 +46,7 @@ pub struct Router { impl Router { pub fn new( - zid: ZenohId, + zid: ZenohIdProto, whatami: WhatAmI, hlc: Option>, config: &Config, @@ -61,7 +61,6 @@ impl Router { }) } - #[allow(clippy::too_many_arguments)] pub fn init_link_state(&mut self, runtime: Runtime) { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); @@ -195,7 +194,7 @@ impl Router { let mux = Arc::new(McastMux::new(transport.clone(), interceptor)); let face = FaceState::new( fid, - ZenohId::from_str("1").unwrap(), + ZenohIdProto::from_str("1").unwrap(), WhatAmI::Peer, #[cfg(feature = "stats")] None, diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 
0ba661c8f1..ce87d68ef0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -10,62 +10,71 @@ // // Contributors: // ZettaScale Zenoh Team, -use super::routing::dispatcher::face::Face; -use super::Runtime; -use crate::key_expr::KeyExpr; -use crate::net::primitives::Primitives; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::{Sample, SyncResolve}; -use crate::queryable::Query; -use crate::queryable::QueryInner; -use crate::value::Value; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + sync::{Arc, Mutex}, +}; + use serde_json::json; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::sync::Arc; -use std::sync::Mutex; use tracing::{error, trace}; use zenoh_buffers::buffer::SplitBuffer; -use zenoh_config::{unwrap_or_default, ConfigValidator, ValidatedMap, WhatAmI}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +use zenoh_config::{unwrap_or_default, wrappers::ZenohId, ConfigValidator, ValidatedMap, WhatAmI}; +use zenoh_core::Wait; +#[cfg(feature = "plugins")] use zenoh_plugin_trait::{PluginControl, PluginStatus}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, + core::{key_expr::OwnedKeyExpr, ExprId, WireExpr, EMPTY_EXPR_ID}, network::{ - declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, - ResponseFinal, + declare::{ + queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, + }, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Interest, Push, Request, + Response, ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; use zenoh_transport::unicast::TransportUnicast; +use super::{routing::dispatcher::face::Face, Runtime}; +#[cfg(feature = "plugins")] +use crate::api::plugins::PluginsManager; +use crate::{ + api::{ + builders::sample::EncodingBuilderTrait, + bytes::ZBytes, + key_expr::KeyExpr, + queryable::{Query, QueryInner}, + value::Value, + }, + bytes::Encoding, + net::primitives::Primitives, +}; + pub struct AdminContext { runtime: Runtime, version: String, - metadata: serde_json::Value, } type Handler = Arc; pub struct AdminSpace { zid: ZenohId, + queryable_id: QueryableId, primitives: Mutex>>, mappings: Mutex>, handlers: HashMap, context: Arc, } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] #[derive(Debug, Clone)] enum PluginDiff { Delete(String), - Start(crate::config::PluginLoad), + Start(zenoh_config::PluginLoad), } impl ConfigValidator for AdminSpace { @@ -76,7 +85,7 @@ impl ConfigValidator for AdminSpace { current: &serde_json::Map, new: &serde_json::Map, ) -> ZResult>> { - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] { let plugins_mgr = self.context.runtime.plugins_manager(); let Some(plugin) = plugins_mgr.started_plugin(name) else { @@ -87,7 +96,7 @@ impl ConfigValidator for AdminSpace { }; plugin.instance().config_checker(path, current, new) } - #[cfg(not(all(feature = "unstable", feature = "plugins")))] + #[cfg(not(feature = "plugins"))] { let _ = (name, path, current, new); Ok(None) @@ -96,10 +105,10 @@ impl ConfigValidator for AdminSpace 
{ } impl AdminSpace { - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] fn start_plugin( - plugin_mgr: &mut plugins::PluginsManager, - config: &crate::config::PluginLoad, + plugin_mgr: &mut PluginsManager, + config: &zenoh_config::PluginLoad, start_args: &Runtime, required: bool, ) -> ZResult<()> { @@ -122,7 +131,16 @@ impl AdminSpace { ); loaded } else { - declared.load()? + match declared.load()? { + Some(loaded) => loaded, + None => { + tracing::warn!( + "Plugin `{}` will not be loaded as plugin loading is disabled", + config.name + ); + return Ok(()); + } + } }; if let Some(started) = loaded.started_mut() { @@ -143,20 +161,19 @@ impl AdminSpace { let zid_str = runtime.state.zid.to_string(); let whatami_str = runtime.state.whatami.to_str(); let mut config = runtime.config().lock(); - let metadata = runtime.state.metadata.clone(); - let root_key: OwnedKeyExpr = format!("@/{whatami_str}/{zid_str}").try_into().unwrap(); + let root_key: OwnedKeyExpr = format!("@/{zid_str}/{whatami_str}").try_into().unwrap(); let mut handlers: HashMap<_, Handler> = HashMap::new(); handlers.insert(root_key.clone(), Arc::new(local_data)); handlers.insert( - format!("@/{whatami_str}/{zid_str}/metrics") + format!("@/{zid_str}/{whatami_str}/metrics") .try_into() .unwrap(), Arc::new(metrics), ); if runtime.state.whatami == WhatAmI::Router { handlers.insert( - format!("@/{whatami_str}/{zid_str}/linkstate/routers") + format!("@/{zid_str}/{whatami_str}/linkstate/routers") .try_into() .unwrap(), Arc::new(routers_linkstate_data), @@ -166,42 +183,42 @@ impl AdminSpace { && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate" { handlers.insert( - format!("@/{whatami_str}/{zid_str}/linkstate/peers") + format!("@/{zid_str}/{whatami_str}/linkstate/peers") .try_into() .unwrap(), Arc::new(peers_linkstate_data), ); } handlers.insert( - format!("@/{whatami_str}/{zid_str}/subscriber/**") + format!("@/{zid_str}/{whatami_str}/subscriber/**") .try_into() .unwrap(), Arc::new(subscribers_data), ); handlers.insert( - format!("@/{whatami_str}/{zid_str}/queryable/**") + format!("@/{zid_str}/{whatami_str}/queryable/**") .try_into() .unwrap(), Arc::new(queryables_data), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] handlers.insert( - format!("@/{whatami_str}/{zid_str}/plugins/**") + format!("@/{zid_str}/{whatami_str}/plugins/**") .try_into() .unwrap(), Arc::new(plugins_data), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] handlers.insert( - format!("@/{whatami_str}/{zid_str}/status/plugins/**") + format!("@/{zid_str}/{whatami_str}/status/plugins/**") .try_into() .unwrap(), Arc::new(plugins_status), ); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let mut active_plugins = runtime .plugins_manager() .started_plugins_iter() @@ -211,10 +228,10 @@ impl AdminSpace { let context = Arc::new(AdminContext { runtime: runtime.clone(), version, - metadata, }); let admin = Arc::new(AdminSpace { - zid: runtime.state.zid, + zid: runtime.zid(), + queryable_id: runtime.next_id(), primitives: Mutex::new(None), mappings: Mutex::new(HashMap::new()), handlers, @@ -223,7 +240,7 @@ impl AdminSpace { config.set_plugin_validator(Arc::downgrade(&admin)); - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] { let cfg_rx = admin.context.runtime.state.config.subscribe(); @@ -299,27 +316,27 @@ impl AdminSpace { 
zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // @TODO use proper QueryableId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/**"].concat().into(), - ext_info: QueryableInfo { - complete: 0, - distance: 0, - }, + ext_info: QueryableInfoType::DEFAULT, }), }); primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId (#703) + id: runtime.next_id(), wire_expr: [&root_key, "/config/**"].concat().into(), - ext_info: SubscriberInfo::default(), + ext_info: SubscriberInfo::DEFAULT, }), }); } @@ -342,6 +359,10 @@ impl AdminSpace { } impl Primitives for AdminSpace { + fn send_interest(&self, msg: Interest) { + tracing::trace!("Recv interest {:?}", msg); + } + fn send_declare(&self, msg: Declare) { tracing::trace!("Recv declare {:?}", msg); if let DeclareBody::DeclareKeyExpr(m) = msg.body { @@ -369,24 +390,24 @@ impl Primitives for AdminSpace { if let Some(key) = msg.wire_expr.as_str().strip_prefix(&format!( "@/{}/{}/config/", - self.context.runtime.state.whatami, self.context.runtime.state.zid + self.context.runtime.state.zid, self.context.runtime.state.whatami, )) { match msg.payload { PushBody::Put(put) => match std::str::from_utf8(&put.payload.contiguous()) { Ok(json) => { tracing::trace!( - "Insert conf value /@/{}/{}/config/{} : {}", - self.context.runtime.state.whatami, + "Insert conf value @/{}/{}/config/{} : {}", self.context.runtime.state.zid, + self.context.runtime.state.whatami, key, json ); if let Err(e) = (&self.context.runtime.state.config).insert_json5(key, json) { error!( - "Error inserting conf value /@/{}/{}/config/{} : {} - {}", - self.context.runtime.state.whatami, + "Error inserting conf value @/{}/{}/config/{} : {} - {}", self.context.runtime.state.zid, + self.context.runtime.state.whatami, key, json, e @@ -394,15 +415,15 @@ impl Primitives for AdminSpace { } } Err(e) => error!( - "Received non utf8 conf value on /@/{}/{}/config/{} : {}", - self.context.runtime.state.whatami, self.context.runtime.state.zid, key, e + "Received non utf8 conf value on @/{}/{}/config/{} : {}", + self.context.runtime.state.zid, self.context.runtime.state.whatami, key, e ), }, PushBody::Del(_) => { tracing::trace!( "Deleting conf value /@/{}/{}/config/{}", - self.context.runtime.state.whatami, self.context.runtime.state.zid, + self.context.runtime.state.whatami, key ); if let Err(e) = self.context.runtime.state.config.remove(key) { @@ -415,57 +436,56 @@ impl Primitives for AdminSpace { fn send_request(&self, msg: Request) { trace!("recv Request {:?}", msg); - if let RequestBody::Query(query) = msg.payload { - let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); - { - let conf = self.context.runtime.state.config.lock(); - if !conf.adminspace.permissions().read { - tracing::error!( + match msg.payload { + RequestBody::Query(query) => { + let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); + { + let conf = self.context.runtime.state.config.lock(); + if !conf.adminspace.permissions().read { + 
tracing::error!( "Received GET on '{}' but adminspace.permissions.read=false in configuration", msg.wire_expr ); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - return; - } - } - - let key_expr = match self.key_expr_to_string(&msg.wire_expr) { - Ok(key_expr) => key_expr.into_owned(), - Err(e) => { - tracing::error!("Unknown KeyExpr: {}", e); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - return; + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } } - }; - let zid = self.zid; - let parameters = query.parameters.to_owned(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr: key_expr.clone(), - parameters, - value: query - .ext_body - .map(|b| Value::from(b.payload).encoding(b.encoding)), - qid: msg.id, - zid, - primitives, - #[cfg(feature = "unstable")] + let key_expr = match self.key_expr_to_string(&msg.wire_expr) { + Ok(key_expr) => key_expr.into_owned(), + Err(e) => { + tracing::error!("Unknown KeyExpr: {}", e); + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::RESPONSE_FINAL, + ext_tstamp: None, + }); + return; + } + }; + + let zid = self.zid; + let query = Query { + inner: Arc::new(QueryInner { + key_expr: key_expr.clone(), + parameters: query.parameters.into(), + qid: msg.id, + zid: zid.into(), + primitives, + }), + eid: self.queryable_id, + value: query.ext_body.map(|b| Value::new(b.payload, b.encoding)), attachment: query.ext_attachment.map(Into::into), - }), - }; + }; - for (key, handler) in &self.handlers { - if key_expr.intersects(key) { - handler(&self.context, query.clone()); + for (key, handler) in &self.handlers { + if key_expr.intersects(key) { + handler(&self.context, query.clone()); + } } } } @@ -485,6 +505,11 @@ impl Primitives for AdminSpace { } impl crate::net::primitives::EPrimitives for AdminSpace { + #[inline] + fn send_interest(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_interest(ctx.msg) + } + #[inline] fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { (self as &dyn Primitives).send_declare(ctx.msg) @@ -496,18 +521,18 @@ impl crate::net::primitives::EPrimitives for AdminSpace { } #[inline] - fn send_request(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_request(ctx.msg) + fn send_request(&self, msg: Request) { + (self as &dyn Primitives).send_request(msg) } #[inline] - fn send_response(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response(ctx.msg) + fn send_response(&self, msg: Response) { + (self as &dyn Primitives).send_response(msg) } #[inline] - fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_response_final(ctx.msg) + fn send_response_final(&self, msg: ResponseFinal) { + (self as &dyn Primitives).send_response_final(msg) } fn as_any(&self) -> &dyn std::any::Any { @@ -518,7 +543,7 @@ impl crate::net::primitives::EPrimitives for AdminSpace { fn local_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -526,7 +551,7 @@ fn local_data(context: &AdminContext, 
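The EPrimitives side of AdminSpace now receives `Request`, `Response` and `ResponseFinal` directly instead of wrapped in a `RoutingContext`, and simply forwards each call to the `Primitives` implementation through an unsized coercion. A stripped-down, runnable sketch of that delegation pattern (types reduced to stubs):

```rust
struct Request(u32);

trait Primitives {
    fn send_request(&self, msg: Request);
}

trait EPrimitives {
    fn send_request(&self, msg: Request);
}

struct AdminSpace;

impl Primitives for AdminSpace {
    fn send_request(&self, msg: Request) {
        println!("handling request {}", msg.0);
    }
}

impl EPrimitives for AdminSpace {
    #[inline]
    fn send_request(&self, msg: Request) {
        // Reuse the Primitives impl via a trait-object coercion,
        // as adminspace.rs does with `(self as &dyn Primitives)`.
        (self as &dyn Primitives).send_request(msg)
    }
}

fn main() {
    EPrimitives::send_request(&AdminSpace, Request(42));
}
```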
query: Query) { let transport_mgr = context.runtime.manager().clone(); // plugins info - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let plugins: serde_json::Value = { let plugins_mgr = context.runtime.plugins_manager(); plugins_mgr @@ -557,8 +582,10 @@ fn local_data(context: &AdminContext, query: Query) { }); #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), @@ -580,7 +607,7 @@ fn local_data(context: &AdminContext, query: Query) { let mut json = json!({ "zid": context.runtime.state.zid, "version": context.version, - "metadata": context.metadata, + "metadata": context.runtime.config().lock().metadata(), "locators": locators, "sessions": transports, "plugins": plugins, @@ -588,8 +615,10 @@ fn local_data(context: &AdminContext, query: Query) { #[cfg(feature = "stats")] { - let stats = crate::prelude::Parameters::decode(&query.selector()) - .any(|(k, v)| k.as_ref() == "_stats" && v != "false"); + let stats = query + .parameters() + .iter() + .any(|(k, v)| k == "_stats" && v != "false"); if stats { json.as_object_mut().unwrap().insert( "stats".to_string(), @@ -599,13 +628,17 @@ fn local_data(context: &AdminContext, query: Query) { } tracing::trace!("AdminSpace router_data: {:?}", json); + let payload = match ZBytes::try_from(json) { + Ok(p) => p, + Err(e) => { + tracing::error!("Error serializing AdminSpace reply: {:?}", e); + return; + } + }; if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(json.to_string().as_bytes().to_vec()) - .encoding(KnownEncoding::AppJson.into()), - ))) - .res() + .reply(reply_key, payload) + .encoding(Encoding::APPLICATION_JSON) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -614,7 +647,7 @@ fn local_data(context: &AdminContext, query: Query) { fn metrics(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/metrics", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -638,11 +671,9 @@ zenoh_build{{version="{}"}} 1 ); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from(metrics.as_bytes().to_vec()).encoding(KnownEncoding::TextPlain.into()), - ))) - .res() + .reply(reply_key, metrics) + .encoding(Encoding::TEXT_PLAIN) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -651,7 +682,7 @@ zenoh_build{{version="{}"}} 1 fn routers_linkstate_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/linkstate/routers", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -659,18 +690,9 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Router) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), - ))) - .res() + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) + .encoding(Encoding::TEXT_PLAIN) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -679,7 
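local_data, metrics and the linkstate handlers above all switch from the old `reply(Ok(Sample::new(..)))...res()` form to the new builder: key expression and payload first, encoding attached with `.encoding(..)`, synchronous resolution through `.wait()` (the `Wait` trait imported at the top of the file). A hedged application-level sketch of the same pattern; the `serde_json` usage mirrors this file, while the helper itself and the `zenoh::query::Query` path are assumptions about the reorganized API:

```rust
use zenoh::{bytes::Encoding, prelude::*};

// Hypothetical helper: reply to a query with a JSON payload.
fn reply_json(query: &zenoh::query::Query, key: &str, value: &serde_json::Value) {
    let payload = value.to_string(); // serialize up front so failures stay loggable
    if let Err(e) = query
        .reply(key, payload)                  // key expression + payload
        .encoding(Encoding::APPLICATION_JSON) // declare the payload encoding
        .wait()                               // resolve synchronously via `Wait`
    {
        tracing::error!("Error sending reply: {:?}", e);
    }
}
```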
+701,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { fn peers_linkstate_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!( "@/{}/{}/linkstate/peers", - context.runtime.state.whatami, context.runtime.state.zid + context.runtime.state.zid, context.runtime.state.whatami ) .try_into() .unwrap(); @@ -687,18 +709,9 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from( - tables - .hat_code - .info(&tables, WhatAmI::Peer) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), - ))) - .res() + .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) + .encoding(Encoding::TEXT_PLAIN) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -709,19 +722,18 @@ fn subscribers_data(context: &AdminContext, query: Query) { for sub in tables.hat_code.get_subscriptions(&tables) { let key = KeyExpr::try_from(format!( "@/{}/{}/subscriber/{}", - context.runtime.state.whatami, context.runtime.state.zid, + context.runtime.state.whatami, sub.0.expr() )) .unwrap(); if query.key_expr().intersects(&key) { + let payload = + ZBytes::from(serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query - .reply(Ok(Sample::new( - key, - Value::from(serde_json::to_string(&sub.1).unwrap_or_else(|_| "{}".to_string())) - .encoding(KnownEncoding::AppJson.into()), - ))) - .res() + .reply(key, payload) + .encoding(Encoding::APPLICATION_JSON) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -734,21 +746,18 @@ fn queryables_data(context: &AdminContext, query: Query) { for qabl in tables.hat_code.get_queryables(&tables) { let key = KeyExpr::try_from(format!( "@/{}/{}/queryable/{}", - context.runtime.state.whatami, context.runtime.state.zid, + context.runtime.state.whatami, qabl.0.expr() )) .unwrap(); if query.key_expr().intersects(&key) { + let payload = + ZBytes::from(serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string())); if let Err(e) = query - .reply(Ok(Sample::new( - key, - Value::from( - serde_json::to_string(&qabl.1).unwrap_or_else(|_| "{}".to_string()), - ) - .encoding(KnownEncoding::AppJson.into()), - ))) - .res() + .reply(key, payload) + .encoding(Encoding::APPLICATION_JSON) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -756,12 +765,12 @@ fn queryables_data(context: &AdminContext, query: Query) { } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn plugins_data(context: &AdminContext, query: Query) { let guard = context.runtime.plugins_manager(); let root_key = format!( "@/{}/{}/plugins", - context.runtime.state.whatami, &context.runtime.state.zid + &context.runtime.state.zid, context.runtime.state.whatami ); let root_key = unsafe { keyexpr::from_str_unchecked(&root_key) }; tracing::debug!("requested plugins status {:?}", query.key_expr()); @@ -771,20 +780,29 @@ fn plugins_data(context: &AdminContext, query: Query) { tracing::debug!("plugin status: {:?}", status); let key = root_key.join(status.id()).unwrap(); let status = serde_json::to_value(status).unwrap(); - if let Err(e) = query.reply(Ok(Sample::new(key, Value::from(status)))).res() { - tracing::error!("Error sending AdminSpace reply: {:?}", e); + match ZBytes::try_from(status) { + Ok(zbuf) => { + if let Err(e) = query + .reply(key, zbuf) + 
.encoding(Encoding::APPLICATION_JSON) + .wait() + { + tracing::error!("Error sending AdminSpace reply: {:?}", e); + } + } + Err(e) => tracing::debug!("Admin query error: {}", e), } } } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn plugins_status(context: &AdminContext, query: Query) { - let selector = query.selector(); + let key_expr = query.key_expr(); let guard = context.runtime.plugins_manager(); let mut root_key = format!( "@/{}/{}/status/plugins/", - context.runtime.state.whatami, &context.runtime.state.zid + &context.runtime.state.zid, context.runtime.state.whatami ); for plugin in guard.started_plugins_iter() { @@ -794,11 +812,9 @@ fn plugins_status(context: &AdminContext, query: Query) { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { if let Err(e) = query - .reply(Ok(Sample::new( - key_expr, - serde_json::Value::String(plugin.path().into()), - ))) - .res() + .reply(key_expr, plugin.path()) + .encoding(Encoding::TEXT_PLAIN) + .wait() { tracing::error!("Error sending AdminSpace reply: {:?}", e); } @@ -816,18 +832,18 @@ fn plugins_status(context: &AdminContext, query: Query) { return; } match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - plugin.instance().adminspace_getter(&selector, plugin_key) + plugin.instance().adminspace_getter(key_expr, plugin_key) })) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - if let Err(e) = query.reply(Ok(Sample::new( - key_expr, - Value::from(response.value).encoding(KnownEncoding::AppJson.into()), - ))) - .res() - { - tracing::error!("Error sending AdminSpace reply: {:?}", e); + match ZBytes::try_from(response.value) { + Ok(zbuf) => { + if let Err(e) = query.reply(key_expr, zbuf).encoding(Encoding::APPLICATION_JSON).wait() { + tracing::error!("Error sending AdminSpace reply: {:?}", e); + } + }, + Err(e) => tracing::debug!("Admin query error: {}", e), } } else { tracing::error!("Error: plugin {} replied with an invalid key", plugin_key); @@ -850,7 +866,7 @@ fn plugins_status(context: &AdminContext, query: Query) { } } -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] fn with_extended_string R>( prefix: &mut String, suffixes: &[&str], diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index c3f8815a50..9abb01b94e 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -20,29 +20,34 @@ mod adminspace; pub mod orchestrator; -use super::primitives::DeMux; -use super::routing; -use super::routing::router::Router; -use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; -#[cfg(all(feature = "unstable", feature = "plugins"))] -use crate::plugins::sealed::PluginsManager; -use crate::{GIT_VERSION, LONG_VERSION}; -pub use adminspace::AdminSpace; -use futures::stream::StreamExt; -use futures::Future; -use std::any::Any; -use std::sync::{Arc, Weak}; -#[cfg(all(feature = "unstable", feature = "plugins"))] +#[cfg(feature = "plugins")] use std::sync::{Mutex, MutexGuard}; -use std::time::Duration; +use std::{ + any::Any, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, Weak, + }, + time::Duration, +}; + +pub use adminspace::AdminSpace; +use futures::{stream::StreamExt, Future}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; +use zenoh_config::wrappers::ZenohId; use zenoh_link::{EndPoint, Link}; use zenoh_plugin_trait::{PluginStartArgs, 
StructVersion}; -use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; -use zenoh_protocol::network::NetworkMessage; +use zenoh_protocol::{ + core::{Locator, WhatAmI}, + network::NetworkMessage, +}; use zenoh_result::{bail, ZResult}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::api::client_storage::ShmClientStorage; +#[cfg(feature = "shared-memory")] +use zenoh_shm::reader::ShmReader; use zenoh_sync::get_mut_unchecked; use zenoh_task::TaskController; use zenoh_transport::{ @@ -50,10 +55,21 @@ use zenoh_transport::{ TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; +use self::orchestrator::StartConditions; +use super::{primitives::DeMux, routing, routing::router::Router}; +#[cfg(feature = "plugins")] +use crate::api::loader::{load_plugins, start_plugins}; +#[cfg(feature = "plugins")] +use crate::api::plugins::PluginsManager; +use crate::{ + config::{unwrap_or_default, Config, ModeDependent, Notifier}, + GIT_VERSION, LONG_VERSION, +}; + pub(crate) struct RuntimeState { zid: ZenohId, whatami: WhatAmI, - metadata: serde_json::Value, + next_id: AtomicU32, router: Arc, config: Notifier, manager: TransportManager, @@ -61,8 +77,9 @@ pub(crate) struct RuntimeState { locators: std::sync::RwLock>, hlc: Option>, task_controller: TaskController, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Mutex, + start_conditions: Arc, } pub struct WeakRuntime { @@ -77,38 +94,49 @@ impl WeakRuntime { pub struct RuntimeBuilder { config: Config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Option, + #[cfg(feature = "shared-memory")] + shm_clients: Option>, } impl RuntimeBuilder { pub fn new(config: Config) -> Self { Self { config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: None, + #[cfg(feature = "shared-memory")] + shm_clients: None, } } - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] pub fn plugins_manager>>(mut self, plugins_manager: T) -> Self { self.plugins_manager = plugins_manager.into(); self } + #[cfg(feature = "shared-memory")] + pub fn shm_clients(mut self, shm_clients: Option>) -> Self { + self.shm_clients = shm_clients; + self + } + pub async fn build(self) -> ZResult { let RuntimeBuilder { config, - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] mut plugins_manager, + #[cfg(feature = "shared-memory")] + shm_clients, } = self; tracing::debug!("Zenoh Rust API {}", GIT_VERSION); - let zid = *config.id(); + let zid = (*config.id()).into(); tracing::info!("Using ZID: {}", zid); let whatami = unwrap_or_default!(config.mode()); - let metadata = config.metadata().clone(); let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); @@ -118,27 +146,32 @@ impl RuntimeBuilder { runtime: std::sync::RwLock::new(WeakRuntime { state: Weak::new() }), }); - let transport_manager = TransportManager::builder() + let transport_manager_builder = TransportManager::builder() .from_config(&config) .await? 
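RuntimeBuilder grows an optional `shm_clients` slot (under `shared-memory`) alongside the existing `plugins_manager` one. A sketch of how a caller could inject an externally built client storage, using only the builder methods visible in this diff and written as if inside the runtime module so `Runtime` and `RuntimeBuilder` resolve; how the `ShmClientStorage` itself is constructed is left as an assumption:

```rust
#[cfg(feature = "shared-memory")]
async fn build_with_shm(
    config: zenoh_config::Config,
    storage: std::sync::Arc<zenoh_shm::api::client_storage::ShmClientStorage>,
) -> zenoh_result::ZResult<Runtime> {
    RuntimeBuilder::new(config)
        .shm_clients(Some(storage)) // wrapped into an ShmReader before the transport manager is built
        .build()
        .await
}
```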
.whatami(whatami) - .zid(zid) - .build(handler.clone())?; + .zid(zid); + + #[cfg(feature = "shared-memory")] + let transport_manager_builder = + transport_manager_builder.shm_reader(shm_clients.map(ShmReader::new)); + + let transport_manager = transport_manager_builder.build(handler.clone())?; // Plugins manager - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] let plugins_manager = plugins_manager .take() - .unwrap_or_else(|| crate::plugins::loader::load_plugins(&config)); + .unwrap_or_else(|| load_plugins(&config)); // Admin space creation flag let start_admin_space = *config.adminspace.enabled(); let config = Notifier::new(config); let runtime = Runtime { state: Arc::new(RuntimeState { - zid, + zid: zid.into(), whatami, - metadata, + next_id: AtomicU32::new(1), // 0 is reserved for routing core router, config: config.clone(), manager: transport_manager, @@ -146,8 +179,9 @@ impl RuntimeBuilder { locators: std::sync::RwLock::new(vec![]), hlc, task_controller: TaskController::default(), - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] plugins_manager: Mutex::new(plugins_manager), + start_conditions: Arc::new(StartConditions::default()), }), }; *handler.runtime.write().unwrap() = Runtime::downgrade(&runtime); @@ -159,8 +193,8 @@ impl RuntimeBuilder { } // Start plugins - #[cfg(all(feature = "unstable", feature = "plugins"))] - crate::plugins::loader::start_plugins(&runtime); + #[cfg(feature = "plugins")] + start_plugins(&runtime); // Start notifier task let receiver = config.subscribe(); @@ -215,7 +249,7 @@ impl Runtime { &self.state.manager } - #[cfg(all(feature = "unstable", feature = "plugins"))] + #[cfg(feature = "plugins")] #[inline(always)] pub(crate) fn plugins_manager(&self) -> MutexGuard<'_, PluginsManager> { zlock!(self.state.plugins_manager) @@ -225,6 +259,11 @@ impl Runtime { zwrite!(self.state.transport_handlers).push(handler); } + #[inline] + pub fn next_id(&self) -> u32 { + self.state.next_id.fetch_add(1, Ordering::SeqCst) + } + pub async fn close(&self) -> ZResult<()> { tracing::trace!("Runtime::close())"); // TODO: Plugins should be stopped @@ -311,6 +350,10 @@ impl Runtime { pub fn get_cancellation_token(&self) -> CancellationToken { self.state.task_controller.get_cancellation_token() } + + pub(crate) fn start_conditions(&self) -> &Arc { + &self.state.start_conditions + } } struct RuntimeTransportEventHandler { diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs index 798a3fc694..fca109fc24 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -11,37 +11,110 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{Runtime, RuntimeSession}; +use std::{ + net::{IpAddr, Ipv6Addr, SocketAddr}, + time::Duration, +}; + use futures::prelude::*; use socket2::{Domain, Socket, Type}; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::time::Duration; -use tokio::net::UdpSocket; -use zenoh_buffers::reader::DidntRead; -use zenoh_buffers::{reader::HasReader, writer::HasWriter}; +use tokio::{ + net::UdpSocket, + sync::{futures::Notified, Mutex, Notify}, +}; +use zenoh_buffers::{ + reader::{DidntRead, HasReader}, + writer::HasWriter, +}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_config::{ get_global_connect_timeout, get_global_listener_timeout, unwrap_or_default, ModeDependent, }; use zenoh_link::{Locator, LocatorInspector}; use zenoh_protocol::{ - core::{whatami::WhatAmIMatcher, EndPoint, WhatAmI, ZenohId}, - 
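The runtime now allocates entity IDs from a shared `AtomicU32`, starting at 1 because 0 is reserved for the routing core; `fetch_add` keeps the counter correct under concurrent callers. A self-contained sketch of the same allocator:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Hands out process-unique IDs; 0 stays reserved (here: for the routing core).
struct IdAllocator {
    next_id: AtomicU32,
}

impl IdAllocator {
    fn new() -> Self {
        Self { next_id: AtomicU32::new(1) }
    }

    /// `fetch_add` returns the previous value, so two concurrent
    /// callers can never observe the same ID.
    fn next_id(&self) -> u32 {
        self.next_id.fetch_add(1, Ordering::SeqCst)
    }
}

fn main() {
    let alloc = IdAllocator::new();
    assert_eq!(alloc.next_id(), 1);
    assert_eq!(alloc.next_id(), 2);
}
```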
scouting::{Hello, Scout, ScoutingBody, ScoutingMessage}, + core::{whatami::WhatAmIMatcher, EndPoint, WhatAmI, ZenohIdProto}, + scouting::{HelloProto, Scout, ScoutingBody, ScoutingMessage}, }; use zenoh_result::{bail, zerror, ZResult}; +use super::{Runtime, RuntimeSession}; + const RCV_BUF_SIZE: usize = u16::MAX as usize; const SCOUT_INITIAL_PERIOD: Duration = Duration::from_millis(1_000); const SCOUT_MAX_PERIOD: Duration = Duration::from_millis(8_000); const SCOUT_PERIOD_INCREASE_FACTOR: u32 = 2; -const ROUTER_DEFAULT_LISTENER: &str = "tcp/[::]:7447"; -const PEER_DEFAULT_LISTENER: &str = "tcp/[::]:0"; pub enum Loop { Continue, Break, } +#[derive(Default, Debug)] +pub(crate) struct PeerConnector { + zid: Option, + terminated: bool, +} + +#[derive(Default, Debug)] +pub(crate) struct StartConditions { + notify: Notify, + peer_connectors: Mutex>, +} + +impl StartConditions { + pub(crate) fn notified(&self) -> Notified<'_> { + self.notify.notified() + } + + pub(crate) async fn add_peer_connector(&self) -> usize { + let mut peer_connectors = self.peer_connectors.lock().await; + peer_connectors.push(PeerConnector::default()); + peer_connectors.len() - 1 + } + + pub(crate) async fn add_peer_connector_zid(&self, zid: ZenohIdProto) { + let mut peer_connectors = self.peer_connectors.lock().await; + if !peer_connectors.iter().any(|pc| pc.zid == Some(zid)) { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: false, + }) + } + } + + pub(crate) async fn set_peer_connector_zid(&self, idx: usize, zid: ZenohIdProto) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.zid = Some(zid); + } + } + + pub(crate) async fn terminate_peer_connector(&self, idx: usize) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.get_mut(idx) { + peer_connector.terminated = true; + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } + + pub(crate) async fn terminate_peer_connector_zid(&self, zid: ZenohIdProto) { + let mut peer_connectors = self.peer_connectors.lock().await; + if let Some(peer_connector) = peer_connectors.iter_mut().find(|pc| pc.zid == Some(zid)) { + peer_connector.terminated = true; + } else { + peer_connectors.push(PeerConnector { + zid: Some(zid), + terminated: true, + }) + } + if !peer_connectors.iter().any(|pc| !pc.terminated) { + self.notify.notify_one() + } + } +} + impl Runtime { pub async fn start(&mut self) -> ZResult<()> { match self.whatami() { @@ -55,7 +128,12 @@ impl Runtime { let (peers, scouting, addr, ifaces, timeout, multicast_ttl) = { let guard = self.state.config.lock(); ( - guard.connect().endpoints().clone(), + guard + .connect() + .endpoints() + .client() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), @@ -91,35 +169,23 @@ impl Runtime { } async fn start_peer(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay, linkstate) = { let guard = &self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = PEER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - 
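StartConditions is essentially a completion barrier: peer connectors register themselves (by index or zid), mark themselves terminated, and the last one to finish releases whoever is blocked in `notified()`. A simplified, runnable sketch of the pattern, with a plain counter standing in for the per-connector bookkeeping:

```rust
use std::sync::Arc;
use tokio::sync::{Mutex, Notify};

/// Minimal analogue of StartConditions.
#[derive(Default)]
struct Barrier {
    notify: Notify,
    pending: Mutex<usize>,
}

impl Barrier {
    async fn add_connector(&self) {
        *self.pending.lock().await += 1;
    }

    async fn terminate_connector(&self) {
        let mut pending = self.pending.lock().await;
        *pending -= 1;
        if *pending == 0 {
            // `notify_one` stores a permit if nobody is waiting yet,
            // so a later `wait` still returns immediately.
            self.notify.notify_one();
        }
    }

    async fn wait(&self) {
        self.notify.notified().await;
    }
}

#[tokio::main]
async fn main() {
    let barrier = Arc::new(Barrier::default());
    barrier.add_connector().await;
    let b = barrier.clone();
    tokio::spawn(async move { b.terminate_connector().await });
    barrier.wait().await; // resolves once every connector has terminated
}
```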
.config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), + guard.listen().endpoints().peer().unwrap_or(&vec![]).clone(), + guard + .connect() + .endpoints() + .peer() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().peer()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().peer()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), + unwrap_or_default!(guard.routing().peer().mode()) == *"linkstate", ) }; @@ -130,39 +196,42 @@ impl Runtime { if scouting { self.start_scout(listen, autoconnect, addr, ifaces).await?; } - tokio::time::sleep(delay).await; + + if linkstate { + tokio::time::sleep(delay).await; + } else if (scouting || !peers.is_empty()) + && tokio::time::timeout(delay, self.state.start_conditions.notified()) + .await + .is_err() + && !peers.is_empty() + { + tracing::warn!("Scouting delay elapsed before start conditions are met."); + } Ok(()) } async fn start_router(&self) -> ZResult<()> { - let (listeners, peers, scouting, listen, autoconnect, addr, ifaces) = { + let (listeners, peers, scouting, listen, autoconnect, addr, ifaces, delay) = { let guard = self.state.config.lock(); - let listeners = if guard.listen().endpoints().is_empty() { - let endpoint: EndPoint = ROUTER_DEFAULT_LISTENER.parse().unwrap(); - let protocol = endpoint.protocol(); - let mut listeners = vec![]; - if self - .state - .manager - .config - .protocols - .iter() - .any(|p| p.as_str() == protocol.as_str()) - { - listeners.push(endpoint) - } - listeners - } else { - guard.listen().endpoints().clone() - }; ( - listeners, - guard.connect().endpoints().clone(), + guard + .listen() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), + guard + .connect() + .endpoints() + .router() + .unwrap_or(&vec![]) + .clone(), unwrap_or_default!(guard.scouting().multicast().enabled()), *unwrap_or_default!(guard.scouting().multicast().listen().router()), *unwrap_or_default!(guard.scouting().multicast().autoconnect().router()), unwrap_or_default!(guard.scouting().multicast().address()), unwrap_or_default!(guard.scouting().multicast().interface()), + Duration::from_millis(unwrap_or_default!(guard.scouting().delay())), ) }; @@ -174,6 +243,7 @@ impl Runtime { self.start_scout(listen, autoconnect, addr, ifaces).await?; } + tokio::time::sleep(delay).await; Ok(()) } @@ -276,7 +346,7 @@ impl Runtime { } } else { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; return Ok(()); } } @@ -308,7 +378,7 @@ impl Runtime { } } else if retry_config.exit_on_failure { // try to connect with retry waiting - self.peer_connector_retry(endpoint).await; + let _ = self.peer_connector_retry(endpoint).await; } else { // try to connect in background self.spawn_peer_connector(endpoint).await? 
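When peer routing is not in linkstate mode, start_peer no longer sleeps for the scouting delay unconditionally: it waits on the start conditions, bounded by that delay, and logs a warning if the delay elapses first while connect endpoints are configured. The timeout idiom, isolated (the delay value is illustrative):

```rust
use std::time::Duration;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let start_conditions = Notify::new();
    let delay = Duration::from_millis(500); // stand-in for the configured scouting delay

    // `timeout` yields Err(Elapsed) if `notified()` has not resolved in time,
    // which start_peer turns into the warning below.
    if tokio::time::timeout(delay, start_conditions.notified())
        .await
        .is_err()
    {
        eprintln!("Scouting delay elapsed before start conditions are met.");
    }
}
```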
@@ -334,7 +404,16 @@ impl Runtime { } pub(crate) async fn update_peers(&self) -> ZResult<()> { - let peers = { self.state.config.lock().connect().endpoints().clone() }; + let peers = { + self.state + .config + .lock() + .connect() + .endpoints() + .get(self.state.whatami) + .unwrap_or(&vec![]) + .clone() + }; let transports = self.manager().get_transports_unicast().await; if self.state.whatami == WhatAmI::Client { @@ -665,14 +744,31 @@ impl Runtime { .await? { let this = self.clone(); - self.spawn(async move { this.peer_connector_retry(peer).await }); + let idx = self.state.start_conditions.add_peer_connector().await; + let config = this.config().lock(); + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + drop(config); + self.spawn(async move { + if let Ok(zid) = this.peer_connector_retry(peer).await { + this.state + .start_conditions + .set_peer_connector_zid(idx, zid) + .await; + } + if !gossip { + this.state + .start_conditions + .terminate_peer_connector(idx) + .await; + } + }); Ok(()) } else { bail!("Forbidden multicast endpoint in connect list!") } } - async fn peer_connector_retry(&self, peer: EndPoint) { + async fn peer_connector_retry(&self, peer: EndPoint) -> ZResult { let retry_config = self.get_connect_retry_config(&peer); let mut period = retry_config.period(); let cancellation_token = self.get_cancellation_token(); @@ -692,7 +788,7 @@ impl Runtime { *zwrite!(orch_transport.endpoint) = Some(peer); } } - break; + return transport.get_zid(); } Ok(Err(e)) => { tracing::debug!( @@ -712,7 +808,7 @@ impl Runtime { } } } - _ = cancellation_token.cancelled() => { break; } + _ = cancellation_token.cancelled() => { bail!(zerror!("Peer connector terminated")); } } tokio::time::sleep(period.next_duration()).await; } @@ -724,7 +820,7 @@ impl Runtime { mcast_addr: &SocketAddr, f: F, ) where - F: Fn(Hello) -> Fut + std::marker::Send + std::marker::Sync + Clone, + F: Fn(HelloProto) -> Fut + std::marker::Send + std::marker::Sync + Clone, Fut: Future + std::marker::Send, Self: Sized, { @@ -815,7 +911,7 @@ impl Runtime { } #[must_use] - async fn connect(&self, zid: &ZenohId, locators: &[Locator]) -> bool { + async fn connect(&self, zid: &ZenohIdProto, locators: &[Locator]) -> bool { const ERR: &str = "Unable to connect to newly scouted peer "; let inspector = LocatorInspector::default(); @@ -868,15 +964,22 @@ impl Runtime { } } - tracing::warn!( - "Unable to connect to any locator of scouted peer {}: {:?}", - zid, - locators - ); + if self.manager().get_transport_unicast(zid).await.is_none() { + tracing::warn!( + "Unable to connect to any locator of scouted peer {}: {:?}", + zid, + locators + ); + } else { + tracing::trace!( + "Unable to connect to any locator of scouted peer {}: Already connected!", + zid + ); + } false } - pub async fn connect_peer(&self, zid: &ZenohId, locators: &[Locator]) { + pub async fn connect_peer(&self, zid: &ZenohIdProto, locators: &[Locator]) { let manager = self.manager(); if zid != &manager.zid() { let has_unicast = manager.get_transport_unicast(zid).await.is_some(); @@ -999,7 +1102,7 @@ impl Runtime { let codec = Zenoh080::new(); let zid = self.manager().zid(); - let hello: ScoutingMessage = Hello { + let hello: ScoutingMessage = HelloProto { version: zenoh_protocol::VERSION, whatami: self.whatami(), zid, @@ -1058,6 +1161,8 @@ impl Runtime { .lock() .connect() .endpoints() + .get(session.runtime.state.whatami) + .unwrap_or(&vec![]) .clone() }; if peers.contains(endpoint) { diff --git a/zenoh/src/net/tests/tables.rs 
b/zenoh/src/net/tests/tables.rs index b6da5a2391..5fd8a49261 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -11,30 +11,40 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::net::primitives::{DummyPrimitives, EPrimitives, Primitives}; -use crate::net::routing::dispatcher::tables::{self, Tables}; -use crate::net::routing::router::*; -use crate::net::routing::RoutingContext; -use std::convert::{TryFrom, TryInto}; -use std::sync::Arc; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; + use uhlc::HLC; use zenoh_buffers::ZBuf; use zenoh_config::Config; use zenoh_core::zlock; -use zenoh_protocol::core::Encoding; -use zenoh_protocol::core::{ - key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, +use zenoh_protocol::{ + core::{ + key_expr::keyexpr, Encoding, ExprId, Reliability, WhatAmI, WireExpr, ZenohIdProto, + EMPTY_EXPR_ID, + }, + network::{ + declare::subscriber::ext::SubscriberInfo, ext, Declare, DeclareBody, DeclareKeyExpr, + }, + zenoh::{PushBody, Put}, +}; + +use crate::net::{ + primitives::{DummyPrimitives, EPrimitives, Primitives}, + routing::{ + dispatcher::tables::{self, Tables}, + router::*, + RoutingContext, + }, }; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::Mode; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; -use zenoh_protocol::zenoh::{PushBody, Put}; #[test] fn base_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -59,13 +69,13 @@ fn base_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face.upgrade().unwrap(), + 0, &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), @@ -131,7 +141,7 @@ fn match_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -167,11 +177,84 @@ fn match_test() { } } +#[test] +fn multisub_test() { + let config = Config::default(); + let router = Router::new( + ZenohIdProto::try_from([1]).unwrap(), + WhatAmI::Client, + Some(Arc::new(HLC::default())), + &config, + ) + .unwrap(); + let tables = router.tables.clone(); + + let primitives = Arc::new(DummyPrimitives {}); + let face0 = Arc::downgrade(&router.new_primitives(primitives).state); + assert!(face0.upgrade().is_some()); + + // -------------- + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, + }; + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &"sub".into(), + &sub_info, + NodeId::default(), + &mut |p, m| p.send_declare(m), + ); + let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") + .map(|res| Arc::downgrade(&res)); + assert!(optres.is_some()); + let res = optres.unwrap(); + assert!(res.upgrade().is_some()); + + declare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &"sub".into(), + &sub_info, + NodeId::default(), + &mut |p, m| p.send_declare(m), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 0, + &WireExpr::empty(), + 
NodeId::default(), + &mut |p, m| p.send_declare(m), + ); + assert!(res.upgrade().is_some()); + + undeclare_subscription( + zlock!(tables.ctrl_lock).as_ref(), + &tables, + &mut face0.upgrade().unwrap(), + 1, + &WireExpr::empty(), + NodeId::default(), + &mut |p, m| p.send_declare(m), + ); + assert!(res.upgrade().is_none()); + + tables::close_face(&tables, &face0); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn clean_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -235,13 +318,13 @@ async fn clean_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &"todrop1/todrop11".into(), &sub_info, NodeId::default(), @@ -257,6 +340,7 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 1, &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), @@ -273,7 +357,8 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("/todrop12"), + 1, + &WireExpr::empty(), NodeId::default(), &mut |p, m| p.send_declare(m), ); @@ -288,7 +373,8 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop1/todrop11".into(), + 0, + &WireExpr::empty(), NodeId::default(), &mut |p, m| p.send_declare(m), ); @@ -307,6 +393,7 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 2, &"todrop3".into(), &sub_info, NodeId::default(), @@ -322,7 +409,8 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - &"todrop3".into(), + 2, + &WireExpr::empty(), NodeId::default(), &mut |p, m| p.send_declare(m), ); @@ -338,6 +426,7 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 3, &"todrop5".into(), &sub_info, NodeId::default(), @@ -347,6 +436,7 @@ async fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 4, &"todrop6".into(), &sub_info, NodeId::default(), @@ -429,6 +519,8 @@ impl ClientPrimitives { } impl Primitives for ClientPrimitives { + fn send_interest(&self, _msg: zenoh_protocol::network::Interest) {} + fn send_declare(&self, msg: zenoh_protocol::network::Declare) { match msg.body { DeclareBody::DeclareKeyExpr(d) => { @@ -456,6 +548,8 @@ impl Primitives for ClientPrimitives { } impl EPrimitives for ClientPrimitives { + fn send_interest(&self, _ctx: RoutingContext) {} + fn send_declare(&self, ctx: RoutingContext) { match ctx.msg.body { DeclareBody::DeclareKeyExpr(d) => { @@ -473,11 +567,15 @@ impl EPrimitives for ClientPrimitives { *zlock!(self.data) = Some(msg.wire_expr.to_owned()); } - fn send_request(&self, _ctx: RoutingContext) {} + fn send_request(&self, msg: zenoh_protocol::network::Request) { + *zlock!(self.data) = Some(msg.wire_expr.to_owned()); + } - fn send_response(&self, _ctx: RoutingContext) {} + fn send_response(&self, msg: zenoh_protocol::network::Response) { + *zlock!(self.data) = Some(msg.wire_expr.to_owned()); + } - fn send_response_final(&self, _ctx: RoutingContext) {} + fn send_response_final(&self, _msg: zenoh_protocol::network::ResponseFinal) {} fn as_any(&self) -> &dyn std::any::Any { self @@ -488,7 
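multisub_test above pins down the new ID-based lifecycle: two subscriptions (ids 0 and 1) on the same key expression keep the routing resource alive until both are undeclared, and undeclaration now passes the ID with an empty `WireExpr` instead of repeating the key. The user-facing analogue, sketched against the migrated session API (builder names assumed):

```rust
use zenoh::prelude::*;

#[tokio::main]
async fn main() -> ZResult<()> {
    let session = zenoh::open(zenoh::Config::default()).await?;
    // Two independent subscribers on the same key expression...
    let sub1 = session.declare_subscriber("sub").await?;
    let sub2 = session.declare_subscriber("sub").await?;
    // ...are undeclared independently; the routing resource survives
    // until the second undeclaration, exactly as the test asserts.
    sub1.undeclare().await?;
    sub2.undeclare().await?;
    Ok(())
}
```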
+586,7 @@ impl EPrimitives for ClientPrimitives { fn client_test() { let config = Config::default(); let router = Router::new( - ZenohId::try_from([1]).unwrap(), + ZenohIdProto::try_from([1]).unwrap(), WhatAmI::Client, Some(Arc::new(HLC::default())), &config, @@ -498,7 +596,6 @@ fn client_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, - mode: Mode::Push, }; let primitives0 = Arc::new(ClientPrimitives::new()); @@ -512,9 +609,10 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 11, wire_expr: "test/client".into(), @@ -525,6 +623,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), + 0, &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), @@ -539,9 +638,10 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 12, wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), @@ -560,9 +660,10 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 21, wire_expr: "test/client".into(), @@ -573,6 +674,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face1.upgrade().unwrap(), + 0, &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), @@ -587,9 +689,10 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 22, wire_expr: WireExpr::from(21).with_suffix("/z2_pub1"), @@ -608,9 +711,10 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - ext_qos: ext::QoSType::declare_default(), + interest_id: None, + ext_qos: ext::QoSType::DECLARE, ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), + ext_nodeid: ext::NodeIdType::DEFAULT, body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 31, wire_expr: "test/client".into(), @@ -621,6 +725,7 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face2.upgrade().unwrap(), + 0, &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), @@ -635,11 +740,11 @@ fn client_test() { &tables, &face0.upgrade().unwrap(), &"test/client/z1_wr1".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, None, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -669,11 +774,11 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &WireExpr::from(11).with_suffix("/z1_wr2"), - ext::QoSType::default(), + ext::QoSType::DEFAULT, None, PushBody::Put(Put { timestamp: None, 
- encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -703,11 +808,11 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &"test/client/**".into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, None, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -737,11 +842,11 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &12.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, None, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -771,11 +876,11 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &22.into(), - ext::QoSType::default(), + ext::QoSType::DEFAULT, None, PushBody::Put(Put { timestamp: None, - encoding: Encoding::default(), + encoding: Encoding::empty(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index 36a841d1ef..373d56c65a 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -16,64 +16,50 @@ //! //! This prelude is similar to the standard library's prelude in that you'll //! almost always want to import its entire contents, but unlike the standard -//! library's prelude you'll have to do so manually. An example of using this is: +//! library's prelude you'll have to do so manually. +//! +//! Examples: //! //! ``` -//! use zenoh::prelude::r#async::*; +//!use zenoh::prelude::*; //! ``` -pub use common::*; -pub(crate) mod common { - pub use crate::key_expr::{keyexpr, KeyExpr, OwnedKeyExpr}; - pub use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - }; - pub use zenoh_core::Resolve; - - pub(crate) type Id = usize; - - pub use crate::config::{self, Config, ValidatedMap}; - pub use crate::handlers::IntoCallbackReceiverPair; - pub use crate::selector::{Parameter, Parameters, Selector}; - pub use crate::session::{Session, SessionDeclarations}; - - pub use crate::query::{QueryConsolidation, QueryTarget}; - - pub use crate::value::Value; - /// The encoding of a zenoh `Value`. - pub use zenoh_protocol::core::{Encoding, KnownEncoding}; - - pub use crate::query::ConsolidationMode; +mod _prelude { #[zenoh_macros::unstable] - pub use crate::sample::Locality; - #[cfg(not(feature = "unstable"))] - pub(crate) use crate::sample::Locality; - pub use crate::sample::Sample; - - pub use zenoh_protocol::core::SampleKind; - - pub use crate::publication::Priority; + pub use crate::api::publisher::PublisherDeclarations; #[zenoh_macros::unstable] - pub use crate::publication::PublisherDeclarations; - pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::EndPoint; - /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. - pub use zenoh_protocol::core::Locator; - /// The global unique id of a zenoh peer. 
- pub use zenoh_protocol::core::ZenohId; + pub use crate::api::selector::ZenohParameters; + pub use crate::{ + api::{ + builders::sample::{ + EncodingBuilderTrait, QoSBuilderTrait, SampleBuilderTrait, TimestampBuilderTrait, + }, + session::{SessionDeclarations, Undeclarable}, + }, + config::ValidatedMap, + Error as ZError, Resolvable, Resolve, Result as ZResult, + }; } +pub use _prelude::*; + +#[allow(deprecated)] +pub use crate::AsyncResolve; +#[allow(deprecated)] +pub use crate::SyncResolve; +pub use crate::Wait; + /// Prelude to import when using Zenoh's sync API. +#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod sync { - pub use super::common::*; - pub use zenoh_core::SyncResolve; + pub use super::_prelude::*; + #[allow(deprecated)] + pub use crate::SyncResolve; } /// Prelude to import when using Zenoh's async API. +#[deprecated(since = "1.0.0", note = "use `zenoh::prelude` instead")] pub mod r#async { - pub use super::common::*; - pub use zenoh_core::AsyncResolve; + pub use super::_prelude::*; + #[allow(deprecated)] + pub use crate::AsyncResolve; } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs deleted file mode 100644 index 8ac43ee4ba..0000000000 --- a/zenoh/src/query.rs +++ /dev/null @@ -1,408 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Query primitives. - -use crate::handlers::{locked, Callback, DefaultHandler}; -use crate::prelude::*; -#[zenoh_macros::unstable] -use crate::sample::Attachment; -use crate::Session; -use std::collections::HashMap; -use std::future::Ready; -use std::time::Duration; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_result::ZResult; - -/// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). -pub use zenoh_protocol::core::QueryTarget; - -/// The kind of consolidation. -pub use zenoh_protocol::core::ConsolidationMode; - -/// The operation: either manual or automatic. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum Mode { - Auto, - Manual(T), -} - -/// The replies consolidation strategy to apply on replies to a [`get`](Session::get). -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct QueryConsolidation { - pub(crate) mode: Mode, -} - -impl QueryConsolidation { - /// Automatic query consolidation strategy selection. - pub const AUTO: Self = Self { mode: Mode::Auto }; - - pub(crate) const fn from_mode(mode: ConsolidationMode) -> Self { - Self { - mode: Mode::Manual(mode), - } - } - - /// Returns the requested [`ConsolidationMode`]. - pub fn mode(&self) -> Mode { - self.mode - } -} -impl From> for QueryConsolidation { - fn from(mode: Mode) -> Self { - Self { mode } - } -} -impl From for QueryConsolidation { - fn from(mode: ConsolidationMode) -> Self { - Self::from_mode(mode) - } -} - -impl Default for QueryConsolidation { - fn default() -> Self { - QueryConsolidation::AUTO - } -} - -/// Structs returned by a [`get`](Session::get). -#[non_exhaustive] -#[derive(Clone, Debug)] -pub struct Reply { - /// The result of this Reply. - pub sample: Result, - /// The id of the zenoh instance that answered this Reply. 
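The net effect of the prelude rework for users: a single `zenoh::prelude`, with the `sync`/`async` submodules deprecated and `.res()` replaced by plain `.await` (builders resolve as futures) or `.wait()` from the re-exported `Wait` trait. A hedged before/after sketch:

```rust
// Before (deprecated by this PR):
//   use zenoh::prelude::r#async::*;
//   let session = zenoh::open(config::peer()).res().await.unwrap();

// After:
use zenoh::prelude::*;

#[tokio::main]
async fn main() -> ZResult<()> {
    let session = zenoh::open(zenoh::Config::default()).await?;
    session.put("demo/example", "payload").await?;
    Ok(())
}
```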
- pub replier_id: ZenohId, -} - -pub(crate) struct QueryState { - pub(crate) nb_final: usize, - pub(crate) selector: Selector<'static>, - pub(crate) scope: Option>, - pub(crate) reception_mode: ConsolidationMode, - pub(crate) replies: Option>, - pub(crate) callback: Callback<'static, Reply>, -} - -/// A builder for initializing a `query`. -/// -/// # Examples -/// ``` -/// # #[tokio::main] -/// # async fn main() { -/// use zenoh::prelude::r#async::*; -/// use zenoh::query::*; -/// -/// let session = zenoh::open(config::peer()).res().await.unwrap(); -/// let replies = session -/// .get("key/expression?value>1") -/// .target(QueryTarget::All) -/// .consolidation(ConsolidationMode::None) -/// .res() -/// .await -/// .unwrap(); -/// while let Ok(reply) = replies.recv_async().await { -/// println!("Received {:?}", reply.sample) -/// } -/// # } -/// ``` -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct GetBuilder<'a, 'b, Handler> { - pub(crate) session: &'a Session, - pub(crate) selector: ZResult>, - pub(crate) scope: ZResult>>, - pub(crate) target: QueryTarget, - pub(crate) consolidation: QueryConsolidation, - pub(crate) destination: Locality, - pub(crate) timeout: Duration, - pub(crate) handler: Handler, - pub(crate) value: Option, - #[cfg(feature = "unstable")] - pub(crate) attachment: Option, -} - -impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { - /// Receive the replies for this query with a callback. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let queryable = session - /// .get("key/expression") - /// .callback(|reply| {println!("Received {:?}", reply.sample);}) - /// .res() - /// .await - /// .unwrap(); - /// # } - /// ``` - #[inline] - pub fn callback(self, callback: Callback) -> GetBuilder<'a, 'b, Callback> - where - Callback: Fn(Reply) + Send + Sync + 'static, - { - let GetBuilder { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - #[cfg(feature = "unstable")] - attachment, - handler: _, - } = self; - GetBuilder { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - #[cfg(feature = "unstable")] - attachment, - handler: callback, - } - } - - /// Receive the replies for this query with a mutable callback. - /// - /// Using this guarantees that your callback will never be called concurrently. - /// If your callback is also accepted by the [`callback`](GetBuilder::callback) method, we suggest you use it instead of `callback_mut` - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let mut n = 0; - /// let queryable = session - /// .get("key/expression") - /// .callback_mut(move |reply| {n += 1;}) - /// .res() - /// .await - /// .unwrap(); - /// # } - /// ``` - #[inline] - pub fn callback_mut( - self, - callback: CallbackMut, - ) -> GetBuilder<'a, 'b, impl Fn(Reply) + Send + Sync + 'static> - where - CallbackMut: FnMut(Reply) + Send + Sync + 'static, - { - self.callback(locked(callback)) - } - - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
- /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let replies = session - /// .get("key/expression") - /// .with(flume::bounded(32)) - /// .res() - /// .await - /// .unwrap(); - /// while let Ok(reply) = replies.recv_async().await { - /// println!("Received {:?}", reply.sample); - /// } - /// # } - /// ``` - #[inline] - pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> - where - Handler: IntoCallbackReceiverPair<'static, Reply>, - { - let GetBuilder { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - #[cfg(feature = "unstable")] - attachment, - handler: _, - } = self; - GetBuilder { - session, - selector, - scope, - target, - consolidation, - destination, - timeout, - value, - #[cfg(feature = "unstable")] - attachment, - handler, - } - } -} -impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { - /// Change the target of the query. - #[inline] - pub fn target(mut self, target: QueryTarget) -> Self { - self.target = target; - self - } - - /// Change the consolidation mode of the query. - #[inline] - pub fn consolidation>(mut self, consolidation: QC) -> Self { - self.consolidation = consolidation.into(); - self - } - - /// Restrict the matching queryables that will receive the query - /// to the ones that have the given [`Locality`](crate::prelude::Locality). - #[zenoh_macros::unstable] - #[inline] - pub fn allowed_destination(mut self, destination: Locality) -> Self { - self.destination = destination; - self - } - - /// Set query timeout. - #[inline] - pub fn timeout(mut self, timeout: Duration) -> Self { - self.timeout = timeout; - self - } - - /// Set query value. - #[inline] - pub fn with_value(mut self, value: IntoValue) -> Self - where - IntoValue: Into, - { - self.value = Some(value.into()); - self - } - - #[zenoh_macros::unstable] - pub fn with_attachment(mut self, attachment: Attachment) -> Self { - self.attachment = Some(attachment); - self - } - - /// By default, `get` guarantees that it will only receive replies whose key expressions intersect - /// with the queried key expression. - /// - /// If allowed to through `accept_replies(ReplyKeyExpr::Any)`, queryables may also reply on key - /// expressions that don't intersect with the query's. 
-
-pub(crate) const _REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = "_anyke";
-#[zenoh_macros::unstable]
-pub const REPLY_KEY_EXPR_ANY_SEL_PARAM: &str = _REPLY_KEY_EXPR_ANY_SEL_PARAM;
-
-#[zenoh_macros::unstable]
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
-pub enum ReplyKeyExpr {
-    Any,
-    #[default]
-    MatchingQuery,
-}
-
-impl<Handler> Resolvable for GetBuilder<'_, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Reply> + Send,
-    Handler::Receiver: Send,
-{
-    type To = ZResult<Handler::Receiver>;
-}
-
-impl<Handler> SyncResolve for GetBuilder<'_, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Reply> + Send,
-    Handler::Receiver: Send,
-{
-    fn res_sync(self) -> <Self as Resolvable>::To {
-        let (callback, receiver) = self.handler.into_cb_receiver_pair();
-
-        self.session
-            .query(
-                &self.selector?,
-                &self.scope?,
-                self.target,
-                self.consolidation,
-                self.destination,
-                self.timeout,
-                self.value,
-                #[cfg(feature = "unstable")]
-                self.attachment,
-                callback,
-            )
-            .map(|_| receiver)
-    }
-}
-
-impl<Handler> AsyncResolve for GetBuilder<'_, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Reply> + Send,
-    Handler::Receiver: Send,
-{
-    type Future = Ready<Self::To>;
-
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
-    }
-}
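Editor's note: these three impls are what let the same builder resolve from both async and blocking contexts; `AsyncResolve::res_async` simply wraps `SyncResolve::res_sync` in an already-ready future. A sketch of the blocking counterpart of the example above, assuming the pre-removal sync prelude (`zenoh::prelude::sync`), where `res()` maps to `res_sync()`:

```rust
use zenoh::prelude::sync::*; // brings SyncResolve into scope: res() blocks

fn main() {
    let session = zenoh::open(config::peer()).res().unwrap();
    // Same builder as in async code, resolved synchronously.
    let replies = session.get("key/expression").res().unwrap();
    while let Ok(reply) = replies.recv() {
        println!("Received {:?}", reply.sample);
    }
}
```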
diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs
deleted file mode 100644
index 751e454610..0000000000
--- a/zenoh/src/queryable.rs
+++ /dev/null
@@ -1,660 +0,0 @@
-//
-// Copyright (c) 2023 ZettaScale Technology
-//
-// This program and the accompanying materials are made available under the
-// terms of the Eclipse Public License 2.0 which is available at
-// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
-// which is available at https://www.apache.org/licenses/LICENSE-2.0.
-//
-// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
-//
-// Contributors:
-//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
-//
-
-//! Queryable primitives.
-
-use crate::handlers::{locked, DefaultHandler};
-use crate::net::primitives::Primitives;
-use crate::prelude::*;
-#[zenoh_macros::unstable]
-use crate::query::ReplyKeyExpr;
-#[zenoh_macros::unstable]
-use crate::sample::Attachment;
-use crate::sample::DataInfo;
-use crate::SessionRef;
-use crate::Undeclarable;
-
-use std::fmt;
-use std::future::Ready;
-use std::ops::Deref;
-use std::sync::Arc;
-use zenoh_core::{AsyncResolve, Resolvable, SyncResolve};
-use zenoh_protocol::core::WireExpr;
-use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal};
-use zenoh_protocol::zenoh::ext::ValueType;
-use zenoh_protocol::zenoh::reply::ext::ConsolidationType;
-use zenoh_protocol::zenoh::{self, ResponseBody};
-use zenoh_result::ZResult;
-
-pub(crate) struct QueryInner {
-    /// The key expression of this Query.
-    pub(crate) key_expr: KeyExpr<'static>,
-    /// This Query's selector parameters.
-    pub(crate) parameters: String,
-    /// This Query's body.
-    pub(crate) value: Option<Value>,
-
-    pub(crate) qid: RequestId,
-    pub(crate) zid: ZenohId,
-    pub(crate) primitives: Arc<dyn Primitives>,
-    #[cfg(feature = "unstable")]
-    pub(crate) attachment: Option<Attachment>,
-}
-
-impl Drop for QueryInner {
-    fn drop(&mut self) {
-        self.primitives.send_response_final(ResponseFinal {
-            rid: self.qid,
-            ext_qos: response::ext::QoSType::response_final_default(),
-            ext_tstamp: None,
-        });
-    }
-}
-
-/// Structs received by a [`Queryable`].
-#[derive(Clone)]
-pub struct Query {
-    pub(crate) inner: Arc<QueryInner>,
-}
-
-impl Query {
-    /// The full [`Selector`] of this Query.
-    #[inline(always)]
-    pub fn selector(&self) -> Selector<'_> {
-        Selector {
-            key_expr: self.inner.key_expr.clone(),
-            parameters: (&self.inner.parameters).into(),
-        }
-    }
-
-    /// The key selector part of this Query.
-    #[inline(always)]
-    pub fn key_expr(&self) -> &KeyExpr<'static> {
-        &self.inner.key_expr
-    }
-
-    /// This Query's selector parameters.
-    #[inline(always)]
-    pub fn parameters(&self) -> &str {
-        &self.inner.parameters
-    }
-
-    /// This Query's value.
-    #[inline(always)]
-    pub fn value(&self) -> Option<&Value> {
-        self.inner.value.as_ref()
-    }
-
-    #[zenoh_macros::unstable]
-    pub fn attachment(&self) -> Option<&Attachment> {
-        self.inner.attachment.as_ref()
-    }
-
-    /// Sends a reply to this Query.
-    ///
-    /// By default, queries only accept replies whose key expression intersects with the query's.
-    /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]),
-    /// replying on a disjoint key expression will result in an error when resolving the reply.
-    #[inline(always)]
-    pub fn reply(&self, result: Result<Sample, Value>) -> ReplyBuilder<'_> {
-        ReplyBuilder {
-            query: self,
-            result,
-        }
-    }
-
-    /// Queries may or may not accept replies on key expressions that do not intersect with their own key expression.
-    /// This getter allows you to check whether or not a specific query does.
-    #[zenoh_macros::unstable]
-    pub fn accepts_replies(&self) -> ZResult<ReplyKeyExpr> {
-        self._accepts_any_replies().map(|any| {
-            if any {
-                ReplyKeyExpr::Any
-            } else {
-                ReplyKeyExpr::MatchingQuery
-            }
-        })
-    }
-    fn _accepts_any_replies(&self) -> ZResult<bool> {
-        self.parameters()
-            .get_bools([crate::query::_REPLY_KEY_EXPR_ANY_SEL_PARAM])
-            .map(|a| a[0])
-    }
-}
-
-impl fmt::Debug for Query {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Query")
-            .field("key_selector", &self.inner.key_expr)
-            .field("parameters", &self.inner.parameters)
-            .finish()
-    }
-}
-
-impl fmt::Display for Query {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Query")
-            .field(
-                "selector",
-                &format!("{}{}", &self.inner.key_expr, &self.inner.parameters),
-            )
-            .finish()
-    }
-}
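Editor's note: on the queryable side, the accessors above decompose an incoming `Query`. A minimal sketch of inspecting a query before replying, using only the getters defined in this removed file (the key expression and prints are illustrative):

```rust
use zenoh::prelude::r#async::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    let queryable = session
        .declare_queryable("key/expression")
        .res()
        .await
        .unwrap();
    while let Ok(query) = queryable.recv_async().await {
        println!("selector:   {}", query.selector());   // key expression + parameters
        println!("key expr:   {}", query.key_expr());
        println!("parameters: {}", query.parameters());
        if let Some(value) = query.value() {
            println!("body:       {:?}", value);        // optional query payload
        }
    }
}
```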
-
-/// A builder returned by [`Query::reply()`](Query::reply).
-#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
-#[derive(Debug)]
-pub struct ReplyBuilder<'a> {
-    query: &'a Query,
-    result: Result<Sample, Value>,
-}
-
-impl<'a> ReplyBuilder<'a> {
-    #[allow(clippy::result_large_err)]
-    #[zenoh_macros::unstable]
-    pub fn with_attachment(mut self, attachment: Attachment) -> Result<Self, (Self, Attachment)> {
-        match &mut self.result {
-            Ok(sample) => {
-                sample.attachment = Some(attachment);
-                Ok(self)
-            }
-            Err(_) => Err((self, attachment)),
-        }
-    }
-}
-
-impl<'a> Resolvable for ReplyBuilder<'a> {
-    type To = ZResult<()>;
-}
-
-impl SyncResolve for ReplyBuilder<'_> {
-    fn res_sync(self) -> <Self as Resolvable>::To {
-        match self.result {
-            Ok(sample) => {
-                if !self.query._accepts_any_replies().unwrap_or(false)
-                    && !self.query.key_expr().intersects(&sample.key_expr)
-                {
-                    bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr())
-                }
-                let Sample {
-                    key_expr,
-                    value: Value { payload, encoding },
-                    kind,
-                    timestamp,
-                    qos,
-                    #[cfg(feature = "unstable")]
-                    source_info,
-                    #[cfg(feature = "unstable")]
-                    attachment,
-                } = sample;
-                #[allow(unused_mut)]
-                let mut data_info = DataInfo {
-                    kind,
-                    encoding: Some(encoding),
-                    timestamp,
-                    qos,
-                    source_id: None,
-                    source_sn: None,
-                };
-                #[allow(unused_mut)]
-                let mut ext_attachment = None;
-                #[cfg(feature = "unstable")]
-                {
-                    data_info.source_id = source_info.source_id;
-                    data_info.source_sn = source_info.source_sn;
-                    if let Some(attachment) = attachment {
-                        ext_attachment = Some(attachment.into());
-                    }
-                }
-                self.query.inner.primitives.send_response(Response {
-                    rid: self.query.inner.qid,
-                    wire_expr: WireExpr {
-                        scope: 0,
-                        suffix: std::borrow::Cow::Owned(key_expr.into()),
-                        mapping: Mapping::Sender,
-                    },
-                    payload: ResponseBody::Reply(zenoh::Reply {
-                        timestamp: data_info.timestamp,
-                        encoding: data_info.encoding.unwrap_or_default(),
-                        ext_sinfo: if data_info.source_id.is_some() || data_info.source_sn.is_some()
-                        {
-                            Some(zenoh::reply::ext::SourceInfoType {
-                                zid: data_info.source_id.unwrap_or_default(),
-                                eid: 0, // @TODO use proper EntityId (#703)
-                                sn: data_info.source_sn.unwrap_or_default() as u32,
-                            })
-                        } else {
-                            None
-                        },
-                        ext_consolidation: ConsolidationType::default(),
-                        #[cfg(feature = "shared-memory")]
-                        ext_shm: None,
-                        ext_attachment,
-                        ext_unknown: vec![],
-                        payload,
-                    }),
-                    ext_qos: response::ext::QoSType::response_default(),
-                    ext_tstamp: None,
-                    ext_respid: Some(response::ext::ResponderIdType {
-                        zid: self.query.inner.zid,
-                        eid: 0, // @TODO use proper EntityId (#703)
-                    }),
-                });
-                Ok(())
-            }
-            Err(payload) => {
-                self.query.inner.primitives.send_response(Response {
-                    rid: self.query.inner.qid,
-                    wire_expr: WireExpr {
-                        scope: 0,
-                        suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()),
-                        mapping: Mapping::Sender,
-                    },
-                    payload: ResponseBody::Err(zenoh::Err {
-                        timestamp: None,
-                        is_infrastructure: false,
-                        ext_sinfo: None,
-                        ext_unknown: vec![],
-                        ext_body: Some(ValueType {
-                            #[cfg(feature = "shared-memory")]
-                            ext_shm: None,
-                            payload: payload.payload,
-                            encoding: payload.encoding,
-                        }),
-                        code: 0, // TODO
-                    }),
-                    ext_qos: response::ext::QoSType::response_default(),
-                    ext_tstamp: None,
-                    ext_respid: Some(response::ext::ResponderIdType {
-                        zid: self.query.inner.zid,
-                        eid: 0, // @TODO use proper EntityId (#703)
-                    }),
-                });
-                Ok(())
-            }
-        }
-    }
-}
-
-impl<'a> AsyncResolve for ReplyBuilder<'a> {
-    type Future = Ready<Self::To>;
-
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
-    }
-}
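Editor's note: since `ReplyBuilder` carries a `Result<Sample, Value>`, success and error replies flow through the same resolution path shown above. A hedged sketch of replying on both branches; it assumes `Value` can be built from a `String` via the crate's `From` impls, which is not verified against this exact revision:

```rust
use zenoh::prelude::r#async::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    let queryable = session
        .declare_queryable("key/expression")
        .res()
        .await
        .unwrap();
    while let Ok(query) = queryable.recv_async().await {
        // Ok(Sample) produces a data reply, Err(Value) an error reply;
        // both resolve through the same ReplyBuilder.
        let result = Sample::try_from("key/expression", "value")
            .map_err(|e| Value::from(e.to_string()));
        query.reply(result).res().await.unwrap();
    }
}
```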
-
-pub(crate) struct QueryableState {
-    pub(crate) id: Id,
-    pub(crate) key_expr: WireExpr<'static>,
-    pub(crate) complete: bool,
-    pub(crate) origin: Locality,
-    pub(crate) callback: Arc<dyn Fn(Query) + Send + Sync>,
-}
-
-impl fmt::Debug for QueryableState {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Queryable")
-            .field("id", &self.id)
-            .field("key_expr", &self.key_expr)
-            .field("complete", &self.complete)
-            .finish()
-    }
-}
-
-/// An entity able to reply to queries through a callback.
-///
-/// CallbackQueryables can be created from a zenoh [`Session`](crate::Session)
-/// with the [`declare_queryable`](crate::Session::declare_queryable) function
-/// and the [`callback`](QueryableBuilder::callback) function
-/// of the resulting builder.
-///
-/// Queryables are automatically undeclared when dropped.
-///
-/// # Examples
-/// ```no_run
-/// # #[tokio::main]
-/// # async fn main() {
-/// use futures::prelude::*;
-/// use zenoh::prelude::r#async::*;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let queryable = session.declare_queryable("key/expression").res().await.unwrap();
-/// while let Ok(query) = queryable.recv_async().await {
-///     println!(">> Handling query '{}'", query.selector());
-///     query.reply(Ok(Sample::try_from("key/expression", "value").unwrap()))
-///         .res()
-///         .await
-///         .unwrap();
-/// }
-/// # }
-/// ```
-#[derive(Debug)]
-pub(crate) struct CallbackQueryable<'a> {
-    pub(crate) session: SessionRef<'a>,
-    pub(crate) state: Arc<QueryableState>,
-    pub(crate) alive: bool,
-}
-
-impl<'a> Undeclarable<(), QueryableUndeclaration<'a>> for CallbackQueryable<'a> {
-    fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> {
-        QueryableUndeclaration { queryable: self }
-    }
-}
-
-/// A [`Resolvable`] returned when undeclaring a queryable.
-///
-/// # Examples
-/// ```
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let queryable = session.declare_queryable("key/expression").res().await.unwrap();
-/// queryable.undeclare().res().await.unwrap();
-/// # }
-/// ```
-#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
-pub struct QueryableUndeclaration<'a> {
-    queryable: CallbackQueryable<'a>,
-}
-
-impl Resolvable for QueryableUndeclaration<'_> {
-    type To = ZResult<()>;
-}
-
-impl SyncResolve for QueryableUndeclaration<'_> {
-    fn res_sync(mut self) -> <Self as Resolvable>::To {
-        self.queryable.alive = false;
-        self.queryable
-            .session
-            .close_queryable(self.queryable.state.id)
-    }
-}
-
-impl<'a> AsyncResolve for QueryableUndeclaration<'a> {
-    type Future = Ready<Self::To>;
-
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
-    }
-}
-
-impl Drop for CallbackQueryable<'_> {
-    fn drop(&mut self) {
-        if self.alive {
-            let _ = self.session.close_queryable(self.state.id);
-        }
-    }
-}
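Editor's note: the interplay between the `Drop` and `SyncResolve` impls above is worth spelling out: dropping a queryable undeclares it but silently discards any failure (`let _ = ...`), while resolving `undeclare()` surfaces the result. A minimal sketch:

```rust
use zenoh::prelude::r#async::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    let queryable = session
        .declare_queryable("key/expression")
        .res()
        .await
        .unwrap();
    // ... serve queries for a while ...
    // Explicit undeclaration propagates errors; a plain drop would ignore them.
    queryable.undeclare().res().await.unwrap();
}
```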
-
-/// A builder for initializing a [`Queryable`].
-///
-/// # Examples
-/// ```
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-/// use zenoh::queryable;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let queryable = session.declare_queryable("key/expression").res().await.unwrap();
-/// # }
-/// ```
-#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"]
-#[derive(Debug)]
-pub struct QueryableBuilder<'a, 'b, Handler> {
-    pub(crate) session: SessionRef<'a>,
-    pub(crate) key_expr: ZResult<KeyExpr<'b>>,
-    pub(crate) complete: bool,
-    pub(crate) origin: Locality,
-    pub(crate) handler: Handler,
-}
-
-impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> {
-    /// Receive the queries for this Queryable with a callback.
-    ///
-    /// # Examples
-    /// ```
-    /// # #[tokio::main]
-    /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    ///
-    /// let session = zenoh::open(config::peer()).res().await.unwrap();
-    /// let queryable = session
-    ///     .declare_queryable("key/expression")
-    ///     .callback(|query| {println!(">> Handling query '{}'", query.selector());})
-    ///     .res()
-    ///     .await
-    ///     .unwrap();
-    /// # }
-    /// ```
-    #[inline]
-    pub fn callback<Callback>(self, callback: Callback) -> QueryableBuilder<'a, 'b, Callback>
-    where
-        Callback: Fn(Query) + Send + Sync + 'static,
-    {
-        let QueryableBuilder {
-            session,
-            key_expr,
-            complete,
-            origin,
-            handler: _,
-        } = self;
-        QueryableBuilder {
-            session,
-            key_expr,
-            complete,
-            origin,
-            handler: callback,
-        }
-    }
-
-    /// Receive the queries for this Queryable with a mutable callback.
-    ///
-    /// Using this guarantees that your callback will never be called concurrently.
-    /// If your callback is also accepted by the [`callback`](QueryableBuilder::callback) method, we suggest you use it instead of `callback_mut`
-    ///
-    /// # Examples
-    /// ```
-    /// # #[tokio::main]
-    /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    ///
-    /// let session = zenoh::open(config::peer()).res().await.unwrap();
-    /// let mut n = 0;
-    /// let queryable = session
-    ///     .declare_queryable("key/expression")
-    ///     .callback_mut(move |query| {n += 1;})
-    ///     .res()
-    ///     .await
-    ///     .unwrap();
-    /// # }
-    /// ```
-    #[inline]
-    pub fn callback_mut<CallbackMut>(
-        self,
-        callback: CallbackMut,
-    ) -> QueryableBuilder<'a, 'b, impl Fn(Query) + Send + Sync + 'static>
-    where
-        CallbackMut: FnMut(Query) + Send + Sync + 'static,
-    {
-        self.callback(locked(callback))
-    }
-
-    /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoCallbackReceiverPair).
-    ///
-    /// # Examples
-    /// ```no_run
-    /// # #[tokio::main]
-    /// # async fn main() {
-    /// use zenoh::prelude::r#async::*;
-    ///
-    /// let session = zenoh::open(config::peer()).res().await.unwrap();
-    /// let queryable = session
-    ///     .declare_queryable("key/expression")
-    ///     .with(flume::bounded(32))
-    ///     .res()
-    ///     .await
-    ///     .unwrap();
-    /// while let Ok(query) = queryable.recv_async().await {
-    ///     println!(">> Handling query '{}'", query.selector());
-    /// }
-    /// # }
-    /// ```
-    #[inline]
-    pub fn with<Handler>(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler>
-    where
-        Handler: crate::prelude::IntoCallbackReceiverPair<'static, Query>,
-    {
-        let QueryableBuilder {
-            session,
-            key_expr,
-            complete,
-            origin,
-            handler: _,
-        } = self;
-        QueryableBuilder {
-            session,
-            key_expr,
-            complete,
-            origin,
-            handler,
-        }
-    }
-
-    /// Restrict the matching queries that will be received by this [`Queryable`]
-    /// to the ones that have the given [`Locality`](crate::prelude::Locality).
-    #[inline]
-    #[zenoh_macros::unstable]
-    pub fn allowed_origin(mut self, origin: Locality) -> Self {
-        self.origin = origin;
-        self
-    }
-}
-impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> {
-    /// Change queryable completeness.
-    #[inline]
-    pub fn complete(mut self, complete: bool) -> Self {
-        self.complete = complete;
-        self
-    }
-}
-
-/// A queryable that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair).
-///
-/// Queryables can be created from a zenoh [`Session`]
-/// with the [`declare_queryable`](crate::Session::declare_queryable) function
-/// and the [`with`](QueryableBuilder::with) function
-/// of the resulting builder.
-///
-/// Queryables are automatically undeclared when dropped.
-///
-/// # Examples
-/// ```no_run
-/// # #[tokio::main]
-/// # async fn main() {
-/// use zenoh::prelude::r#async::*;
-///
-/// let session = zenoh::open(config::peer()).res().await.unwrap();
-/// let queryable = session
-///     .declare_queryable("key/expression")
-///     .with(flume::bounded(32))
-///     .res()
-///     .await
-///     .unwrap();
-/// while let Ok(query) = queryable.recv_async().await {
-///     println!(">> Handling query '{}'", query.selector());
-///     query.reply(Ok(Sample::try_from("key/expression", "value").unwrap()))
-///         .res()
-///         .await
-///         .unwrap();
-/// }
-/// # }
-/// ```
-#[non_exhaustive]
-#[derive(Debug)]
-pub struct Queryable<'a, Receiver> {
-    pub(crate) queryable: CallbackQueryable<'a>,
-    pub receiver: Receiver,
-}
-
-impl<'a, Receiver> Queryable<'a, Receiver> {
-    #[inline]
-    pub fn undeclare(self) -> impl Resolve<ZResult<()>> + 'a {
-        Undeclarable::undeclare_inner(self, ())
-    }
-}
-
-impl<'a, T> Undeclarable<(), QueryableUndeclaration<'a>> for Queryable<'a, T> {
-    fn undeclare_inner(self, _: ()) -> QueryableUndeclaration<'a> {
-        Undeclarable::undeclare_inner(self.queryable, ())
-    }
-}
-
-impl<Receiver> Deref for Queryable<'_, Receiver> {
-    type Target = Receiver;
-
-    fn deref(&self) -> &Self::Target {
-        &self.receiver
-    }
-}
-
-impl<'a, Handler> Resolvable for QueryableBuilder<'a, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Query> + Send,
-    Handler::Receiver: Send,
-{
-    type To = ZResult<Queryable<'a, Handler::Receiver>>;
-}
-
-impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Query> + Send,
-    Handler::Receiver: Send,
-{
-    fn res_sync(self) -> <Self as Resolvable>::To {
-        let session = self.session;
-        let (callback, receiver) = self.handler.into_cb_receiver_pair();
-        session
-            .declare_queryable_inner(
-                &self.key_expr?.to_wire(&session),
-                self.complete,
-                self.origin,
-                callback,
-            )
-            .map(|qable_state| Queryable {
-                queryable: CallbackQueryable {
-                    session,
-                    state: qable_state,
-                    alive: true,
-                },
-                receiver,
-            })
-    }
-}
-
-impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler>
-where
-    Handler: IntoCallbackReceiverPair<'static, Query> + Send,
-    Handler::Receiver: Send,
-{
-    type Future = Ready<Self::To>;
-
-    fn res_async(self) -> Self::Future {
-        std::future::ready(self.res_sync())
-    }
-}
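Editor's note: completeness is the one option shared by every handler variant of the builder. A sketch combining `complete(true)` with a channel handler; the removed doc comment only says it "changes queryable completeness", so the interpretation that it advertises the queryable as serving a complete data set for its key expression is an assumption:

```rust
use zenoh::prelude::r#async::*;

#[tokio::main]
async fn main() {
    let session = zenoh::open(config::peer()).res().await.unwrap();
    let queryable = session
        .declare_queryable("key/expression")
        .complete(true)           // mark this queryable as "complete" for its key expr
        .with(flume::bounded(32)) // bounded channel as the query handler
        .res()
        .await
        .unwrap();
    while let Ok(query) = queryable.recv_async().await {
        println!(">> Handling query '{}'", query.selector());
    }
}
```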
diff --git a/zenoh/src/sample.rs b/zenoh/src/sample.rs
deleted file mode 100644
index ae9cd78469..0000000000
--- a/zenoh/src/sample.rs
+++ /dev/null
@@ -1,579 +0,0 @@
-//
-// Copyright (c) 2023 ZettaScale Technology
-//
-// This program and the accompanying materials are made available under the
-// terms of the Eclipse Public License 2.0 which is available at
-// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
-// which is available at https://www.apache.org/licenses/LICENSE-2.0.
-//
-// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
-//
-// Contributors:
-//   ZettaScale Zenoh Team, <zenoh@zettascale.tech>
-//
-
-//! Sample primitives
-use crate::buffers::ZBuf;
-use crate::prelude::ZenohId;
-use crate::prelude::{KeyExpr, SampleKind, Value};
-use crate::query::Reply;
-use crate::time::{new_reception_timestamp, Timestamp};
-use crate::Priority;
-#[zenoh_macros::unstable]
-use serde::Serialize;
-use std::convert::{TryFrom, TryInto};
-use zenoh_protocol::core::{CongestionControl, Encoding};
-use zenoh_protocol::network::push::ext::QoSType;
-
-pub type SourceSn = u64;
-
-/// The locality of samples to be received by subscribers or targeted by publishers.
-#[zenoh_macros::unstable]
-#[derive(Clone, Copy, Debug, Default, Serialize, PartialEq, Eq)]
-pub enum Locality {
-    SessionLocal,
-    Remote,
-    #[default]
-    Any,
-}
-#[cfg(not(feature = "unstable"))]
-#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
-pub(crate) enum Locality {
-    SessionLocal,
-    Remote,
-    #[default]
-    Any,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, Default)]
-pub(crate) struct DataInfo {
-    pub kind: SampleKind,
-    pub encoding: Option<Encoding>,
-    pub timestamp: Option<Timestamp>,
-    pub source_id: Option<ZenohId>,
-    pub source_sn: Option<SourceSn>,
-    pub qos: QoS,
-}
-
-/// Information on the source of a zenoh [`Sample`].
-#[zenoh_macros::unstable]
-#[derive(Debug, Clone)]
-pub struct SourceInfo {
-    /// The [`ZenohId`] of the zenoh instance that published the concerned [`Sample`].
-    pub source_id: Option<ZenohId>,
-    /// The sequence number of the [`Sample`] from the source.
-    pub source_sn: Option<SourceSn>,
-}
-
-#[test]
-#[cfg(feature = "unstable")]
-fn source_info_stack_size() {
-    assert_eq!(std::mem::size_of::<ZenohId>(), 16);
-    assert_eq!(std::mem::size_of::<Option<ZenohId>>(), 17);
-    assert_eq!(std::mem::size_of::<Option<SourceSn>>(), 16);
-    assert_eq!(std::mem::size_of::<SourceInfo>(), 17 + 16 + 7);
-}
-
-#[zenoh_macros::unstable]
-impl SourceInfo {
-    pub(crate) fn empty() -> Self {
-        SourceInfo {
-            source_id: None,
-            source_sn: None,
-        }
-    }
-}
-
-#[zenoh_macros::unstable]
-impl From<DataInfo> for SourceInfo {
-    fn from(data_info: DataInfo) -> Self {
-        SourceInfo {
-            source_id: data_info.source_id,
-            source_sn: data_info.source_sn,
-        }
-    }
-}
-
-#[zenoh_macros::unstable]
-impl From<Option<DataInfo>> for SourceInfo {
-    fn from(data_info: Option<DataInfo>) -> Self {
-        match data_info {
-            Some(data_info) => data_info.into(),
-            None => SourceInfo::empty(),
-        }
-    }
-}
-
-mod attachment {
-    #[zenoh_macros::unstable]
-    use zenoh_buffers::{
-        reader::{HasReader, Reader},
-        writer::HasWriter,
-        ZBuf, ZBufReader, ZSlice,
-    };
-    #[zenoh_macros::unstable]
-    use zenoh_codec::{RCodec, WCodec, Zenoh080};
-    #[zenoh_macros::unstable]
-    use zenoh_protocol::zenoh::ext::AttachmentType;
-
-    /// A builder for [`Attachment`]
-    #[zenoh_macros::unstable]
-    #[derive(Debug)]
-    pub struct AttachmentBuilder {
-        pub(crate) inner: Vec<u8>,
-    }
-    #[zenoh_macros::unstable]
-    impl Default for AttachmentBuilder {
-        fn default() -> Self {
-            Self::new()
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl AttachmentBuilder {
-        pub fn new() -> Self {
-            Self { inner: Vec::new() }
-        }
-        fn _insert(&mut self, key: &[u8], value: &[u8]) {
-            let codec = Zenoh080;
-            let mut writer = self.inner.writer();
-            codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure
-            codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure
-        }
-        /// Inserts a key-value pair to the attachment.
-        ///
-        /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key.
-        pub fn insert<Key: AsRef<[u8]> + ?Sized, Value: AsRef<[u8]> + ?Sized>(
-            &mut self,
-            key: &Key,
-            value: &Value,
-        ) {
-            self._insert(key.as_ref(), value.as_ref())
-        }
-        pub fn build(self) -> Attachment {
-            Attachment {
-                inner: self.inner.into(),
-            }
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl From<AttachmentBuilder> for Attachment {
-        fn from(value: AttachmentBuilder) -> Self {
-            Attachment {
-                inner: value.inner.into(),
-            }
-        }
-    }
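Editor's note: a short sketch of the builder in use. The import path assumes the `pub use attachment::...` re-export at the end of this removed module, and the `unstable` feature that gates the whole API; key and value strings are illustrative:

```rust
use zenoh::sample::{Attachment, AttachmentBuilder}; // path assumed from the re-export below

fn build_attachment() -> Attachment {
    let mut builder = AttachmentBuilder::new();
    // Keys are not unique: both pairs below are transmitted.
    builder.insert("origin", "example");
    builder.insert("origin", "a second value under the same key");
    builder.build()
}
```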
-    #[zenoh_macros::unstable]
-    #[derive(Clone)]
-    pub struct Attachment {
-        pub(crate) inner: ZBuf,
-    }
-    #[zenoh_macros::unstable]
-    impl Default for Attachment {
-        fn default() -> Self {
-            Self::new()
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl<const ID: u8> From<Attachment> for AttachmentType<ID> {
-        fn from(this: Attachment) -> Self {
-            AttachmentType { buffer: this.inner }
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl<const ID: u8> From<AttachmentType<ID>> for Attachment {
-        fn from(this: AttachmentType<ID>) -> Self {
-            Attachment { inner: this.buffer }
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl Attachment {
-        pub fn new() -> Self {
-            Self {
-                inner: ZBuf::empty(),
-            }
-        }
-        pub fn is_empty(&self) -> bool {
-            self.len() == 0
-        }
-        pub fn len(&self) -> usize {
-            self.iter().count()
-        }
-        pub fn iter(&self) -> AttachmentIterator {
-            self.into_iter()
-        }
-        fn _get(&self, key: &[u8]) -> Option<ZSlice> {
-            self.iter()
-                .find_map(|(k, v)| (k.as_slice() == key).then_some(v))
-        }
-        pub fn get<Key: AsRef<[u8]>>(&self, key: &Key) -> Option<ZSlice> {
-            self._get(key.as_ref())
-        }
-        fn _insert(&mut self, key: &[u8], value: &[u8]) {
-            let codec = Zenoh080;
-            let mut writer = self.inner.writer();
-            codec.write(&mut writer, key).unwrap(); // Infallible, barring alloc failure
-            codec.write(&mut writer, value).unwrap(); // Infallible, barring alloc failure
-        }
-        /// Inserts a key-value pair to the attachment.
-        ///
-        /// Note that [`Attachment`] is a list of non-unique key-value pairs: inserting at the same key multiple times leads to both values being transmitted for that key.
-        ///
-        /// [`Attachment`] is not very efficient at inserting, so if you wish to perform multiple inserts, it's generally better to [`Attachment::extend`] after performing the inserts on an [`AttachmentBuilder`]
-        pub fn insert<Key: AsRef<[u8]> + ?Sized, Value: AsRef<[u8]> + ?Sized>(
-            &mut self,
-            key: &Key,
-            value: &Value,
-        ) {
-            self._insert(key.as_ref(), value.as_ref())
-        }
-        fn _extend(&mut self, with: Self) -> &mut Self {
-            for slice in with.inner.zslices().cloned() {
-                self.inner.push_zslice(slice);
-            }
-            self
-        }
-        pub fn extend(&mut self, with: impl Into<Self>) -> &mut Self {
-            let with = with.into();
-            self._extend(with)
-        }
-    }
-    #[zenoh_macros::unstable]
-    pub struct AttachmentIterator<'a> {
-        reader: ZBufReader<'a>,
-    }
-    #[zenoh_macros::unstable]
-    impl<'a> core::iter::IntoIterator for &'a Attachment {
-        type Item = (ZSlice, ZSlice);
-        type IntoIter = AttachmentIterator<'a>;
-        fn into_iter(self) -> Self::IntoIter {
-            AttachmentIterator {
-                reader: self.inner.reader(),
-            }
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl core::fmt::Debug for Attachment {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            write!(f, "{{")?;
-            for (key, value) in self {
-                let key = key.as_slice();
-                let value = value.as_slice();
-                match core::str::from_utf8(key) {
-                    Ok(key) => write!(f, "\"{key}\": ")?,
-                    Err(_) => {
-                        write!(f, "0x")?;
-                        for byte in key {
-                            write!(f, "{byte:02X}")?
-                        }
-                    }
-                }
-                match core::str::from_utf8(value) {
-                    Ok(value) => write!(f, "\"{value}\", ")?,
-                    Err(_) => {
-                        write!(f, "0x")?;
-                        for byte in value {
-                            write!(f, "{byte:02X}")?
-                        }
-                        write!(f, ", ")?
-                    }
-                }
-            }
-            write!(f, "}}")
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl<'a> core::iter::Iterator for AttachmentIterator<'a> {
-        type Item = (ZSlice, ZSlice);
-        fn next(&mut self) -> Option<Self::Item> {
-            let key = Zenoh080.read(&mut self.reader).ok()?;
-            let value = Zenoh080.read(&mut self.reader).ok()?;
-            Some((key, value))
-        }
-        fn size_hint(&self) -> (usize, Option<usize>) {
-            (
-                (self.reader.remaining() != 0) as usize,
-                Some(self.reader.remaining() / 2),
-            )
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for AttachmentBuilder {
-        fn from_iter<T: IntoIterator<Item = (&'a [u8], &'a [u8])>>(iter: T) -> Self {
-            let codec = Zenoh080;
-            let mut buffer: Vec<u8> = Vec::new();
-            let mut writer = buffer.writer();
-            for (key, value) in iter {
-                codec.write(&mut writer, key).unwrap(); // Infallible, barring allocation failures
-                codec.write(&mut writer, value).unwrap(); // Infallible, barring allocation failures
-            }
-            Self { inner: buffer }
-        }
-    }
-    #[zenoh_macros::unstable]
-    impl<'a> core::iter::FromIterator<(&'a [u8], &'a [u8])> for Attachment {
-        fn from_iter<T: IntoIterator<Item = (&'a [u8], &'a [u8])>>(iter: T) -> Self {
-            AttachmentBuilder::from_iter(iter).into()
-        }
-    }
-}
-#[zenoh_macros::unstable]
-pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator};
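Editor's note: reading attachments back mirrors the construction side. `get` scans for the first value stored under a key, while iteration yields every pair, duplicates included. A minimal sketch against the API above (same assumed import path as the previous example):

```rust
use zenoh::sample::Attachment; // path assumed from the re-export above

fn dump(attachment: &Attachment) {
    // First value stored under the key, if any (linear scan).
    if let Some(value) = attachment.get(b"origin") {
        println!("origin = {:?}", value.as_slice());
    }
    // Every (key, value) pair, duplicates included.
    for (key, value) in attachment {
        println!("{:?} -> {:?}", key.as_slice(), value.as_slice());
    }
}
```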
-
-/// A zenoh sample.
-#[non_exhaustive]
-#[derive(Clone, Debug)]
-pub struct Sample {
-    /// The key expression on which this Sample was published.
-    pub key_expr: KeyExpr<'static>,
-    /// The value of this Sample.
-    pub value: Value,
-    /// The kind of this Sample.
-    pub kind: SampleKind,
-    /// The [`Timestamp`] of this Sample.
-    pub timestamp: Option<Timestamp>,
-    /// Quality of service settings this sample was sent with.
-    pub qos: QoS,
-
-    #[cfg(feature = "unstable")]
-    ///