diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 862f5edd8b..e147d11856 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -102,6 +102,16 @@ jobs: - name: Run doctests run: cargo test --doc + - name: Install valgrind + if: ${{ matrix.os == 'ubuntu-latest' }} + run: sudo apt-get install -y valgrind + shell: bash + + - name: Run memory leaks check + if: ${{ matrix.os == 'ubuntu-latest' }} + run: ci/valgrind-check/run.sh + shell: bash + # NOTE: In GitHub repository settings, the "Require status checks to pass # before merging" branch protection rule ensures that commits are only merged # from branches where specific status checks have passed. These checks are diff --git a/.github/workflows/enforce-linking-issues.yml b/.github/workflows/enforce-linking-issues.yml deleted file mode 100644 index 37bb2cf63e..0000000000 --- a/.github/workflows/enforce-linking-issues.yml +++ /dev/null @@ -1,60 +0,0 @@ -name: Enforce linking issues - -on: - pull_request_target: - types: [opened, edited, labeled] - workflow_call: - -defaults: - run: - shell: bash - -jobs: - main: - name: Enforce referencing a closing issue - runs-on: ubuntu-latest - steps: - - name: Count closing issue references - id: has-closing-issue - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - const query = `query ($owner: String!, $name: String!, $number: Int!) 
{ - repository(owner: $owner, name: $name) { - pullRequest(number: $number) { - closingIssuesReferences(first: 100) { - totalCount - } - } - } - }`; - - const reply = await github.graphql(query, { - owner: context.repo.owner, - name: context.repo.repo, - number: context.payload.pull_request.number - }); - - return reply - .repository - .pullRequest - .closingIssuesReferences - .totalCount > 0; - - - if: ${{ steps.has-closing-issue.outputs.result != 'true' }} - name: Suggest that the contributor link an issue - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.BOT_TOKEN_WORKFLOW }} - script: | - const login = "${{ github.event.pull_request.user.login }}"; - const syntaxUrl = "https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue"; - const message = `@${login} If this pull request contains a bugfix or a new feature, then please consider using \`Closes #ISSUE-NUMBER\` [syntax](${syntaxUrl}) to link it to an issue.` - - github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.payload.pull_request.number, - body: message, - }); diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 631302d722..2a1f760e26 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -35,10 +35,8 @@ jobs: - name: Clone this repository uses: actions/checkout@v4 - - name: Install Rust toolchain - run: | - rustup show - rustup component add rustfmt clippy + - name: Install rustup components + run: rustup component add rustfmt clippy - name: Code format check run: cargo fmt --check @@ -62,11 +60,8 @@ jobs: - name: Clone this repository uses: actions/checkout@v4 - - name: Install Rust toolchain - run: rustup show - - name: Install nextest - run: cargo install --locked cargo-nextest + run: cargo +stable install --locked cargo-nextest env: CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse @@ -83,7 +78,7 @@ jobs: 
ASYNC_STD_THREAD_COUNT: 4 doc: - name: Doc generation + name: Generate documentation needs: checks runs-on: ubuntu-latest steps: @@ -94,7 +89,7 @@ jobs: - name: Install Rust toolchain nightly for docs gen run: rustup toolchain install nightly - - name: generate doc + - name: Run rustdoc using Nightly Rust and Zenoh unstable # NOTE: force 'unstable' feature for doc generation, as forced for docs.rs build in zenoh/Cargo.toml run: > cargo +nightly rustdoc --manifest-path ./zenoh/Cargo.toml --lib --features unstable -j3 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fe050776ec..f5ff3fd2f9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,24 +20,38 @@ on: inputs: live-run: type: boolean - description: If false (or undefined) the workflow runs in dry-run mode (i.e. with no side-effects) + description: Live-run required: false default: false version: type: string - description: Release number. If undefined, the workflow auto-generates a version using git-describe + description: Release number required: false jobs: tag: - name: Bump and tag crates - uses: eclipse-zenoh/ci/.github/workflows/tag-crates.yml@main - with: - repo: ${{ github.repository }} - live-run: ${{ inputs.live-run || false }} - version: ${{ inputs.version }} - inter-deps-pattern: zenoh.* - secrets: inherit + name: Branch, bump & tag crates + runs-on: ubuntu-latest + outputs: + version: ${{ steps.create-release-branch.outputs.version }} + branch: ${{ steps.create-release-branch.outputs.branch }} + steps: + - id: create-release-branch + uses: eclipse-zenoh/ci/create-release-branch@main + with: + repo: ${{ github.repository }} + live-run: ${{ inputs.live-run || false }} + version: ${{ inputs.version }} + github-token: ${{ secrets.BOT_TOKEN_WORKFLOW }} + + - uses: eclipse-zenoh/ci/bump-crates@main + with: + repo: ${{ github.repository }} + version: ${{ steps.create-release-branch.outputs.version }} + branch: ${{ 
steps.create-release-branch.outputs.branch }} + bump-deps-pattern: zenoh.* + bump-deps-version: ${{ steps.create-release-branch.outputs.version }} + github-token: ${{ secrets.BOT_TOKEN_WORKFLOW }} build-debian: name: Build Debian packages @@ -64,14 +78,14 @@ jobs: secrets: inherit cargo: - name: Publish Cargo crates needs: tag + name: Publish Cargo crates uses: eclipse-zenoh/ci/.github/workflows/release-crates-cargo.yml@main with: - repos: ${{ github.repository }} + repo: ${{ github.repository }} live-run: ${{ inputs.live-run || false }} branch: ${{ needs.tag.outputs.branch }} - inter-deps-pattern: zenoh.* + unpublished-deps-patterns: "zenoh.*" secrets: inherit debian: @@ -146,9 +160,10 @@ jobs: uses: eclipse-zenoh/ci/.github/workflows/release-crates-dockerhub.yml@main with: no-build: true - live-run: ${{ inputs.live-run || false }} + live-run: true version: ${{ needs.tag.outputs.version }} repo: ${{ github.repository }} + branch: ${{ needs.tag.outputs.branch }} tags: "eclipse/zenoh:${{ needs.tag.outputs.version }}" binary: zenohd files: | @@ -158,6 +173,7 @@ jobs: platforms: | linux/arm64 linux/amd64 + licenses: EPL-2.0 OR Apache-2.0 secrets: inherit ghcr: @@ -166,10 +182,11 @@ jobs: uses: eclipse-zenoh/ci/.github/workflows/release-crates-ghcr.yml@main with: no-build: true - live-run: ${{ inputs.live-run || false }} + live-run: true version: ${{ needs.tag.outputs.version }} repo: ${{ github.repository }} - tags: "${{ github.repository }}:${{ needs.tag.outputs.version }}" + branch: ${{ needs.tag.outputs.branch }} + tags: "ghcr.io/${{ github.repository }}:${{ needs.tag.outputs.version }}" binary: zenohd files: | zenohd @@ -178,4 +195,5 @@ jobs: platforms: | linux/arm64 linux/amd64 + licenses: EPL-2.0 OR Apache-2.0 secrets: inherit diff --git a/.github/workflows/sync-lockfiles.yml b/.github/workflows/sync-lockfiles.yml index 581deeb5c8..5240ab403f 100644 --- a/.github/workflows/sync-lockfiles.yml +++ b/.github/workflows/sync-lockfiles.yml @@ -18,6 +18,9 @@ jobs: 
fetch: name: Fetch Zenoh's lockfile runs-on: ubuntu-latest + outputs: + zenoh-head-hash: ${{ steps.info.outputs.head-hash }} + zenoh-head-date: ${{ steps.info.outputs.head-date }} steps: - name: Checkout Zenoh uses: actions/checkout@v4 @@ -25,6 +28,12 @@ jobs: repository: eclipse-zenoh/zenoh ref: ${{ inputs.branch }} + - id: info + name: Get HEAD info + run: | + echo "head-hash=$(git log -1 --format=%h)" >> $GITHUB_OUTPUT + echo "head-date=$(git log -1 --format=%ad)" >> $GITHUB_OUTPUT + - name: Upload lockfile uses: actions/upload-artifact@v3 with: @@ -94,8 +103,14 @@ jobs: # NOTE: If there is a pending PR, this action will simply update it with a forced push. uses: peter-evans/create-pull-request@v6 with: - title: Sync lockfile with Zenoh's - body: Automated synchronization of the Cargo lockfile with Zenoh. This is done to ensure plugin ABI compatibility. + title: Sync `Cargo.lock` with `eclipse-zenoh/zenoh@${{ needs.fetch.outputs.zenoh-head-hash }}` from `${{ needs.fetch.outputs.zenoh-head-date }}`" + body: > + This pull request synchronizes ${{ matrix.dependant }}'s Cargo lockfile with zenoh's. + This is done to ensure ABI compatibility between Zenoh applications, backends & plugins. 
+ + - **Zenoh HEAD hash**: eclipse-zenoh/zenoh@${{ needs.fetch.outputs.zenoh-head-hash }} + - **Zenoh HEAD date**: ${{ needs.fetch.outputs.zenoh-head-date }} + - **Workflow run**: [${{ github.run_id }}](https://github.com/eclipse-zenoh/zenoh/actions/runs/${{ github.run_id }}) commit-message: "chore: Sync Cargo lockfile with Zenoh's" committer: eclipse-zenoh-bot author: eclipse-zenoh-bot @@ -107,6 +122,10 @@ jobs: - name: Enable auto merge for the pull request if: steps.cpr.outputs.pull-request-operation == 'created' - run: gh pr merge -R "eclipse-zenoh/${{ matrix.dependant }}" --merge --auto "${{ steps.cpr.outputs.pull-request-number }}" + run: > + gh pr merge "${{ steps.cpr.outputs.pull-request-number }}" + --repo "eclipse-zenoh/${{ matrix.dependant }}" + --squash + --auto env: GH_TOKEN: ${{ secrets.BOT_TOKEN_WORKFLOW }} diff --git a/.gitignore b/.gitignore index 695d0464b1..105dae1aa7 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ .vscode cargo-timing*.html + +ci/valgrind-check/*.log diff --git a/Cargo.lock b/Cargo.lock index 05d608cb3d..b331a798b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,9 +165,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -436,7 +436,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -729,7 +729,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -1015,7 +1015,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -1103,9 +1103,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "6c012a26a7f605efc424dd53697843a72be7dc86ad2d01f7814337794a12231d" dependencies = [ "anstream", "anstyle", @@ -1122,9 +1122,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.4.4" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ "serde", ] @@ -1337,7 +1337,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -1541,9 +1541,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -1703,12 +1703,6 @@ dependencies = [ "nom", ] -[[package]] -name = "iter-read" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c397ca3ea05ad509c4ec451fea28b4771236a376ca1c69fd5143aae0cf8f93c4" - [[package]] name = "itertools" version = "0.10.5" @@ -1854,9 +1848,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = 
"b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ "serde", "value-bag", @@ -2117,9 +2111,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", "libm", @@ -2274,7 +2268,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -2298,48 +2292,6 @@ dependencies = [ "indexmap", ] -[[package]] -name = "phf" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" -dependencies = [ - "phf_macros", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" -dependencies = [ - "phf_shared", - "rand 0.8.5", -] - -[[package]] -name = "phf_macros" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" -dependencies = [ - "phf_generator", - "phf_shared", - "proc-macro2", - "quote", - "syn 2.0.52", -] - -[[package]] -name = "phf_shared" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" -dependencies = [ - "siphasher", -] - [[package]] name = "pin-project" version = "1.1.3" @@ -2357,7 +2309,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -2500,9 +2452,9 @@ checksum = 
"dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -2557,9 +2509,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -2865,9 +2817,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.3" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ "log", "ring 0.17.6", @@ -2923,9 +2875,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -3068,45 +3020,22 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-pickle" -version = "1.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762ad136a26407c6a80825813600ceeab5e613660d93d79a41f0ec877171e71" -dependencies = [ - "byteorder", - "iter-read", - "num-bigint", - "num-traits", - "serde", -] - -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -3131,9 +3060,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", @@ -3313,12 +3242,6 @@ dependencies = [ "event-listener 2.5.3", ] -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - [[package]] name = "slab" version = "0.4.9" @@ -3548,9 +3471,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" dependencies = [ "proc-macro2", "quote", @@ -3574,7 +3497,7 @@ checksum = 
"49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -3701,9 +3624,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.37.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -3724,7 +3647,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -3743,7 +3666,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pki-types", "tokio", ] @@ -3822,7 +3745,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] @@ -3954,12 +3877,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "unwrap-infallible" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "151ac09978d3c2862c4e39b557f4eceee2cc72150bc4cb4f16abf061b6e381fb" - [[package]] name = "unzip-n" version = "0.1.2" @@ -4036,9 +3953,9 @@ dependencies = [ [[package]] name = "value-bag" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" +checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ 
-4046,9 +3963,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc35703541cbccb5278ef7b589d79439fc808ff0b5867195a3230f9a47421d39" +checksum = "b0b9f3feef403a50d4d67e9741a6d8fc688bcbb4e4f31bd4aab72cc690284394" dependencies = [ "erased-serde", "serde", @@ -4057,9 +3974,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.8.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "285b43c29d0b4c0e65aad24561baee67a1b69dc9be9375d4a85138cbf556f7f8" +checksum = "30b24f4146b6f3361e91cbf527d1fb35e9376c3c0cef72ca5ec5af6d640fad7d" dependencies = [ "sval", "sval_buffer", @@ -4143,7 +4060,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", "wasm-bindgen-shared", ] @@ -4177,7 +4094,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4471,21 +4388,16 @@ dependencies = [ "ordered-float", "paste", "petgraph", - "phf", "rand 0.8.5", "regex", "rustc_version 0.4.0", "serde", - "serde-pickle", - "serde_cbor", "serde_json", - "serde_yaml", "socket2 0.5.6", "stop-token", "tokio", "tokio-util", "uhlc", - "unwrap-infallible", "uuid", "vec_map", "zenoh-buffers", @@ -4503,6 +4415,7 @@ dependencies = [ "zenoh-runtime", "zenoh-shm", "zenoh-sync", + "zenoh-task", "zenoh-transport", "zenoh-util", ] @@ -4610,9 +4523,7 @@ dependencies = [ "rustc_version 0.4.0", "tokio", "zenoh", - "zenoh-collections", "zenoh-ext", - "zenoh-shm", ] [[package]] @@ -4625,10 +4536,7 @@ dependencies = [ "flume", "futures", "log", - "phf", "serde", - "serde_cbor", - "serde_json", "tokio", "zenoh", "zenoh-core", @@ -4636,6 +4544,7 @@ dependencies = [ "zenoh-result", "zenoh-runtime", "zenoh-sync", + "zenoh-task", "zenoh-util", ] @@ -4684,7 
+4593,7 @@ dependencies = [ "flume", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-webpki 0.102.2", "serde", "tokio", @@ -4771,7 +4680,7 @@ dependencies = [ "base64 0.21.4", "futures", "log", - "rustls 0.22.3", + "rustls 0.22.2", "rustls-pemfile 2.0.0", "rustls-pki-types", "rustls-webpki 0.102.2", @@ -4896,7 +4805,7 @@ version = "0.11.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", "zenoh-keyexpr", ] @@ -5018,6 +4927,7 @@ dependencies = [ name = "zenoh-runtime" version = "0.11.0-dev" dependencies = [ + "futures", "lazy_static", "tokio", "zenoh-collections", @@ -5049,6 +4959,18 @@ dependencies = [ "zenoh-runtime", ] +[[package]] +name = "zenoh-task" +version = "0.11.0-dev" +dependencies = [ + "futures", + "log", + "tokio", + "tokio-util", + "zenoh-core", + "zenoh-runtime", +] + [[package]] name = "zenoh-transport" version = "0.11.0-dev" @@ -5080,6 +5002,7 @@ dependencies = [ "zenoh-runtime", "zenoh-shm", "zenoh-sync", + "zenoh-task", "zenoh-util", ] @@ -5154,7 +5077,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.33", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d02f84eca8..da99cb1fdc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "commons/zenoh-result", "commons/zenoh-shm", "commons/zenoh-sync", + "commons/zenoh-task", "commons/zenoh-util", "commons/zenoh-runtime", "examples", @@ -51,7 +52,7 @@ members = [ "zenoh-ext", "zenohd", ] -exclude = ["ci/nostd-check"] +exclude = ["ci/nostd-check", "ci/valgrind-check"] [workspace.package] rust-version = "1.66.1" @@ -111,12 +112,11 @@ libloading = "0.8" log = "0.4.17" lz4_flex = "0.11" nix = { version = "0.27", features = ["fs"] } -num_cpus = "1.16.0" +num_cpus = "1.15.0" ordered-float = "4.1.1" panic-message = "0.3.0" paste = "1.0.12" petgraph = "0.6.3" -phf = { version = "0.11.2", features = ["macros"] } pnet = "0.34" pnet_datalink = 
"0.34" proc-macro2 = "1.0.51" @@ -139,9 +139,7 @@ secrecy = { version = "0.8.0", features = ["serde", "alloc"] } serde = { version = "1.0.154", default-features = false, features = [ "derive", ] } # Default features are disabled due to usage in no_std crates -serde_cbor = "0.11.2" -serde_json = "1.0.114" -serde-pickle = "1.1.1" +serde_json = "1.0.94" serde_yaml = "0.9.19" sha3 = "0.10.6" shared_memory = "0.12.4" @@ -159,7 +157,6 @@ tokio-rustls = "0.25.0" console-subscriber = "0.2" typenum = "1.16.0" uhlc = { version = "0.7.0", default-features = false } # Default features are disabled due to usage in no_std crates -unwrap-infallible = "0.1.5" unzip-n = "0.1.2" url = "2.3.1" urlencoding = "2.1.2" @@ -201,6 +198,7 @@ zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } +zenoh-task = { version = "0.11.0-dev", path = "commons/zenoh-task" } [profile.dev] debug = true @@ -219,4 +217,4 @@ debug = false # If you want debug symbol in release mode, set the env variab lto = "fat" codegen-units = 1 opt-level = 3 -panic = "abort" +panic = "abort" \ No newline at end of file diff --git a/README.md b/README.md index 5bce835c29..fb268141a5 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,8 @@ Then you can start run `zenohd`. ------------------------------- ## How to build it -> :warning: **WARNING** :warning: : Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in mantaining compatibility between the various git repositories in the Zenoh project. 
+> [!WARNING] +> Zenoh and its ecosystem are under active development. When you build from git, make sure you also build from git any other Zenoh repository you plan to use (e.g. binding, plugin, backend, etc.). It may happen that some changes in git are not compatible with the most recent packaged Zenoh release (e.g. deb, docker, pip). We put particular effort in mantaining compatibility between the various git repositories in the Zenoh project. Install [Cargo and Rust](https://doc.rust-lang.org/cargo/getting-started/installation.html). Zenoh can be succesfully compiled with Rust stable (>= 1.71.0), so no special configuration is required from your side. If you already have the Rust toolchain installed, make sure it is up-to-date with: @@ -94,6 +95,7 @@ Zenoh's router is built as `target/release/zenohd`. All the examples are built i **Routed tests:** +> [!NOTE] > **Windows users**: to properly execute the commands below in PowerShell you need to escape `"` characters as `\"`. - **put/store/get** @@ -168,19 +170,22 @@ See other examples of Zenoh usage in [examples/](examples) If not specified, the REST plugin will be active on any interface (`[::]`) and port `8000`. -> :warning: **WARNING** :warning: : The following documentation pertains to the v0.6+ API, which comes many changes to the behaviour and configuration of Zenoh. +> [!WARNING] +> The following documentation pertains to the v0.6+ API, which comes many changes to the behaviour and configuration of Zenoh. To access the v0.5 version of the code and matching README, please go to the [0.5.0-beta.9](https://github.com/eclipse-zenoh/zenoh/tree/0.5.0-beta.9) tagged version. 
------------------------------- ## Plugins -> :warning: **WARNING** :warning: : As Rust doesn't have a stable ABI, the plugins should be +> [!WARNING] +> As Rust doesn't have a stable ABI, the plugins should be built with the exact same Rust version than `zenohd`, and using for `zenoh` dependency the same version (or commit number) than 'zenohd'. Otherwise, incompatibilities in memory mapping of shared types between `zenohd` and the library can lead to a `"SIGSEV"` crash. By default the Zenoh router is delivered or built with 2 plugins. These may be configured through a configuration file, or through individual changes to the configuration via the `--cfg` CLI option or via zenoh puts on individual parts of the configuration. -> :warning: **WARNING** :warning: : since `v0.6`, `zenohd` no longer loads every available plugin at startup. Instead, only configured plugins are loaded (after processing `--cfg` and `--plugin` options). Once `zenohd` is running, plugins can be hot-loaded and, if they support it, reconfigured at runtime by editing their configuration through the adminspace. +> [!WARNING] +> Since `v0.6`, `zenohd` no longer loads every available plugin at startup. Instead, only configured plugins are loaded (after processing `--cfg` and `--plugin` options). Once `zenohd` is running, plugins can be hot-loaded and, if they support it, reconfigured at runtime by editing their configuration through the adminspace. Note that the REST plugin is added to the configuration by the default value of the `--rest-http-port` CLI argument. 
diff --git a/ci/valgrind-check/Cargo.toml b/ci/valgrind-check/Cargo.toml new file mode 100644 index 0000000000..cf6f6a844b --- /dev/null +++ b/ci/valgrind-check/Cargo.toml @@ -0,0 +1,37 @@ +# +# Copyright (c) 2024 ZettaScale Technology +# +# This program and the accompanying materials are made available under the +# terms of the Eclipse Public License 2.0 which is available at +# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +# which is available at https://www.apache.org/licenses/LICENSE-2.0. +# +# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +# +# Contributors: +# ZettaScale Zenoh Team, +# +[package] +name = "valgrind-check" +version = "0.1.0" +repository = "https://github.com/eclipse-zenoh/zenoh" +homepage = "http://zenoh.io" +license = "EPL-2.0 OR Apache-2.0" +edition = "2021" +categories = ["network-programming"] +description = "Internal crate for zenoh." + +[dependencies] +tokio = { version = "1.35.1", features = ["rt-multi-thread", "time", "io-std"] } +env_logger = "0.11.0" +futures = "0.3.25" +zenoh = { path = "../../zenoh/" } +zenoh-runtime = { path = "../../commons/zenoh-runtime/" } + +[[bin]] +name = "pub_sub" +path = "src/pub_sub/bin/z_pub_sub.rs" + +[[bin]] +name = "queryable_get" +path = "src/queryable_get/bin/z_queryable_get.rs" diff --git a/ci/valgrind-check/run.sh b/ci/valgrind-check/run.sh new file mode 100755 index 0000000000..7e2a7dd1a8 --- /dev/null +++ b/ci/valgrind-check/run.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -e +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +function check_leaks { + echo "Checking $1 for memory leaks" + valgrind --leak-check=full --num-callers=50 --log-file="$SCRIPT_DIR/$1_leaks.log" $SCRIPT_DIR/target/debug/$1 + num_leaks=$(grep 'ERROR SUMMARY: [0-9]+' -Eo "$SCRIPT_DIR/$1_leaks.log" | grep '[0-9]+' -Eo) + echo "Detected $num_leaks memory leaks" + if (( num_leaks == 0 )) + then + return 0 + else + cat $SCRIPT_DIR/$1_leaks.log + return -1 + fi +} + 
+cargo build --manifest-path=$SCRIPT_DIR/Cargo.toml +check_leaks "queryable_get" +check_leaks "pub_sub" \ No newline at end of file diff --git a/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs new file mode 100644 index 0000000000..fac3437f39 --- /dev/null +++ b/ci/valgrind-check/src/pub_sub/bin/z_pub_sub.rs @@ -0,0 +1,58 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::time::Duration; +use zenoh::config::Config; +use zenoh::prelude::r#async::*; + +#[tokio::main] +async fn main() { + let _z = zenoh_runtime::ZRuntimePoolGuard; + env_logger::init(); + + let pub_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); + let sub_key_expr = KeyExpr::try_from("test/valgrind/**").unwrap(); + + println!("Declaring Publisher on '{pub_key_expr}'..."); + let pub_session = zenoh::open(Config::default()).res().await.unwrap(); + let publisher = pub_session + .declare_publisher(&pub_key_expr) + .res() + .await + .unwrap(); + + println!("Declaring Subscriber on '{sub_key_expr}'..."); + let sub_session = zenoh::open(Config::default()).res().await.unwrap(); + let _subscriber = sub_session + .declare_subscriber(&sub_key_expr) + .callback(|sample| { + println!( + ">> [Subscriber] Received {} ('{}': '{}')", + sample.kind, + sample.key_expr.as_str(), + sample.value + ); + }) + .res() + .await + .unwrap(); + + for idx in 0..5 { + tokio::time::sleep(Duration::from_secs(1)).await; + let buf = format!("[{idx:4}] data"); + println!("Putting Data ('{}': '{}')...", &pub_key_expr, buf); + publisher.put(buf).res().await.unwrap(); + } + 
+ tokio::time::sleep(Duration::from_secs(1)).await; +} diff --git a/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs new file mode 100644 index 0000000000..102b6a036c --- /dev/null +++ b/ci/valgrind-check/src/queryable_get/bin/z_queryable_get.rs @@ -0,0 +1,71 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::convert::TryFrom; +use std::time::Duration; +use zenoh::config::Config; +use zenoh::prelude::r#async::*; + +#[tokio::main] +async fn main() { + let _z = zenoh_runtime::ZRuntimePoolGuard; + env_logger::init(); + + let queryable_key_expr = KeyExpr::try_from("test/valgrind/data").unwrap(); + let get_selector = Selector::try_from("test/valgrind/**").unwrap(); + + println!("Declaring Queryable on '{queryable_key_expr}'..."); + let queryable_session = zenoh::open(Config::default()).res().await.unwrap(); + let _queryable = queryable_session + .declare_queryable(&queryable_key_expr.clone()) + .callback(move |query| { + println!(">> Handling query '{}'", query.selector()); + let reply = Ok(Sample::new( + queryable_key_expr.clone(), + query.value().unwrap().clone(), + )); + zenoh_runtime::ZRuntime::Application.block_in_place( + async move { query.reply(reply).res().await.unwrap(); } + ); + }) + .complete(true) + .res() + .await + .unwrap(); + + println!("Declaring Get session for '{get_selector}'..."); + let get_session = zenoh::open(Config::default()).res().await.unwrap(); + + for idx in 0..5 { + tokio::time::sleep(Duration::from_secs(1)).await; + println!("Sending Query '{get_selector}'..."); 
+ let replies = get_session + .get(&get_selector) + .with_value(idx) + .target(QueryTarget::All) + .res() + .await + .unwrap(); + while let Ok(reply) = replies.recv_async().await { + match reply.sample { + Ok(sample) => println!( + ">> Received ('{}': '{}')", + sample.key_expr.as_str(), + sample.value, + ), + Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), + } + } + } + tokio::time::sleep(Duration::from_secs(1)).await; +} diff --git a/commons/zenoh-buffers/src/lib.rs b/commons/zenoh-buffers/src/lib.rs index 117fb412b7..4dee599ea7 100644 --- a/commons/zenoh-buffers/src/lib.rs +++ b/commons/zenoh-buffers/src/lib.rs @@ -101,8 +101,7 @@ pub mod buffer { let mut slices = self.slices(); match slices.len() { 0 => Cow::Borrowed(b""), - // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. - 1 => Cow::Borrowed(unsafe { slices.next().unwrap_unchecked() }), + 1 => Cow::Borrowed(slices.next().unwrap()), _ => Cow::Owned(slices.fold(Vec::new(), |mut acc, it| { acc.extend(it); acc @@ -199,18 +198,6 @@ pub mod reader { fn rewind(&mut self, mark: Self::Mark) -> bool; } - pub trait AdvanceableReader: Reader { - fn skip(&mut self, offset: usize) -> Result<(), DidntRead>; - fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead>; - fn advance(&mut self, offset: isize) -> Result<(), DidntRead> { - if offset > 0 { - self.skip(offset as usize) - } else { - self.backtrack((-offset) as usize) - } - } - } - #[derive(Debug, Clone, Copy)] pub struct DidntSiphon; diff --git a/commons/zenoh-buffers/src/zbuf.rs b/commons/zenoh-buffers/src/zbuf.rs index 4a655ce36a..1365397966 100644 --- a/commons/zenoh-buffers/src/zbuf.rs +++ b/commons/zenoh-buffers/src/zbuf.rs @@ -15,17 +15,12 @@ use crate::ZSliceKind; use crate::{ buffer::{Buffer, SplitBuffer}, - reader::{ - AdvanceableReader, BacktrackableReader, DidntRead, DidntSiphon, HasReader, Reader, - SiphonableReader, - }, + reader::{BacktrackableReader, DidntRead, 
DidntSiphon, HasReader, Reader, SiphonableReader}, writer::{BacktrackableWriter, DidntWrite, HasWriter, Writer}, - ZSlice, ZSliceBuffer, + ZSlice, }; use alloc::{sync::Arc, vec::Vec}; use core::{cmp, iter, mem, num::NonZeroUsize, ops::RangeBounds, ptr}; -#[cfg(feature = "std")] -use std::io; use zenoh_collections::SingleOrVec; fn get_mut_unchecked(arc: &mut Arc) -> &mut T { @@ -39,10 +34,8 @@ pub struct ZBuf { impl ZBuf { #[must_use] - pub const fn empty() -> Self { - Self { - slices: SingleOrVec::empty(), - } + pub fn empty() -> Self { + Self::default() } pub fn clear(&mut self) { @@ -63,21 +56,6 @@ impl ZBuf { } } - pub fn to_zslice(&self) -> ZSlice { - let mut slices = self.zslices(); - match self.slices.len() { - 0 => ZSlice::empty(), - // SAFETY: it's safe to use unwrap_unchecked() beacuse we are explicitly checking the length is 1. - 1 => unsafe { slices.next().unwrap_unchecked().clone() }, - _ => slices - .fold(Vec::new(), |mut acc, it| { - acc.extend(it.as_slice()); - acc - }) - .into(), - } - } - pub fn splice>(&mut self, erased: Range, replacement: &[u8]) { let start = match erased.start_bound() { core::ops::Bound::Included(n) => *n, @@ -94,7 +72,6 @@ impl ZBuf { } self.insert(start, replacement); } - fn remove(&mut self, mut start: usize, mut end: usize) { assert!(start <= end); assert!(end <= self.len()); @@ -123,7 +100,6 @@ impl ZBuf { let drain_end = end_slice_idx + (end_slice.start >= end_slice.end) as usize; self.slices.drain(drain_start..drain_end); } - fn insert(&mut self, mut at: usize, slice: &[u8]) { if slice.is_empty() { return; @@ -219,34 +195,17 @@ impl PartialEq for ZBuf { } // From impls -impl From for ZBuf { - fn from(t: ZSlice) -> Self { - let mut zbuf = ZBuf::empty(); - zbuf.push_zslice(t); - zbuf - } -} - -impl From> for ZBuf -where - T: ZSliceBuffer + 'static, -{ - fn from(t: Arc) -> Self { - let zslice: ZSlice = t.into(); - Self::from(zslice) - } -} - impl From for ZBuf where - T: ZSliceBuffer + 'static, + T: Into, { fn from(t: T) -> 
Self { + let mut zbuf = ZBuf::empty(); let zslice: ZSlice = t.into(); - Self::from(zslice) + zbuf.push_zslice(zslice); + zbuf } } - // Reader #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct ZBufPos { @@ -306,7 +265,7 @@ impl<'a> Reader for ZBufReader<'a> { } fn read_exact(&mut self, into: &mut [u8]) -> Result<(), DidntRead> { - let len = Reader::read(self, into)?; + let len = self.read(into)?; if len.get() == into.len() { Ok(()) } else { @@ -353,7 +312,7 @@ impl<'a> Reader for ZBufReader<'a> { match (slice.len() - self.cursor.byte).cmp(&len) { cmp::Ordering::Less => { let mut buffer = crate::vec::uninit(len); - Reader::read_exact(self, &mut buffer)?; + self.read_exact(&mut buffer)?; Ok(buffer.into()) } cmp::Ordering::Equal => { @@ -424,81 +383,13 @@ impl<'a> SiphonableReader for ZBufReader<'a> { } #[cfg(feature = "std")] -impl<'a> io::Read for ZBufReader<'a> { - fn read(&mut self, buf: &mut [u8]) -> io::Result { +impl<'a> std::io::Read for ZBufReader<'a> { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { match ::read(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Ok(0), - } - } -} - -impl<'a> AdvanceableReader for ZBufReader<'a> { - fn skip(&mut self, offset: usize) -> Result<(), DidntRead> { - let mut remaining_offset = offset; - while remaining_offset > 0 { - let s = self.inner.slices.get(self.cursor.slice).ok_or(DidntRead)?; - let remains_in_current_slice = s.len() - self.cursor.byte; - let advance = remaining_offset.min(remains_in_current_slice); - remaining_offset -= advance; - self.cursor.byte += advance; - if self.cursor.byte == s.len() { - self.cursor.slice += 1; - self.cursor.byte = 0; - } - } - Ok(()) - } - - fn backtrack(&mut self, offset: usize) -> Result<(), DidntRead> { - let mut remaining_offset = offset; - while remaining_offset > 0 { - let backtrack = remaining_offset.min(self.cursor.byte); - remaining_offset -= backtrack; - self.cursor.byte -= backtrack; - if self.cursor.byte == 0 { - if self.cursor.slice 
== 0 { - break; - } - self.cursor.slice -= 1; - self.cursor.byte = self - .inner - .slices - .get(self.cursor.slice) - .ok_or(DidntRead)? - .len(); - } - } - if remaining_offset == 0 { - Ok(()) - } else { - Err(DidntRead) - } - } -} - -#[cfg(feature = "std")] -impl<'a> io::Seek for ZBufReader<'a> { - fn seek(&mut self, pos: io::SeekFrom) -> io::Result { - let current_pos = self - .inner - .slices() - .take(self.cursor.slice) - .fold(0, |acc, s| acc + s.len()) - + self.cursor.byte; - let current_pos = i64::try_from(current_pos) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e)))?; - - let offset = match pos { - std::io::SeekFrom::Start(s) => i64::try_from(s).unwrap_or(i64::MAX) - current_pos, - std::io::SeekFrom::Current(s) => s, - std::io::SeekFrom::End(s) => self.inner.len() as i64 + s - current_pos, - }; - match self.advance(offset as isize) { - Ok(()) => Ok((offset + current_pos) as u64), Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "InvalidInput", + std::io::ErrorKind::UnexpectedEof, + "UnexpectedEof", )), } } @@ -718,18 +609,18 @@ impl BacktrackableWriter for ZBufWriter<'_> { } #[cfg(feature = "std")] -impl<'a> io::Write for ZBufWriter<'a> { - fn write(&mut self, buf: &[u8]) -> io::Result { +impl<'a> std::io::Write for ZBufWriter<'a> { + fn write(&mut self, buf: &[u8]) -> std::io::Result { match ::write(self, buf) { Ok(n) => Ok(n.get()), - Err(_) => Err(io::Error::new( - io::ErrorKind::UnexpectedEof, + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::UnexpectedEof, "UnexpectedEof", )), } } - fn flush(&mut self) -> io::Result<()> { + fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } @@ -772,47 +663,4 @@ mod tests { assert_eq!(zbuf1, zbuf2); } - - #[cfg(feature = "std")] - #[test] - fn zbuf_seek() { - use super::{HasReader, ZBuf}; - use crate::reader::Reader; - use std::io::Seek; - - let mut buf = ZBuf::empty(); - buf.push_zslice([0u8, 1u8, 2u8, 3u8].into()); - buf.push_zslice([4u8, 5u8, 6u8, 
7u8, 8u8].into()); - buf.push_zslice([9u8, 10u8, 11u8, 12u8, 13u8, 14u8].into()); - let mut reader = buf.reader(); - - assert_eq!(reader.stream_position().unwrap(), 0); - assert_eq!(reader.read_u8().unwrap(), 0); - assert_eq!(reader.seek(std::io::SeekFrom::Current(6)).unwrap(), 7); - assert_eq!(reader.read_u8().unwrap(), 7); - assert_eq!(reader.seek(std::io::SeekFrom::Current(-5)).unwrap(), 3); - assert_eq!(reader.read_u8().unwrap(), 3); - assert_eq!(reader.seek(std::io::SeekFrom::Current(10)).unwrap(), 14); - assert_eq!(reader.read_u8().unwrap(), 14); - reader.seek(std::io::SeekFrom::Current(100)).unwrap_err(); - - assert_eq!(reader.seek(std::io::SeekFrom::Start(0)).unwrap(), 0); - assert_eq!(reader.read_u8().unwrap(), 0); - assert_eq!(reader.seek(std::io::SeekFrom::Start(12)).unwrap(), 12); - assert_eq!(reader.read_u8().unwrap(), 12); - assert_eq!(reader.seek(std::io::SeekFrom::Start(15)).unwrap(), 15); - reader.read_u8().unwrap_err(); - reader.seek(std::io::SeekFrom::Start(100)).unwrap_err(); - - assert_eq!(reader.seek(std::io::SeekFrom::End(0)).unwrap(), 15); - reader.read_u8().unwrap_err(); - assert_eq!(reader.seek(std::io::SeekFrom::End(-5)).unwrap(), 10); - assert_eq!(reader.read_u8().unwrap(), 10); - assert_eq!(reader.seek(std::io::SeekFrom::End(-15)).unwrap(), 0); - assert_eq!(reader.read_u8().unwrap(), 0); - reader.seek(std::io::SeekFrom::End(-20)).unwrap_err(); - - assert_eq!(reader.seek(std::io::SeekFrom::Start(10)).unwrap(), 10); - reader.seek(std::io::SeekFrom::Current(-100)).unwrap_err(); - } } diff --git a/commons/zenoh-buffers/src/zslice.rs b/commons/zenoh-buffers/src/zslice.rs index 05c77cac7d..e53e6f3334 100644 --- a/commons/zenoh-buffers/src/zslice.rs +++ b/commons/zenoh-buffers/src/zslice.rs @@ -92,45 +92,24 @@ pub struct ZSlice { } impl ZSlice { - #[deprecated(note = "use `new` instead")] pub fn make( buf: Arc, start: usize, end: usize, - ) -> Result> { - Self::new(buf, start, end) - } - - pub fn new( - buf: Arc, - start: usize, - end: usize, 
) -> Result> { if start <= end && end <= buf.as_slice().len() { - // unsafe: this operation is safe because we just checked the slice boundaries - Ok(unsafe { ZSlice::new_unchecked(buf, start, end) }) + Ok(ZSlice { + buf, + start, + end, + #[cfg(feature = "shared-memory")] + kind: ZSliceKind::Raw, + }) } else { Err(buf) } } - pub fn empty() -> Self { - unsafe { ZSlice::new_unchecked(Arc::new([]), 0, 0) } - } - - /// # Safety - /// This function does not verify wether the `start` and `end` indexes are within the buffer boundaries. - /// If a [`ZSlice`] is built via this constructor, a later access may panic if `start` and `end` indexes are out-of-bound. - pub unsafe fn new_unchecked(buf: Arc, start: usize, end: usize) -> Self { - ZSlice { - buf, - start, - end, - #[cfg(feature = "shared-memory")] - kind: ZSliceKind::Raw, - } - } - #[inline] #[must_use] pub fn downcast_ref(&self) -> Option<&T> diff --git a/commons/zenoh-codec/Cargo.toml b/commons/zenoh-codec/Cargo.toml index 5b7b8de6ed..72f507a596 100644 --- a/commons/zenoh-codec/Cargo.toml +++ b/commons/zenoh-codec/Cargo.toml @@ -41,6 +41,7 @@ shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory" ] +complete_n = ["zenoh-protocol/complete_n"] [dependencies] log = { workspace = true, optional = true } diff --git a/commons/zenoh-codec/benches/codec.rs b/commons/zenoh-codec/benches/codec.rs index d897038f91..1c46a700a7 100644 --- a/commons/zenoh-codec/benches/codec.rs +++ b/commons/zenoh-codec/benches/codec.rs @@ -75,19 +75,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -121,19 +121,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -162,19 +162,19 @@ fn criterion_benchmark(c: &mut Criterion) { let codec = Zenoh080::new(); let frame = FrameHeader { - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), sn: TransportSn::MIN, - ext_qos: zenoh_protocol::transport::frame::ext::QoSType::DEFAULT, + ext_qos: zenoh_protocol::transport::frame::ext::QoSType::default(), }; let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -210,12 +210,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + 
ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -238,12 +238,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -277,12 +277,12 @@ fn criterion_benchmark(c: &mut Criterion) { let data = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::DEFAULT, + ext_qos: ext::QoSType::default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -305,7 +305,7 @@ fn criterion_benchmark(c: &mut Criterion) { let mut idx = 0; while idx < zslice.len() { let len = (zslice.len() - idx).min(chunk); - zbuf.push_zslice(ZSlice::new(buff.clone(), idx, idx + len).unwrap()); + zbuf.push_zslice(ZSlice::make(buff.clone(), idx, idx + len).unwrap()); idx += len; } diff --git a/commons/zenoh-codec/src/common/mod.rs b/commons/zenoh-codec/src/common/mod.rs index f34f9872bf..4c25c93241 100644 --- a/commons/zenoh-codec/src/common/mod.rs +++ b/commons/zenoh-codec/src/common/mod.rs @@ -12,3 +12,4 @@ // ZettaScale Zenoh Team, // pub mod extension; +mod priority; diff --git a/commons/zenoh-codec/src/common/priority.rs b/commons/zenoh-codec/src/common/priority.rs new file mode 100644 index 0000000000..776229971e --- /dev/null +++ b/commons/zenoh-codec/src/common/priority.rs @@ -0,0 +1,66 @@ +// +// Copyright (c) 2023 
ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{RCodec, WCodec, Zenoh080, Zenoh080Header}; +use core::convert::TryInto; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::{common::imsg, core::Priority}; + +impl WCodec<&Priority, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Priority) -> Self::Output { + // Header + let header = imsg::id::PRIORITY | ((*x as u8) << imsg::HEADER_BITS); + self.write(&mut *writer, header)?; + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, _reader: &mut R) -> Result { + if imsg::mid(self.header) != imsg::id::PRIORITY { + return Err(DidntRead); + } + + let priority: Priority = (imsg::flags(self.header) >> imsg::HEADER_BITS) + .try_into() + .map_err(|_| DidntRead)?; + Ok(priority) + } +} diff --git a/commons/zenoh-codec/src/core/encoding.rs b/commons/zenoh-codec/src/core/encoding.rs index c8033cdd5f..478bcf1cd8 100644 --- a/commons/zenoh-codec/src/core/encoding.rs +++ b/commons/zenoh-codec/src/core/encoding.rs @@ -12,22 +12,16 @@ // ZettaScale Zenoh Team, // use crate::{LCodec, RCodec, WCodec, Zenoh080, Zenoh080Bounded}; +use alloc::string::String; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, 
Writer}, }; -use zenoh_protocol::{ - common::imsg, - core::encoding::{flag, Encoding, EncodingId}, -}; +use zenoh_protocol::core::Encoding; impl LCodec<&Encoding> for Zenoh080 { fn w_len(self, x: &Encoding) -> usize { - let mut len = self.w_len((x.id as u32) << 1); - if let Some(schema) = x.schema.as_ref() { - len += self.w_len(schema.as_slice()); - } - len + 1 + self.w_len(x.suffix()) } } @@ -38,17 +32,9 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, x: &Encoding) -> Self::Output { - let mut id = (x.id as u32) << 1; - - if x.schema.is_some() { - id |= flag::S; - } - let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, id)?; - if let Some(schema) = x.schema.as_ref() { - let zodec = Zenoh080Bounded::::new(); - zodec.write(&mut *writer, schema)?; - } + let zodec = Zenoh080Bounded::::new(); + zodec.write(&mut *writer, *x.prefix() as u8)?; + zodec.write(&mut *writer, x.suffix())?; Ok(()) } } @@ -60,20 +46,10 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let zodec = Zenoh080Bounded::::new(); - let id: u32 = zodec.read(&mut *reader)?; - let (id, has_schema) = ( - (id >> 1) as EncodingId, - imsg::has_flag(id as u8, flag::S as u8), - ); - - let mut schema = None; - if has_schema { - let zodec = Zenoh080Bounded::::new(); - schema = Some(zodec.read(&mut *reader)?); - } - - let encoding = Encoding { id, schema }; + let zodec = Zenoh080Bounded::::new(); + let prefix: u8 = zodec.read(&mut *reader)?; + let suffix: String = zodec.read(&mut *reader)?; + let encoding = Encoding::new(prefix, suffix).map_err(|_| DidntRead)?; Ok(encoding) } } diff --git a/commons/zenoh-codec/src/core/mod.rs b/commons/zenoh-codec/src/core/mod.rs index c8e19f057f..1f48def695 100644 --- a/commons/zenoh-codec/src/core/mod.rs +++ b/commons/zenoh-codec/src/core/mod.rs @@ -13,6 +13,7 @@ // mod encoding; mod locator; +mod property; #[cfg(feature = "shared-memory")] mod shm; mod timestamp; diff --git 
a/commons/zenoh-codec/src/core/property.rs b/commons/zenoh-codec/src/core/property.rs new file mode 100644 index 0000000000..bb7f760208 --- /dev/null +++ b/commons/zenoh-codec/src/core/property.rs @@ -0,0 +1,84 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{RCodec, WCodec, Zenoh080}; +use alloc::vec::Vec; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::core::Property; + +impl WCodec<&Property, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Property) -> Self::Output { + let Property { key, value } = x; + + self.write(&mut *writer, key)?; + self.write(&mut *writer, value.as_slice())?; + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let key: u64 = self.read(&mut *reader)?; + let value: Vec = self.read(&mut *reader)?; + + Ok(Property { key, value }) + } +} + +impl WCodec<&[Property], &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &[Property]) -> Self::Output { + self.write(&mut *writer, x.len())?; + for p in x.iter() { + self.write(&mut *writer, p)?; + } + + Ok(()) + } +} + +impl RCodec, &mut R> for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result, Self::Error> { + let num: usize = self.read(&mut *reader)?; + + let mut ps = Vec::with_capacity(num); + for _ in 0..num { + let p: Property = self.read(&mut 
*reader)?; + ps.push(p); + } + + Ok(ps) + } +} diff --git a/commons/zenoh-codec/src/core/wire_expr.rs b/commons/zenoh-codec/src/core/wire_expr.rs index aa6f77b379..6caba6c8c7 100644 --- a/commons/zenoh-codec/src/core/wire_expr.rs +++ b/commons/zenoh-codec/src/core/wire_expr.rs @@ -65,7 +65,7 @@ where Ok(WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::DEFAULT, + mapping: Mapping::default(), }) } } diff --git a/commons/zenoh-codec/src/core/zint.rs b/commons/zenoh-codec/src/core/zint.rs index d5160e2ee6..1c2f5a28e4 100644 --- a/commons/zenoh-codec/src/core/zint.rs +++ b/commons/zenoh-codec/src/core/zint.rs @@ -17,42 +17,41 @@ use zenoh_buffers::{ writer::{DidntWrite, Writer}, }; -const VLE_LEN_MAX: usize = vle_len(u64::MAX); - -const fn vle_len(x: u64) -> usize { - const B1: u64 = u64::MAX << 7; - const B2: u64 = u64::MAX << (7 * 2); - const B3: u64 = u64::MAX << (7 * 3); - const B4: u64 = u64::MAX << (7 * 4); - const B5: u64 = u64::MAX << (7 * 5); - const B6: u64 = u64::MAX << (7 * 6); - const B7: u64 = u64::MAX << (7 * 7); - const B8: u64 = u64::MAX << (7 * 8); - - if (x & B1) == 0 { - 1 - } else if (x & B2) == 0 { - 2 - } else if (x & B3) == 0 { - 3 - } else if (x & B4) == 0 { - 4 - } else if (x & B5) == 0 { - 5 - } else if (x & B6) == 0 { - 6 - } else if (x & B7) == 0 { - 7 - } else if (x & B8) == 0 { - 8 - } else { - 9 - } -} +const VLE_LEN: usize = 10; impl LCodec for Zenoh080 { fn w_len(self, x: u64) -> usize { - vle_len(x) + const B1: u64 = u64::MAX << 7; + const B2: u64 = u64::MAX << (7 * 2); + const B3: u64 = u64::MAX << (7 * 3); + const B4: u64 = u64::MAX << (7 * 4); + const B5: u64 = u64::MAX << (7 * 5); + const B6: u64 = u64::MAX << (7 * 6); + const B7: u64 = u64::MAX << (7 * 7); + const B8: u64 = u64::MAX << (7 * 8); + const B9: u64 = u64::MAX << (7 * 9); + + if (x & B1) == 0 { + 1 + } else if (x & B2) == 0 { + 2 + } else if (x & B3) == 0 { + 3 + } else if (x & B4) == 0 { + 4 + } else if (x & B5) == 0 { + 5 + } else if (x & B6) == 0 { + 6 + 
} else if (x & B7) == 0 { + 7 + } else if (x & B8) == 0 { + 8 + } else if (x & B9) == 0 { + 9 + } else { + 10 + } } } @@ -111,33 +110,17 @@ where type Output = Result<(), DidntWrite>; fn write(self, writer: &mut W, mut x: u64) -> Self::Output { - writer.with_slot(VLE_LEN_MAX, move |buffer| { + writer.with_slot(VLE_LEN, move |buffer| { let mut len = 0; - while (x & !0x7f_u64) != 0 { - // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is - // the maximum number of bytes a VLE can take once encoded. - // I.e.: x is shifted 7 bits to the right every iteration, - // the loop is at most VLE_LEN iterations. - unsafe { - *buffer.get_unchecked_mut(len) = (x as u8) | 0x80_u8; - } + let mut b = x as u8; + while x > 0x7f { + buffer[len] = b | 0x80; len += 1; x >>= 7; + b = x as u8; } - // In case len == VLE_LEN then all the bits have already been written in the latest iteration. - // Else we haven't written all the necessary bytes yet. - if len != VLE_LEN_MAX { - // SAFETY: buffer is guaranteed to be VLE_LEN long where VLE_LEN is - // the maximum number of bytes a VLE can take once encoded. - // I.e.: x is shifted 7 bits to the right every iteration, - // the loop is at most VLE_LEN iterations. 
- unsafe { - *buffer.get_unchecked_mut(len) = x as u8; - } - len += 1; - } - // The number of written bytes - len + buffer[len] = b; + len + 1 })?; Ok(()) } @@ -154,14 +137,19 @@ where let mut v = 0; let mut i = 0; - // 7 * VLE_LEN is beyond the maximum number of shift bits - while (b & 0x80_u8) != 0 && i != 7 * (VLE_LEN_MAX - 1) { - v |= ((b & 0x7f_u8) as u64) << i; - b = reader.read_u8()?; + let mut k = VLE_LEN; + while b > 0x7f && k > 0 { + v |= ((b & 0x7f) as u64) << i; i += 7; + b = reader.read_u8()?; + k -= 1; + } + if k > 0 { + v |= ((b & 0x7f) as u64) << i; + Ok(v) + } else { + Err(DidntRead) } - v |= (b as u64) << i; - Ok(v) } } diff --git a/commons/zenoh-codec/src/network/declare.rs b/commons/zenoh-codec/src/network/declare.rs index 6e9dad12ce..20916dc359 100644 --- a/commons/zenoh-codec/src/network/declare.rs +++ b/commons/zenoh-codec/src/network/declare.rs @@ -19,16 +19,11 @@ use zenoh_buffers::{ ZBuf, }; use zenoh_protocol::{ - common::{ - iext, - imsg::{self, HEADER_BITS}, - ZExtZ64, - }, + common::{iext, imsg, ZExtZ64}, core::{ExprId, ExprLen, WireExpr}, network::{ declare::{ self, common, interest, keyexpr, queryable, subscriber, token, Declare, DeclareBody, - DeclareMode, Interest, }, id, Mapping, }, @@ -52,7 +47,8 @@ where DeclareBody::DeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::UndeclareToken(r) => self.write(&mut *writer, r)?, DeclareBody::DeclareInterest(r) => self.write(&mut *writer, r)?, - DeclareBody::DeclareFinal(r) => self.write(&mut *writer, r)?, + DeclareBody::FinalInterest(r) => self.write(&mut *writer, r)?, + DeclareBody::UndeclareInterest(r) => self.write(&mut *writer, r)?, } Ok(()) @@ -80,7 +76,8 @@ where D_TOKEN => DeclareBody::DeclareToken(codec.read(&mut *reader)?), U_TOKEN => DeclareBody::UndeclareToken(codec.read(&mut *reader)?), D_INTEREST => DeclareBody::DeclareInterest(codec.read(&mut *reader)?), - D_FINAL => DeclareBody::DeclareFinal(codec.read(&mut *reader)?), + F_INTEREST => 
DeclareBody::FinalInterest(codec.read(&mut *reader)?), + U_INTEREST => DeclareBody::UndeclareInterest(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -97,7 +94,6 @@ where fn write(self, writer: &mut W, x: &Declare) -> Self::Output { let Declare { - mode, ext_qos, ext_tstamp, ext_nodeid, @@ -106,31 +102,16 @@ where // Header let mut header = id::DECLARE; - header |= match mode { - DeclareMode::Push => 0b00, - DeclareMode::Response(_) => 0b01, - DeclareMode::Request(_) => 0b10, - DeclareMode::RequestContinuous(_) => 0b11, - } << HEADER_BITS; - - let mut n_exts = ((ext_qos != &declare::ext::QoSType::DEFAULT) as u8) + let mut n_exts = ((ext_qos != &declare::ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &declare::ext::NodeIdType::DEFAULT) as u8); + + ((ext_nodeid != &declare::ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= declare::flag::Z; } self.write(&mut *writer, header)?; - // Body - if let DeclareMode::Request(rid) - | DeclareMode::RequestContinuous(rid) - | DeclareMode::Response(rid) = mode - { - self.write(&mut *writer, rid)?; - } - // Extensions - if ext_qos != &declare::ext::QoSType::DEFAULT { + if ext_qos != &declare::ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -138,7 +119,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &declare::ext::NodeIdType::DEFAULT { + if ext_nodeid != &declare::ext::NodeIdType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -175,19 +156,10 @@ where return Err(DidntRead); } - // Body - let mode = match (self.header >> HEADER_BITS) & 0b11 { - 0b00 => DeclareMode::Push, - 0b01 => DeclareMode::Response(self.codec.read(&mut *reader)?), - 0b10 => DeclareMode::Request(self.codec.read(&mut *reader)?), - 0b11 => DeclareMode::RequestContinuous(self.codec.read(&mut *reader)?), - _ => return Err(DidntRead), - }; - // Extensions - let mut ext_qos = 
declare::ext::QoSType::DEFAULT; + let mut ext_qos = declare::ext::QoSType::default(); let mut ext_tstamp = None; - let mut ext_nodeid = declare::ext::NodeIdType::DEFAULT; + let mut ext_nodeid = declare::ext::NodeIdType::default(); let mut has_ext = imsg::has_flag(self.header, declare::flag::Z); while has_ext { @@ -219,68 +191,14 @@ where let body: DeclareBody = self.codec.read(&mut *reader)?; Ok(Declare { - mode, + body, ext_qos, ext_tstamp, ext_nodeid, - body, }) } } -// Final -impl WCodec<&common::DeclareFinal, &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - - fn write(self, writer: &mut W, x: &common::DeclareFinal) -> Self::Output { - let common::DeclareFinal = x; - - // Header - let header = declare::id::D_FINAL; - self.write(&mut *writer, header)?; - - Ok(()) - } -} - -impl RCodec for Zenoh080 -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - let header: u8 = self.read(&mut *reader)?; - let codec = Zenoh080Header::new(header); - - codec.read(reader) - } -} - -impl RCodec for Zenoh080Header -where - R: Reader, -{ - type Error = DidntRead; - - fn read(self, reader: &mut R) -> Result { - if imsg::mid(self.header) != declare::id::D_FINAL { - return Err(DidntRead); - } - - // Extensions - let has_ext = imsg::has_flag(self.header, token::flag::Z); - if has_ext { - extension::skip_all(reader, "Final")?; - } - - Ok(common::DeclareFinal) - } -} - // DeclareKeyExpr impl WCodec<&keyexpr::DeclareKeyExpr, &mut W> for Zenoh080 where @@ -422,11 +340,11 @@ where // Header let mut header = declare::id::D_SUBSCRIBER; - let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::DEFAULT) as u8; + let mut n_exts = (ext_info != &subscriber::ext::SubscriberInfo::default()) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::DEFAULT { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -439,7 
+357,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_info != &subscriber::ext::SubscriberInfo::DEFAULT { + if ext_info != &subscriber::ext::SubscriberInfo::default() { n_exts -= 1; self.write(&mut *writer, (*ext_info, n_exts != 0))?; } @@ -484,7 +402,7 @@ where }; // Extensions - let mut ext_info = subscriber::ext::SubscriberInfo::DEFAULT; + let mut ext_info = subscriber::ext::SubscriberInfo::default(); let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); while has_ext { @@ -522,19 +440,14 @@ where let subscriber::UndeclareSubscriber { id, ext_wire_expr } = x; // Header - let mut header = declare::id::U_SUBSCRIBER; - if !ext_wire_expr.is_null() { - header |= subscriber::flag::Z; - } + let header = declare::id::U_SUBSCRIBER | subscriber::flag::Z; self.write(&mut *writer, header)?; // Body self.write(&mut *writer, id)?; // Extension - if !ext_wire_expr.is_null() { - self.write(&mut *writer, (ext_wire_expr, false))?; - } + self.write(&mut *writer, (ext_wire_expr, false))?; Ok(()) } @@ -569,6 +482,7 @@ where let id: subscriber::SubscriberId = self.codec.read(&mut *reader)?; // Extensions + // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, subscriber::flag::Z); @@ -592,46 +506,7 @@ where } // QueryableInfo -impl WCodec<(&queryable::ext::QueryableInfoType, bool), &mut W> for Zenoh080 -where - W: Writer, -{ - type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (&queryable::ext::QueryableInfoType, bool)) -> Self::Output { - let (x, more) = x; - - let mut flags: u8 = 0; - if x.complete { - flags |= queryable::ext::flag::C; - } - let v: u64 = (flags as u64) | ((x.distance as u64) << 8); - let ext = queryable::ext::QueryableInfo::new(v); - - self.write(&mut *writer, (&ext, more)) - } -} - -impl RCodec<(queryable::ext::QueryableInfoType, bool), &mut R> for Zenoh080Header -where - R: 
Reader, -{ - type Error = DidntRead; - - fn read( - self, - reader: &mut R, - ) -> Result<(queryable::ext::QueryableInfoType, bool), Self::Error> { - let (ext, more): (queryable::ext::QueryableInfo, bool) = self.read(&mut *reader)?; - - let complete = imsg::has_flag(ext.value as u8, queryable::ext::flag::C); - let distance = (ext.value >> 8) as u16; - - Ok(( - queryable::ext::QueryableInfoType { complete, distance }, - more, - )) - } -} +crate::impl_zextz64!(queryable::ext::QueryableInfo, queryable::ext::Info::ID); // DeclareQueryable impl WCodec<&queryable::DeclareQueryable, &mut W> for Zenoh080 @@ -649,11 +524,11 @@ where // Header let mut header = declare::id::D_QUERYABLE; - let mut n_exts = (ext_info != &queryable::ext::QueryableInfoType::DEFAULT) as u8; + let mut n_exts = (ext_info != &queryable::ext::QueryableInfo::default()) as u8; if n_exts != 0 { header |= subscriber::flag::Z; } - if wire_expr.mapping != Mapping::DEFAULT { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -664,9 +539,9 @@ where // Body self.write(&mut *writer, id)?; self.write(&mut *writer, wire_expr)?; - if ext_info != &queryable::ext::QueryableInfoType::DEFAULT { + if ext_info != &queryable::ext::QueryableInfo::default() { n_exts -= 1; - self.write(&mut *writer, (ext_info, n_exts != 0))?; + self.write(&mut *writer, (*ext_info, n_exts != 0))?; } Ok(()) @@ -709,15 +584,15 @@ where }; // Extensions - let mut ext_info = queryable::ext::QueryableInfoType::DEFAULT; + let mut ext_info = queryable::ext::QueryableInfo::default(); let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; let eodec = Zenoh080Header::new(ext); match iext::eid(ext) { - queryable::ext::QueryableInfo::ID => { - let (i, ext): (queryable::ext::QueryableInfoType, bool) = + queryable::ext::Info::ID => { + let (i, ext): (queryable::ext::QueryableInfo, bool) = eodec.read(&mut *reader)?; ext_info 
= i; has_ext = ext; @@ -789,6 +664,7 @@ where let id: queryable::QueryableId = self.codec.read(&mut *reader)?; // Extensions + // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, queryable::flag::Z); @@ -823,7 +699,7 @@ where // Header let mut header = declare::id::D_TOKEN; - if wire_expr.mapping != Mapping::DEFAULT { + if wire_expr.mapping != Mapping::default() { header |= subscriber::flag::M; } if wire_expr.has_suffix() { @@ -936,6 +812,7 @@ where let id: token::TokenId = self.codec.read(&mut *reader)?; // Extensions + // WARNING: this is a temporary and mandatory extension used for undeclarations let mut ext_wire_expr = common::ext::WireExprType::null(); let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); @@ -967,19 +844,25 @@ where fn write(self, writer: &mut W, x: &interest::DeclareInterest) -> Self::Output { let interest::DeclareInterest { - interest: _, + id, wire_expr, + interest, } = x; // Header - let header = declare::id::D_INTEREST; + let mut header = declare::id::D_INTEREST; + if wire_expr.mapping != Mapping::default() { + header |= subscriber::flag::M; + } + if wire_expr.has_suffix() { + header |= subscriber::flag::N; + } self.write(&mut *writer, header)?; // Body - self.write(&mut *writer, x.options())?; - if let Some(we) = wire_expr.as_ref() { - self.write(&mut *writer, we)?; - } + self.write(&mut *writer, id)?; + self.write(&mut *writer, wire_expr)?; + self.write(&mut *writer, interest.as_u8())?; Ok(()) } @@ -1010,20 +893,15 @@ where } // Body - let options: u8 = self.codec.read(&mut *reader)?; - let interest = Interest::from(options); - - let mut wire_expr = None; - if interest.restricted() { - let ccond = Zenoh080Condition::new(interest.named()); - let mut we: WireExpr<'static> = ccond.read(&mut *reader)?; - we.mapping = if interest.mapping() { - Mapping::Sender - } else { - Mapping::Receiver - }; - 
wire_expr = Some(we); - } + let id: interest::InterestId = self.codec.read(&mut *reader)?; + let ccond = Zenoh080Condition::new(imsg::has_flag(self.header, token::flag::N)); + let mut wire_expr: WireExpr<'static> = ccond.read(&mut *reader)?; + wire_expr.mapping = if imsg::has_flag(self.header, token::flag::M) { + Mapping::Sender + } else { + Mapping::Receiver + }; + let interest: u8 = self.codec.read(&mut *reader)?; // Extensions let has_ext = imsg::has_flag(self.header, token::flag::Z); @@ -1032,12 +910,148 @@ where } Ok(interest::DeclareInterest { - interest, + id, wire_expr, + interest: interest.into(), }) } } +// FinalInterest +impl WCodec<&interest::FinalInterest, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &interest::FinalInterest) -> Self::Output { + let interest::FinalInterest { id } = x; + + // Header + let header = declare::id::F_INTEREST; + self.write(&mut *writer, header)?; + + // Body + self.write(&mut *writer, id)?; + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != declare::id::F_INTEREST { + return Err(DidntRead); + } + + // Body + let id: interest::InterestId = self.codec.read(&mut *reader)?; + + // Extensions + let has_ext = imsg::has_flag(self.header, token::flag::Z); + if has_ext { + extension::skip_all(reader, "FinalInterest")?; + } + + Ok(interest::FinalInterest { id }) + } +} + +// UndeclareInterest +impl WCodec<&interest::UndeclareInterest, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &interest::UndeclareInterest) -> 
Self::Output { + let interest::UndeclareInterest { id, ext_wire_expr } = x; + + // Header + let header = declare::id::U_INTEREST | interest::flag::Z; + self.write(&mut *writer, header)?; + + // Body + self.write(&mut *writer, id)?; + + // Extension + self.write(&mut *writer, (ext_wire_expr, false))?; + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != declare::id::U_INTEREST { + return Err(DidntRead); + } + + // Body + let id: interest::InterestId = self.codec.read(&mut *reader)?; + + // Extensions + // WARNING: this is a temporary and mandatory extension used for undeclarations + let mut ext_wire_expr = common::ext::WireExprType::null(); + + let mut has_ext = imsg::has_flag(self.header, interest::flag::Z); + while has_ext { + let ext: u8 = self.codec.read(&mut *reader)?; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + common::ext::WireExprExt::ID => { + let (we, ext): (common::ext::WireExprType, bool) = eodec.read(&mut *reader)?; + ext_wire_expr = we; + has_ext = ext; + } + _ => { + has_ext = extension::skip(reader, "UndeclareInterest", ext)?; + } + } + } + + Ok(interest::UndeclareInterest { id, ext_wire_expr }) + } +} + // WARNING: this is a temporary extension used for undeclarations impl WCodec<(&common::ext::WireExprType, bool), &mut W> for Zenoh080 where diff --git a/commons/zenoh-codec/src/network/mod.rs b/commons/zenoh-codec/src/network/mod.rs index 3a227cd42a..c1f2489b88 100644 --- a/commons/zenoh-codec/src/network/mod.rs +++ b/commons/zenoh-codec/src/network/mod.rs @@ -26,8 +26,8 @@ use zenoh_buffers::{ }; use zenoh_protocol::{ common::{imsg, ZExtZ64, ZExtZBufHeader}, - 
core::{EntityId, Reliability, ZenohId}, - network::{ext::EntityGlobalIdType, *}, + core::{Reliability, ZenohId}, + network::{ext::EntityIdType, *}, }; // NetworkMessage @@ -58,7 +58,7 @@ where type Error = DidntRead; fn read(self, reader: &mut R) -> Result { - let codec = Zenoh080Reliability::new(Reliability::DEFAULT); + let codec = Zenoh080Reliability::new(Reliability::default()); codec.read(reader) } } @@ -218,21 +218,21 @@ where } // Extension: EntityId -impl LCodec<&ext::EntityGlobalIdType<{ ID }>> for Zenoh080 { - fn w_len(self, x: &ext::EntityGlobalIdType<{ ID }>) -> usize { - let EntityGlobalIdType { zid, eid } = x; +impl LCodec<&ext::EntityIdType<{ ID }>> for Zenoh080 { + fn w_len(self, x: &ext::EntityIdType<{ ID }>) -> usize { + let EntityIdType { zid, eid } = x; 1 + self.w_len(zid) + self.w_len(*eid) } } -impl WCodec<(&ext::EntityGlobalIdType<{ ID }>, bool), &mut W> for Zenoh080 +impl WCodec<(&ext::EntityIdType<{ ID }>, bool), &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: (&ext::EntityGlobalIdType<{ ID }>, bool)) -> Self::Output { + fn write(self, writer: &mut W, x: (&ext::EntityIdType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; @@ -248,13 +248,13 @@ where } } -impl RCodec<(ext::EntityGlobalIdType<{ ID }>, bool), &mut R> for Zenoh080Header +impl RCodec<(ext::EntityIdType<{ ID }>, bool), &mut R> for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result<(ext::EntityGlobalIdType<{ ID }>, bool), Self::Error> { + fn read(self, reader: &mut R) -> Result<(ext::EntityIdType<{ ID }>, bool), Self::Error> { let (_, more): (ZExtZBufHeader<{ ID }>, bool) = self.read(&mut *reader)?; let flags: u8 = self.codec.read(&mut *reader)?; @@ -263,8 +263,8 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut 
*reader)?; - let eid: EntityId = self.codec.read(&mut *reader)?; + let eid: u32 = self.codec.read(&mut *reader)?; - Ok((ext::EntityGlobalIdType { zid, eid }, more)) + Ok((ext::EntityIdType { zid, eid }, more)) } } diff --git a/commons/zenoh-codec/src/network/oam.rs b/commons/zenoh-codec/src/network/oam.rs index 9751e9952d..ff6daeb020 100644 --- a/commons/zenoh-codec/src/network/oam.rs +++ b/commons/zenoh-codec/src/network/oam.rs @@ -52,7 +52,8 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = + ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -62,7 +63,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -114,7 +115,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/network/push.rs b/commons/zenoh-codec/src/network/push.rs index b9ec2ba5db..10a8489b29 100644 --- a/commons/zenoh-codec/src/network/push.rs +++ b/commons/zenoh-codec/src/network/push.rs @@ -44,13 +44,13 @@ where // Header let mut header = id::PUSH; - let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); + + ((ext_nodeid != &ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::DEFAULT { + if wire_expr.mapping != Mapping::default() { header |= flag::M; } if wire_expr.has_suffix() { @@ -62,7 +62,7 @@ where self.write(&mut *writer, wire_expr)?; // 
Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -70,7 +70,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::DEFAULT { + if ext_nodeid != &ext::NodeIdType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -116,9 +116,9 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::DEFAULT; + let mut ext_nodeid = ext::NodeIdType::default(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/network/request.rs b/commons/zenoh-codec/src/network/request.rs index 6173840d7e..19711ff147 100644 --- a/commons/zenoh-codec/src/network/request.rs +++ b/commons/zenoh-codec/src/network/request.rs @@ -43,6 +43,8 @@ where ext::TargetType::BestMatching => 0, ext::TargetType::All => 1, ext::TargetType::AllComplete => 2, + #[cfg(feature = "complete_n")] + ext::TargetType::Complete(n) => 3 + *n, }; let ext = ext::Target::new(v); self.write(&mut *writer, (&ext, more)) @@ -61,6 +63,9 @@ where 0 => ext::TargetType::BestMatching, 1 => ext::TargetType::All, 2 => ext::TargetType::AllComplete, + #[cfg(feature = "complete_n")] + n => ext::TargetType::Complete(n - 3), + #[cfg(not(feature = "complete_n"))] _ => return Err(DidntRead), }; Ok((rt, more)) @@ -88,16 +93,16 @@ where // Header let mut header = id::REQUEST; - let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8) - + ((ext_target != &ext::TargetType::DEFAULT) as u8) + + ((ext_target != &ext::TargetType::default()) as u8) + (ext_budget.is_some() as u8) + (ext_timeout.is_some() as u8) - + ((ext_nodeid != &ext::NodeIdType::DEFAULT) as u8); + + ((ext_nodeid 
!= &ext::NodeIdType::default()) as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::DEFAULT { + if wire_expr.mapping != Mapping::default() { header |= flag::M; } if wire_expr.has_suffix() { @@ -110,7 +115,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -118,7 +123,7 @@ where n_exts -= 1; self.write(&mut *writer, (ts, n_exts != 0))?; } - if ext_target != &ext::TargetType::DEFAULT { + if ext_target != &ext::TargetType::default() { n_exts -= 1; self.write(&mut *writer, (ext_target, n_exts != 0))?; } @@ -132,7 +137,7 @@ where let e = ext::Timeout::new(to.as_millis() as u64); self.write(&mut *writer, (&e, n_exts != 0))?; } - if ext_nodeid != &ext::NodeIdType::DEFAULT { + if ext_nodeid != &ext::NodeIdType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_nodeid, n_exts != 0))?; } @@ -180,10 +185,10 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut ext_tstamp = None; - let mut ext_nodeid = ext::NodeIdType::DEFAULT; - let mut ext_target = ext::TargetType::DEFAULT; + let mut ext_nodeid = ext::NodeIdType::default(); + let mut ext_target = ext::TargetType::default(); let mut ext_limit = None; let mut ext_timeout = None; diff --git a/commons/zenoh-codec/src/network/response.rs b/commons/zenoh-codec/src/network/response.rs index 5b69e8b109..bec7df2967 100644 --- a/commons/zenoh-codec/src/network/response.rs +++ b/commons/zenoh-codec/src/network/response.rs @@ -48,13 +48,13 @@ where // Header let mut header = id::RESPONSE; - let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + let mut n_exts = ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8) + (ext_respid.is_some() as u8); if n_exts != 0 { header |= flag::Z; } - if wire_expr.mapping != Mapping::DEFAULT { + if 
wire_expr.mapping != Mapping::default() { header |= flag::M; } if wire_expr.has_suffix() { @@ -67,7 +67,7 @@ where self.write(&mut *writer, wire_expr)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -123,7 +123,7 @@ where }; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut ext_tstamp = None; let mut ext_respid = None; @@ -183,7 +183,8 @@ where // Header let mut header = id::RESPONSE_FINAL; - let mut n_exts = ((ext_qos != &ext::QoSType::DEFAULT) as u8) + (ext_tstamp.is_some() as u8); + let mut n_exts = + ((ext_qos != &ext::QoSType::default()) as u8) + (ext_tstamp.is_some() as u8); if n_exts != 0 { header |= flag::Z; } @@ -193,7 +194,7 @@ where self.write(&mut *writer, rid)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -235,7 +236,7 @@ where let rid: RequestId = bodec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut ext_tstamp = None; let mut has_ext = imsg::has_flag(self.header, flag::Z); diff --git a/commons/zenoh-codec/src/transport/fragment.rs b/commons/zenoh-codec/src/transport/fragment.rs index b01e2c2bae..b66f395df1 100644 --- a/commons/zenoh-codec/src/transport/fragment.rs +++ b/commons/zenoh-codec/src/transport/fragment.rs @@ -48,7 +48,7 @@ where if *more { header |= flag::M; } - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -57,7 +57,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { self.write(&mut *writer, (*ext_qos, false))?; } @@ -97,7 +97,7 @@ where let sn: TransportSn = 
self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/frame.rs b/commons/zenoh-codec/src/transport/frame.rs index ab82a024c4..8d39aabcdb 100644 --- a/commons/zenoh-codec/src/transport/frame.rs +++ b/commons/zenoh-codec/src/transport/frame.rs @@ -46,7 +46,7 @@ where if let Reliability::Reliable = reliability { header |= flag::R; } - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { header |= flag::Z; } self.write(&mut *writer, header)?; @@ -55,7 +55,7 @@ where self.write(&mut *writer, sn)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { self.write(&mut *writer, (x.ext_qos, false))?; } @@ -94,7 +94,7 @@ where let sn: TransportSn = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/transport/join.rs b/commons/zenoh-codec/src/transport/join.rs index d87ceecc78..80c1663413 100644 --- a/commons/zenoh-codec/src/transport/join.rs +++ b/commons/zenoh-codec/src/transport/join.rs @@ -121,7 +121,7 @@ where let (_, more): (ZExtZBufHeader<{ ext::QoS::ID }>, bool) = self.read(&mut *reader)?; // Body - let mut ext_qos = Box::new([PrioritySn::DEFAULT; Priority::NUM]); + let mut ext_qos = Box::new([PrioritySn::default(); Priority::NUM]); for p in ext_qos.iter_mut() { *p = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/transport/oam.rs b/commons/zenoh-codec/src/transport/oam.rs index 6861f638d3..e2f905abf8 100644 --- a/commons/zenoh-codec/src/transport/oam.rs +++ b/commons/zenoh-codec/src/transport/oam.rs @@ -47,7 +47,7 @@ where header |= iext::ENC_ZBUF; } } - let mut n_exts = (ext_qos != 
&ext::QoSType::DEFAULT) as u8; + let mut n_exts = (ext_qos != &ext::QoSType::default()) as u8; if n_exts != 0 { header |= flag::Z; } @@ -57,7 +57,7 @@ where self.write(&mut *writer, id)?; // Extensions - if ext_qos != &ext::QoSType::DEFAULT { + if ext_qos != &ext::QoSType::default() { n_exts -= 1; self.write(&mut *writer, (*ext_qos, n_exts != 0))?; } @@ -105,7 +105,7 @@ where let id: OamId = self.codec.read(&mut *reader)?; // Extensions - let mut ext_qos = ext::QoSType::DEFAULT; + let mut ext_qos = ext::QoSType::default(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { diff --git a/commons/zenoh-codec/src/zenoh/ack.rs b/commons/zenoh-codec/src/zenoh/ack.rs new file mode 100644 index 0000000000..78cbca2987 --- /dev/null +++ b/commons/zenoh-codec/src/zenoh/ack.rs @@ -0,0 +1,129 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; +use alloc::vec::Vec; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; +use zenoh_protocol::{ + common::{iext, imsg}, + zenoh::{ + ack::{ext, flag, Ack}, + id, + }, +}; + +impl WCodec<&Ack, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Ack) -> Self::Output { + let Ack { + timestamp, + ext_sinfo, + ext_unknown, + } = x; + + // Header + let mut header = id::ACK; + if timestamp.is_some() { + header |= flag::T; + } + let mut n_exts = ((ext_sinfo.is_some()) as u8) + (ext_unknown.len() as u8); + if n_exts != 0 { + header |= flag::Z; + } + self.write(&mut *writer, header)?; + + // Body + if let Some(ts) = timestamp.as_ref() { + self.write(&mut *writer, ts)?; + } + + // Extensions + if let Some(sinfo) = ext_sinfo.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (sinfo, n_exts != 0))?; + } + for u in ext_unknown.iter() { + n_exts -= 1; + self.write(&mut *writer, (u, n_exts != 0))?; + } + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != id::ACK { + return Err(DidntRead); + } + + // Body + let mut timestamp: Option = None; + if imsg::has_flag(self.header, flag::T) { + timestamp = Some(self.codec.read(&mut *reader)?); + } + + // Extensions + let mut ext_sinfo: Option = None; + let mut ext_unknown = Vec::new(); + + let mut has_ext = imsg::has_flag(self.header, flag::Z); + while has_ext { + let ext: u8 = 
self.codec.read(&mut *reader)?; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + ext::SourceInfo::ID => { + let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; + ext_sinfo = Some(s); + has_ext = ext; + } + _ => { + let (u, ext) = extension::read(reader, "Ack", ext)?; + ext_unknown.push(u); + has_ext = ext; + } + } + } + + Ok(Ack { + timestamp, + ext_sinfo, + ext_unknown, + }) + } +} diff --git a/commons/zenoh-codec/src/zenoh/err.rs b/commons/zenoh-codec/src/zenoh/err.rs index b459f67b3f..5cef1a6389 100644 --- a/commons/zenoh-codec/src/zenoh/err.rs +++ b/commons/zenoh-codec/src/zenoh/err.rs @@ -11,16 +11,14 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Bounded, Zenoh080Header}; +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, - ZBuf, }; use zenoh_protocol::{ common::{iext, imsg}, - core::Encoding, zenoh::{ err::{ext, flag, Err}, id, @@ -35,26 +33,33 @@ where fn write(self, writer: &mut W, x: &Err) -> Self::Output { let Err { - encoding, + code, + is_infrastructure, + timestamp, ext_sinfo, + ext_body, ext_unknown, - payload, } = x; // Header let mut header = id::ERR; - if encoding != &Encoding::empty() { - header |= flag::E; + if timestamp.is_some() { + header |= flag::T; } - let mut n_exts = (ext_sinfo.is_some() as u8) + (ext_unknown.len() as u8); + if *is_infrastructure { + header |= flag::I; + } + let mut n_exts = + (ext_sinfo.is_some() as u8) + (ext_body.is_some() as u8) + (ext_unknown.len() as u8); if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if encoding != &Encoding::empty() { - self.write(&mut *writer, encoding)?; + self.write(&mut *writer, code)?; + if let Some(ts) = timestamp.as_ref() { + self.write(&mut *writer, ts)?; } // Extensions @@ -62,15 +67,15 @@ where n_exts -= 1; self.write(&mut 
*writer, (sinfo, n_exts != 0))?; } + if let Some(body) = ext_body.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (body, n_exts != 0))?; + } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } - // Payload - let bodec = Zenoh080Bounded::::new(); - bodec.write(&mut *writer, payload)?; - Ok(()) } } @@ -100,13 +105,16 @@ where } // Body - let mut encoding = Encoding::empty(); - if imsg::has_flag(self.header, flag::E) { - encoding = self.codec.read(&mut *reader)?; + let code: u16 = self.codec.read(&mut *reader)?; + let is_infrastructure = imsg::has_flag(self.header, flag::I); + let mut timestamp: Option = None; + if imsg::has_flag(self.header, flag::T) { + timestamp = Some(self.codec.read(&mut *reader)?); } // Extensions let mut ext_sinfo: Option = None; + let mut ext_body: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); @@ -119,6 +127,11 @@ where ext_sinfo = Some(s); has_ext = ext; } + ext::ErrBodyType::VID | ext::ErrBodyType::SID => { + let (s, ext): (ext::ErrBodyType, bool) = eodec.read(&mut *reader)?; + ext_body = Some(s); + has_ext = ext; + } _ => { let (u, ext) = extension::read(reader, "Err", ext)?; ext_unknown.push(u); @@ -127,15 +140,13 @@ where } } - // Payload - let bodec = Zenoh080Bounded::::new(); - let payload: ZBuf = bodec.read(&mut *reader)?; - Ok(Err { - encoding, + code, + is_infrastructure, + timestamp, ext_sinfo, + ext_body, ext_unknown, - payload, }) } } diff --git a/commons/zenoh-codec/src/zenoh/mod.rs b/commons/zenoh-codec/src/zenoh/mod.rs index dc38e5ee84..2e3ea48be7 100644 --- a/commons/zenoh-codec/src/zenoh/mod.rs +++ b/commons/zenoh-codec/src/zenoh/mod.rs @@ -11,8 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod ack; pub mod del; pub mod err; +pub mod pull; pub mod put; pub mod query; pub mod reply; @@ -31,7 +33,7 @@ use zenoh_buffers::{ use zenoh_protocol::common::{iext, ZExtUnit}; use zenoh_protocol::{ common::{imsg, 
ZExtZBufHeader}, - core::{Encoding, EntityGlobalId, EntityId, ZenohId}, + core::{Encoding, ZenohId}, zenoh::{ext, id, PushBody, RequestBody, ResponseBody}, }; @@ -80,6 +82,9 @@ where fn write(self, writer: &mut W, x: &RequestBody) -> Self::Output { match x { RequestBody::Query(b) => self.write(&mut *writer, b), + RequestBody::Put(b) => self.write(&mut *writer, b), + RequestBody::Del(b) => self.write(&mut *writer, b), + RequestBody::Pull(b) => self.write(&mut *writer, b), } } } @@ -96,6 +101,9 @@ where let codec = Zenoh080Header::new(header); let body = match imsg::mid(codec.header) { id::QUERY => RequestBody::Query(codec.read(&mut *reader)?), + id::PUT => RequestBody::Put(codec.read(&mut *reader)?), + id::DEL => RequestBody::Del(codec.read(&mut *reader)?), + id::PULL => RequestBody::Pull(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -114,6 +122,8 @@ where match x { ResponseBody::Reply(b) => self.write(&mut *writer, b), ResponseBody::Err(b) => self.write(&mut *writer, b), + ResponseBody::Ack(b) => self.write(&mut *writer, b), + ResponseBody::Put(b) => self.write(&mut *writer, b), } } } @@ -131,6 +141,8 @@ where let body = match imsg::mid(codec.header) { id::REPLY => ResponseBody::Reply(codec.read(&mut *reader)?), id::ERR => ResponseBody::Err(codec.read(&mut *reader)?), + id::ACK => ResponseBody::Ack(codec.read(&mut *reader)?), + id::PUT => ResponseBody::Put(codec.read(&mut *reader)?), _ => return Err(DidntRead), }; @@ -141,9 +153,9 @@ where // Extension: SourceInfo impl LCodec<&ext::SourceInfoType<{ ID }>> for Zenoh080 { fn w_len(self, x: &ext::SourceInfoType<{ ID }>) -> usize { - let ext::SourceInfoType { id, sn } = x; + let ext::SourceInfoType { zid, eid, sn } = x; - 1 + self.w_len(&id.zid) + self.w_len(id.eid) + self.w_len(*sn) + 1 + self.w_len(zid) + self.w_len(*eid) + self.w_len(*sn) } } @@ -155,18 +167,18 @@ where fn write(self, writer: &mut W, x: (&ext::SourceInfoType<{ ID }>, bool)) -> Self::Output { let (x, more) = x; - let 
ext::SourceInfoType { id, sn } = x; + let ext::SourceInfoType { zid, eid, sn } = x; let header: ZExtZBufHeader<{ ID }> = ZExtZBufHeader::new(self.w_len(x)); self.write(&mut *writer, (&header, more))?; - let flags: u8 = (id.zid.size() as u8 - 1) << 4; + let flags: u8 = (zid.size() as u8 - 1) << 4; self.write(&mut *writer, flags)?; - let lodec = Zenoh080Length::new(id.zid.size()); - lodec.write(&mut *writer, &id.zid)?; + let lodec = Zenoh080Length::new(zid.size()); + lodec.write(&mut *writer, zid)?; - self.write(&mut *writer, id.eid)?; + self.write(&mut *writer, eid)?; self.write(&mut *writer, sn)?; Ok(()) } @@ -187,16 +199,10 @@ where let lodec = Zenoh080Length::new(length); let zid: ZenohId = lodec.read(&mut *reader)?; - let eid: EntityId = self.codec.read(&mut *reader)?; + let eid: u32 = self.codec.read(&mut *reader)?; let sn: u32 = self.codec.read(&mut *reader)?; - Ok(( - ext::SourceInfoType { - id: EntityGlobalId { zid, eid }, - sn, - }, - more, - )) + Ok((ext::SourceInfoType { zid, eid, sn }, more)) } } diff --git a/commons/zenoh-codec/src/zenoh/pull.rs b/commons/zenoh-codec/src/zenoh/pull.rs new file mode 100644 index 0000000000..dc71901d58 --- /dev/null +++ b/commons/zenoh-codec/src/zenoh/pull.rs @@ -0,0 +1,93 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; +use alloc::vec::Vec; +use zenoh_buffers::{ + reader::{DidntRead, Reader}, + writer::{DidntWrite, Writer}, +}; + +use zenoh_protocol::{ + common::imsg, + zenoh::{ + id, + pull::{flag, Pull}, + }, +}; + +impl WCodec<&Pull, &mut W> for Zenoh080 +where + W: Writer, +{ + type Output = Result<(), DidntWrite>; + + fn write(self, writer: &mut W, x: &Pull) -> Self::Output { + let Pull { ext_unknown } = x; + + // Header + let mut header = id::PULL; + let mut n_exts = ext_unknown.len() as u8; + if n_exts != 0 { + header |= flag::Z; + } + self.write(&mut *writer, header)?; + + // Extensions + for u in ext_unknown.iter() { + n_exts -= 1; + self.write(&mut *writer, (u, n_exts != 0))?; + } + + Ok(()) + } +} + +impl RCodec for Zenoh080 +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + let header: u8 = self.read(&mut *reader)?; + let codec = Zenoh080Header::new(header); + codec.read(reader) + } +} + +impl RCodec for Zenoh080Header +where + R: Reader, +{ + type Error = DidntRead; + + fn read(self, reader: &mut R) -> Result { + if imsg::mid(self.header) != id::PULL { + return Err(DidntRead); + } + + // Extensions + let mut ext_unknown = Vec::new(); + + let mut has_ext = imsg::has_flag(self.header, flag::Z); + while has_ext { + let ext: u8 = self.codec.read(&mut *reader)?; + let (u, ext) = extension::read(reader, "Pull", ext)?; + ext_unknown.push(u); + has_ext = ext; + } + + Ok(Pull { ext_unknown }) + } +} diff --git a/commons/zenoh-codec/src/zenoh/put.rs b/commons/zenoh-codec/src/zenoh/put.rs index 776b47245f..ebc364cf9b 100644 --- a/commons/zenoh-codec/src/zenoh/put.rs +++ b/commons/zenoh-codec/src/zenoh/put.rs @@ -54,7 +54,7 @@ where if timestamp.is_some() { header |= flag::T; } - if encoding != &Encoding::empty() { + if encoding != &Encoding::default() { 
header |= flag::E; } let mut n_exts = (ext_sinfo.is_some()) as u8 @@ -73,7 +73,7 @@ where if let Some(ts) = timestamp.as_ref() { self.write(&mut *writer, ts)?; } - if encoding != &Encoding::empty() { + if encoding != &Encoding::default() { self.write(&mut *writer, encoding)?; } @@ -143,7 +143,7 @@ where timestamp = Some(self.codec.read(&mut *reader)?); } - let mut encoding = Encoding::empty(); + let mut encoding = Encoding::default(); if imsg::has_flag(self.header, flag::E) { encoding = self.codec.read(&mut *reader)?; } diff --git a/commons/zenoh-codec/src/zenoh/query.rs b/commons/zenoh-codec/src/zenoh/query.rs index efac7b5671..09b01b2266 100644 --- a/commons/zenoh-codec/src/zenoh/query.rs +++ b/commons/zenoh-codec/src/zenoh/query.rs @@ -22,44 +22,48 @@ use zenoh_protocol::{ common::{iext, imsg}, zenoh::{ id, - query::{ext, flag, Consolidation, Query}, + query::{ext, flag, Query}, }, }; -// Consolidation -impl WCodec for Zenoh080 +// Extension Consolidation +impl WCodec<(ext::ConsolidationType, bool), &mut W> for Zenoh080 where W: Writer, { type Output = Result<(), DidntWrite>; - fn write(self, writer: &mut W, x: Consolidation) -> Self::Output { + fn write(self, writer: &mut W, x: (ext::ConsolidationType, bool)) -> Self::Output { + let (x, more) = x; let v: u64 = match x { - Consolidation::Auto => 0, - Consolidation::None => 1, - Consolidation::Monotonic => 2, - Consolidation::Latest => 3, + ext::ConsolidationType::Auto => 0, + ext::ConsolidationType::None => 1, + ext::ConsolidationType::Monotonic => 2, + ext::ConsolidationType::Latest => 3, + ext::ConsolidationType::Unique => 4, }; - self.write(&mut *writer, v) + let v = ext::Consolidation::new(v); + self.write(&mut *writer, (&v, more)) } } -impl RCodec for Zenoh080 +impl RCodec<(ext::ConsolidationType, bool), &mut R> for Zenoh080Header where R: Reader, { type Error = DidntRead; - fn read(self, reader: &mut R) -> Result { - let v: u64 = self.read(&mut *reader)?; - let c = match v { - 0 => Consolidation::Auto, - 1 
=> Consolidation::None, - 2 => Consolidation::Monotonic, - 3 => Consolidation::Latest, - _ => Consolidation::Auto, // Fallback on Auto if Consolidation is unknown + fn read(self, reader: &mut R) -> Result<(ext::ConsolidationType, bool), Self::Error> { + let (ext, more): (ext::Consolidation, bool) = self.read(&mut *reader)?; + let c = match ext.value { + 0 => ext::ConsolidationType::Auto, + 1 => ext::ConsolidationType::None, + 2 => ext::ConsolidationType::Monotonic, + 3 => ext::ConsolidationType::Latest, + 4 => ext::ConsolidationType::Unique, + _ => return Err(DidntRead), }; - Ok(c) + Ok((c, more)) } } @@ -71,9 +75,9 @@ where fn write(self, writer: &mut W, x: &Query) -> Self::Output { let Query { - consolidation, parameters, ext_sinfo, + ext_consolidation, ext_body, ext_attachment, ext_unknown, @@ -81,13 +85,11 @@ where // Header let mut header = id::QUERY; - if consolidation != &Consolidation::DEFAULT { - header |= flag::C; - } if !parameters.is_empty() { header |= flag::P; } let mut n_exts = (ext_sinfo.is_some() as u8) + + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + (ext_body.is_some() as u8) + (ext_attachment.is_some() as u8) + (ext_unknown.len() as u8); @@ -97,9 +99,6 @@ where self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::DEFAULT { - self.write(&mut *writer, *consolidation)?; - } if !parameters.is_empty() { self.write(&mut *writer, parameters)?; } @@ -109,6 +108,10 @@ where n_exts -= 1; self.write(&mut *writer, (sinfo, n_exts != 0))?; } + if ext_consolidation != &ext::ConsolidationType::default() { + n_exts -= 1; + self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; + } if let Some(body) = ext_body.as_ref() { n_exts -= 1; self.write(&mut *writer, (body, n_exts != 0))?; @@ -151,11 +154,6 @@ where } // Body - let mut consolidation = Consolidation::DEFAULT; - if imsg::has_flag(self.header, flag::C) { - consolidation = self.codec.read(&mut *reader)?; - } - let mut parameters = String::new(); if 
imsg::has_flag(self.header, flag::P) { parameters = self.codec.read(&mut *reader)?; @@ -163,6 +161,7 @@ where // Extensions let mut ext_sinfo: Option = None; + let mut ext_consolidation = ext::ConsolidationType::default(); let mut ext_body: Option = None; let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); @@ -177,6 +176,11 @@ where ext_sinfo = Some(s); has_ext = ext; } + ext::Consolidation::ID => { + let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; + ext_consolidation = c; + has_ext = ext; + } ext::QueryBodyType::SID | ext::QueryBodyType::VID => { let (s, ext): (ext::QueryBodyType, bool) = eodec.read(&mut *reader)?; ext_body = Some(s); @@ -196,9 +200,9 @@ where } Ok(Query { - consolidation, parameters, ext_sinfo, + ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-codec/src/zenoh/reply.rs b/commons/zenoh-codec/src/zenoh/reply.rs index 308004a1c2..d98c72b341 100644 --- a/commons/zenoh-codec/src/zenoh/reply.rs +++ b/commons/zenoh-codec/src/zenoh/reply.rs @@ -11,18 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // +#[cfg(not(feature = "shared-memory"))] +use crate::Zenoh080Bounded; +#[cfg(feature = "shared-memory")] +use crate::Zenoh080Sliced; use crate::{common::extension, RCodec, WCodec, Zenoh080, Zenoh080Header}; use alloc::vec::Vec; use zenoh_buffers::{ reader::{DidntRead, Reader}, writer::{DidntWrite, Writer}, + ZBuf, }; use zenoh_protocol::{ - common::imsg, + common::{iext, imsg}, + core::Encoding, zenoh::{ id, - query::Consolidation, - reply::{flag, Reply, ReplyBody}, + reply::{ext, flag, Reply}, }, }; @@ -34,35 +39,81 @@ where fn write(self, writer: &mut W, x: &Reply) -> Self::Output { let Reply { - consolidation, + timestamp, + encoding, + ext_sinfo, + ext_consolidation, + #[cfg(feature = "shared-memory")] + ext_shm, + ext_attachment, ext_unknown, payload, } = x; // Header let mut header = id::REPLY; - if consolidation != &Consolidation::DEFAULT { - header |= flag::C; + if 
timestamp.is_some() { + header |= flag::T; + } + if encoding != &Encoding::default() { + header |= flag::E; + } + let mut n_exts = (ext_sinfo.is_some()) as u8 + + ((ext_consolidation != &ext::ConsolidationType::default()) as u8) + + (ext_attachment.is_some()) as u8 + + (ext_unknown.len() as u8); + #[cfg(feature = "shared-memory")] + { + n_exts += ext_shm.is_some() as u8; } - let mut n_exts = ext_unknown.len() as u8; if n_exts != 0 { header |= flag::Z; } self.write(&mut *writer, header)?; // Body - if consolidation != &Consolidation::DEFAULT { - self.write(&mut *writer, *consolidation)?; + if let Some(ts) = timestamp.as_ref() { + self.write(&mut *writer, ts)?; + } + if encoding != &Encoding::default() { + self.write(&mut *writer, encoding)?; } // Extensions + if let Some(sinfo) = ext_sinfo.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (sinfo, n_exts != 0))?; + } + if ext_consolidation != &ext::ConsolidationType::default() { + n_exts -= 1; + self.write(&mut *writer, (*ext_consolidation, n_exts != 0))?; + } + #[cfg(feature = "shared-memory")] + if let Some(eshm) = ext_shm.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (eshm, n_exts != 0))?; + } + if let Some(att) = ext_attachment.as_ref() { + n_exts -= 1; + self.write(&mut *writer, (att, n_exts != 0))?; + } for u in ext_unknown.iter() { n_exts -= 1; self.write(&mut *writer, (u, n_exts != 0))?; } // Payload - self.write(&mut *writer, payload)?; + #[cfg(feature = "shared-memory")] + { + let codec = Zenoh080Sliced::::new(ext_shm.is_some()); + codec.write(&mut *writer, payload)?; + } + + #[cfg(not(feature = "shared-memory"))] + { + let bodec = Zenoh080Bounded::::new(); + bodec.write(&mut *writer, payload)?; + } Ok(()) } @@ -93,27 +144,81 @@ where } // Body - let mut consolidation = Consolidation::DEFAULT; - if imsg::has_flag(self.header, flag::C) { - consolidation = self.codec.read(&mut *reader)?; + let mut timestamp: Option = None; + if imsg::has_flag(self.header, flag::T) { + timestamp = 
Some(self.codec.read(&mut *reader)?); + } + + let mut encoding = Encoding::default(); + if imsg::has_flag(self.header, flag::E) { + encoding = self.codec.read(&mut *reader)?; } // Extensions + let mut ext_sinfo: Option = None; + let mut ext_consolidation = ext::ConsolidationType::default(); + #[cfg(feature = "shared-memory")] + let mut ext_shm: Option = None; + let mut ext_attachment: Option = None; let mut ext_unknown = Vec::new(); let mut has_ext = imsg::has_flag(self.header, flag::Z); while has_ext { let ext: u8 = self.codec.read(&mut *reader)?; - let (u, ext) = extension::read(reader, "Reply", ext)?; - ext_unknown.push(u); - has_ext = ext; + let eodec = Zenoh080Header::new(ext); + match iext::eid(ext) { + ext::SourceInfo::ID => { + let (s, ext): (ext::SourceInfoType, bool) = eodec.read(&mut *reader)?; + ext_sinfo = Some(s); + has_ext = ext; + } + ext::Consolidation::ID => { + let (c, ext): (ext::ConsolidationType, bool) = eodec.read(&mut *reader)?; + ext_consolidation = c; + has_ext = ext; + } + #[cfg(feature = "shared-memory")] + ext::Shm::ID => { + let (s, ext): (ext::ShmType, bool) = eodec.read(&mut *reader)?; + ext_shm = Some(s); + has_ext = ext; + } + ext::Attachment::ID => { + let (a, ext): (ext::AttachmentType, bool) = eodec.read(&mut *reader)?; + ext_attachment = Some(a); + has_ext = ext; + } + _ => { + let (u, ext) = extension::read(reader, "Reply", ext)?; + ext_unknown.push(u); + has_ext = ext; + } + } } // Payload - let payload: ReplyBody = self.codec.read(&mut *reader)?; + let payload: ZBuf = { + #[cfg(feature = "shared-memory")] + { + let codec = Zenoh080Sliced::::new(ext_shm.is_some()); + codec.read(&mut *reader)? + } + + #[cfg(not(feature = "shared-memory"))] + { + let bodec = Zenoh080Bounded::::new(); + bodec.read(&mut *reader)? 
+ } + }; Ok(Reply { - consolidation, + timestamp, + encoding, + ext_sinfo, + ext_consolidation, + #[cfg(feature = "shared-memory")] + ext_shm, + ext_attachment, ext_unknown, payload, }) diff --git a/commons/zenoh-codec/tests/codec.rs b/commons/zenoh-codec/tests/codec.rs index d28ba9a4d3..3fdb95e1b5 100644 --- a/commons/zenoh-codec/tests/codec.rs +++ b/commons/zenoh-codec/tests/codec.rs @@ -31,22 +31,6 @@ use zenoh_protocol::{ zenoh, zextunit, zextz64, zextzbuf, }; -#[test] -fn zbuf_test() { - let mut buffer = vec![0u8; 64]; - - let zbuf = ZBuf::empty(); - let mut writer = buffer.writer(); - - let codec = Zenoh080::new(); - codec.write(&mut writer, &zbuf).unwrap(); - println!("Buffer: {:?}", buffer); - - let mut reader = buffer.reader(); - let ret: ZBuf = codec.read(&mut reader).unwrap(); - assert_eq!(ret, zbuf); -} - const NUM_ITER: usize = 100; const MAX_PAYLOAD_SIZE: usize = 256; @@ -137,28 +121,10 @@ macro_rules! run { // Core #[test] fn codec_zint() { - run!(u8, { u8::MIN }); - run!(u8, { u8::MAX }); run!(u8, { thread_rng().gen::() }); - - run!(u16, { u16::MIN }); - run!(u16, { u16::MAX }); run!(u16, { thread_rng().gen::() }); - - run!(u32, { u32::MIN }); - run!(u32, { u32::MAX }); run!(u32, { thread_rng().gen::() }); - - run!(u64, { u64::MIN }); - run!(u64, { u64::MAX }); - let codec = Zenoh080::new(); - for i in 1..=codec.w_len(u64::MAX) { - run!(u64, { 1 << (7 * i) }); - } run!(u64, { thread_rng().gen::() }); - - run!(usize, { usize::MIN }); - run!(usize, { usize::MAX }); run!(usize, thread_rng().gen::()); } @@ -172,12 +138,11 @@ fn codec_zint_len() { codec.write(&mut writer, n).unwrap(); assert_eq!(codec.w_len(n), buff.len()); - for i in 1..=codec.w_len(u64::MAX) { + for i in 1..=9 { let mut buff = vec![]; let mut writer = buff.writer(); let n: u64 = 1 << (7 * i); codec.write(&mut writer, n).unwrap(); - println!("ZInt len: {} {:02x?}", n, buff); assert_eq!(codec.w_len(n), buff.len()); } @@ -591,7 +556,7 @@ fn codec_network() { run!(NetworkMessage, 
NetworkMessage::rand()); } -// Zenoh +// Zenoh new #[test] fn codec_put() { run!(zenoh::Put, zenoh::Put::rand()); @@ -616,3 +581,13 @@ fn codec_reply() { fn codec_err() { run!(zenoh::Err, zenoh::Err::rand()); } + +#[test] +fn codec_ack() { + run!(zenoh::Ack, zenoh::Ack::rand()); +} + +#[test] +fn codec_pull() { + run!(zenoh::Pull, zenoh::Pull::rand()); +} diff --git a/commons/zenoh-collections/src/ring_buffer.rs b/commons/zenoh-collections/src/ring_buffer.rs index e9f7909d5f..fd60030ebc 100644 --- a/commons/zenoh-collections/src/ring_buffer.rs +++ b/commons/zenoh-collections/src/ring_buffer.rs @@ -40,15 +40,6 @@ impl RingBuffer { Some(elem) } - #[inline] - pub fn push_force(&mut self, elem: T) -> Option { - self.push(elem).and_then(|elem| { - let ret = self.buffer.pop_front(); - self.buffer.push_back(elem); - ret - }) - } - #[inline] pub fn pull(&mut self) -> Option { let x = self.buffer.pop_front(); diff --git a/commons/zenoh-collections/src/single_or_vec.rs b/commons/zenoh-collections/src/single_or_vec.rs index ed82bf49af..c68ac6d8ff 100644 --- a/commons/zenoh-collections/src/single_or_vec.rs +++ b/commons/zenoh-collections/src/single_or_vec.rs @@ -30,10 +30,6 @@ enum SingleOrVecInner { } impl SingleOrVecInner { - const fn empty() -> Self { - SingleOrVecInner::Vec(Vec::new()) - } - fn push(&mut self, value: T) { match self { SingleOrVecInner::Vec(vec) if vec.capacity() == 0 => *self = Self::Single(value), @@ -57,7 +53,7 @@ where impl Default for SingleOrVecInner { fn default() -> Self { - Self::empty() + SingleOrVecInner::Vec(Vec::new()) } } @@ -92,10 +88,6 @@ where pub struct SingleOrVec(SingleOrVecInner); impl SingleOrVec { - pub const fn empty() -> Self { - Self(SingleOrVecInner::empty()) - } - pub fn push(&mut self, value: T) { self.0.push(value); } @@ -182,17 +174,14 @@ impl SingleOrVec { self.vectorize().insert(at, value); } } - enum DrainInner<'a, T> { Vec(alloc::vec::Drain<'a, T>), Single(&'a mut SingleOrVecInner), Done, } - pub struct Drain<'a, T> { 
inner: DrainInner<'a, T>, } - impl<'a, T> Iterator for Drain<'a, T> { type Item = T; diff --git a/commons/zenoh-protocol/Cargo.toml b/commons/zenoh-protocol/Cargo.toml index 9d7e35d690..93c92ee33f 100644 --- a/commons/zenoh-protocol/Cargo.toml +++ b/commons/zenoh-protocol/Cargo.toml @@ -36,6 +36,7 @@ std = [ test = ["rand", "zenoh-buffers/test"] shared-memory = ["std", "zenoh-buffers/shared-memory"] stats = [] +complete_n = [] [dependencies] const_format = { workspace = true } diff --git a/commons/zenoh-protocol/src/common/mod.rs b/commons/zenoh-protocol/src/common/mod.rs index ef53e5a8ac..d11d0b0c52 100644 --- a/commons/zenoh-protocol/src/common/mod.rs +++ b/commons/zenoh-protocol/src/common/mod.rs @@ -19,6 +19,21 @@ pub use extension::*; /*************************************/ // Inner Message IDs pub mod imsg { + pub mod id { + // Zenoh Messages + pub const DECLARE: u8 = 0x0b; + pub const DATA: u8 = 0x0c; + pub const QUERY: u8 = 0x0d; + pub const PULL: u8 = 0x0e; + pub const UNIT: u8 = 0x0f; + pub const LINK_STATE_LIST: u8 = 0x10; + + // Message decorators + pub const PRIORITY: u8 = 0x1c; + pub const ROUTING_CONTEXT: u8 = 0x1d; + pub const REPLY_CONTEXT: u8 = 0x1e; + } + // Header mask pub const HEADER_BITS: u8 = 5; pub const HEADER_MASK: u8 = !(0xff << HEADER_BITS); diff --git a/commons/zenoh-protocol/src/core/cowstr.rs b/commons/zenoh-protocol/src/core/cowstr.rs index 209d020f40..33dac4524f 100644 --- a/commons/zenoh-protocol/src/core/cowstr.rs +++ b/commons/zenoh-protocol/src/core/cowstr.rs @@ -21,7 +21,7 @@ enum CowStrInner<'a> { } pub struct CowStr<'a>(CowStrInner<'a>); impl<'a> CowStr<'a> { - pub(crate) const fn borrowed(s: &'a str) -> Self { + pub(crate) fn borrowed(s: &'a str) -> Self { Self(CowStrInner::Borrowed(s)) } pub fn as_str(&self) -> &str { diff --git a/commons/zenoh-protocol/src/core/encoding.rs b/commons/zenoh-protocol/src/core/encoding.rs index 70afdbf143..f202b8e79c 100644 --- a/commons/zenoh-protocol/src/core/encoding.rs +++ 
b/commons/zenoh-protocol/src/core/encoding.rs @@ -11,68 +11,280 @@ // Contributors: // ZettaScale Zenoh Team, // -use core::fmt::Debug; -use zenoh_buffers::ZSlice; - -pub type EncodingId = u16; - -/// [`Encoding`] is a metadata that indicates how the data payload should be interpreted. -/// For wire-efficiency and extensibility purposes, Zenoh defines an [`Encoding`] as -/// composed of an unsigned integer prefix and a bytes schema. The actual meaning of the -/// prefix and schema are out-of-scope of the protocol definition. Therefore, Zenoh does not -/// impose any encoding mapping and users are free to use any mapping they like. -/// Nevertheless, it is worth highlighting that Zenoh still provides a default mapping as part -/// of the API as per user convenience. That mapping has no impact on the Zenoh protocol definition. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Encoding { - pub id: EncodingId, - pub schema: Option, +use crate::core::CowStr; +use alloc::{borrow::Cow, string::String}; +use core::{ + convert::TryFrom, + fmt::{self, Debug}, + mem, +}; +use zenoh_result::{bail, zerror, ZError, ZResult}; + +mod consts { + pub(super) const MIMES: [&str; 21] = [ + /* 0 */ "", + /* 1 */ "application/octet-stream", + /* 2 */ "application/custom", // non iana standard + /* 3 */ "text/plain", + /* 4 */ "application/properties", // non iana standard + /* 5 */ "application/json", // if not readable from casual users + /* 6 */ "application/sql", + /* 7 */ "application/integer", // non iana standard + /* 8 */ "application/float", // non iana standard + /* 9 */ + "application/xml", // if not readable from casual users (RFC 3023, sec 3) + /* 10 */ "application/xhtml+xml", + /* 11 */ "application/x-www-form-urlencoded", + /* 12 */ "text/json", // non iana standard - if readable from casual users + /* 13 */ "text/html", + /* 14 */ "text/xml", // if readable from casual users (RFC 3023, section 3) + /* 15 */ "text/css", + /* 16 */ "text/csv", + /* 17 */ 
"text/javascript", + /* 18 */ "image/jpeg", + /* 19 */ "image/png", + /* 20 */ "image/gif", + ]; +} + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum KnownEncoding { + Empty = 0, + AppOctetStream = 1, + AppCustom = 2, + TextPlain = 3, + AppProperties = 4, + AppJson = 5, + AppSql = 6, + AppInteger = 7, + AppFloat = 8, + AppXml = 9, + AppXhtmlXml = 10, + AppXWwwFormUrlencoded = 11, + TextJson = 12, + TextHtml = 13, + TextXml = 14, + TextCss = 15, + TextCsv = 16, + TextJavascript = 17, + ImageJpeg = 18, + ImagePng = 19, + ImageGif = 20, +} + +impl From for u8 { + fn from(val: KnownEncoding) -> Self { + val as u8 + } +} + +impl From for &str { + fn from(val: KnownEncoding) -> Self { + consts::MIMES[u8::from(val) as usize] + } +} + +impl TryFrom for KnownEncoding { + type Error = ZError; + fn try_from(value: u8) -> Result { + if value < consts::MIMES.len() as u8 + 1 { + Ok(unsafe { mem::transmute(value) }) + } else { + Err(zerror!("Unknown encoding")) + } + } } -/// # Encoding field +impl AsRef for KnownEncoding { + fn as_ref(&self) -> &str { + consts::MIMES[u8::from(*self) as usize] + } +} + +/// The encoding of a zenoh `zenoh::Value`. /// -/// ```text -/// 7 6 5 4 3 2 1 0 -/// +-+-+-+-+-+-+-+-+ -/// ~ id: z16 |S~ -/// +---------------+ -/// ~schema: ~ -- if S==1 -/// +---------------+ -/// ``` -pub mod flag { - pub const S: u32 = 1; // 0x01 Suffix if S==1 then schema is present +/// A zenoh encoding is a HTTP Mime type represented, for wire efficiency, +/// as an integer prefix (that maps to a string) and a string suffix. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Encoding { + Exact(KnownEncoding), + WithSuffix(KnownEncoding, CowStr<'static>), } impl Encoding { - /// Returns a new [`Encoding`] object with default empty prefix ID. 
- pub const fn empty() -> Self { - Self { - id: 0, - schema: None, + pub fn new(prefix: u8, suffix: IntoCowStr) -> ZResult + where + IntoCowStr: Into> + AsRef, + { + let prefix = KnownEncoding::try_from(prefix)?; + let suffix = suffix.into(); + if suffix.as_bytes().len() > u8::MAX as usize { + bail!("Suffix length is limited to 255 characters") + } + if suffix.as_ref().is_empty() { + Ok(Encoding::Exact(prefix)) + } else { + Ok(Encoding::WithSuffix(prefix, suffix.into())) + } + } + + /// Sets the suffix of this encoding. + pub fn with_suffix(self, suffix: IntoCowStr) -> ZResult + where + IntoCowStr: Into> + AsRef, + { + match self { + Encoding::Exact(e) => Encoding::new(e as u8, suffix), + Encoding::WithSuffix(e, s) => Encoding::new(e as u8, s + suffix.as_ref()), } } + + pub fn as_ref<'a, T>(&'a self) -> T + where + &'a Self: Into, + { + self.into() + } + + /// Returns `true`if the string representation of this encoding starts with + /// the string representation of ther given encoding. + pub fn starts_with(&self, with: T) -> bool + where + T: Into, + { + let with: Encoding = with.into(); + self.prefix() == with.prefix() && self.suffix().starts_with(with.suffix()) + } + + pub const fn prefix(&self) -> &KnownEncoding { + match self { + Encoding::Exact(e) | Encoding::WithSuffix(e, _) => e, + } + } + + pub fn suffix(&self) -> &str { + match self { + Encoding::Exact(_) => "", + Encoding::WithSuffix(_, s) => s.as_ref(), + } + } +} + +impl Encoding { + pub const EMPTY: Encoding = Encoding::Exact(KnownEncoding::Empty); + pub const APP_OCTET_STREAM: Encoding = Encoding::Exact(KnownEncoding::AppOctetStream); + pub const APP_CUSTOM: Encoding = Encoding::Exact(KnownEncoding::AppCustom); + pub const TEXT_PLAIN: Encoding = Encoding::Exact(KnownEncoding::TextPlain); + pub const APP_PROPERTIES: Encoding = Encoding::Exact(KnownEncoding::AppProperties); + pub const APP_JSON: Encoding = Encoding::Exact(KnownEncoding::AppJson); + pub const APP_SQL: Encoding = 
Encoding::Exact(KnownEncoding::AppSql); + pub const APP_INTEGER: Encoding = Encoding::Exact(KnownEncoding::AppInteger); + pub const APP_FLOAT: Encoding = Encoding::Exact(KnownEncoding::AppFloat); + pub const APP_XML: Encoding = Encoding::Exact(KnownEncoding::AppXml); + pub const APP_XHTML_XML: Encoding = Encoding::Exact(KnownEncoding::AppXhtmlXml); + pub const APP_XWWW_FORM_URLENCODED: Encoding = + Encoding::Exact(KnownEncoding::AppXWwwFormUrlencoded); + pub const TEXT_JSON: Encoding = Encoding::Exact(KnownEncoding::TextJson); + pub const TEXT_HTML: Encoding = Encoding::Exact(KnownEncoding::TextHtml); + pub const TEXT_XML: Encoding = Encoding::Exact(KnownEncoding::TextXml); + pub const TEXT_CSS: Encoding = Encoding::Exact(KnownEncoding::TextCss); + pub const TEXT_CSV: Encoding = Encoding::Exact(KnownEncoding::TextCsv); + pub const TEXT_JAVASCRIPT: Encoding = Encoding::Exact(KnownEncoding::TextJavascript); + pub const IMAGE_JPEG: Encoding = Encoding::Exact(KnownEncoding::ImageJpeg); + pub const IMAGE_PNG: Encoding = Encoding::Exact(KnownEncoding::ImagePng); + pub const IMAGE_GIF: Encoding = Encoding::Exact(KnownEncoding::ImageGif); +} + +impl fmt::Display for Encoding { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Encoding::Exact(e) => f.write_str(e.as_ref()), + Encoding::WithSuffix(e, s) => { + f.write_str(e.as_ref())?; + f.write_str(s) + } + } + } +} + +impl From<&'static str> for Encoding { + fn from(s: &'static str) -> Self { + for (i, v) in consts::MIMES.iter().enumerate().skip(1) { + if let Some(suffix) = s.strip_prefix(v) { + if suffix.is_empty() { + return Encoding::Exact(unsafe { mem::transmute(i as u8) }); + } else { + return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, suffix.into()); + } + } + } + if s.is_empty() { + Encoding::Exact(KnownEncoding::Empty) + } else { + Encoding::WithSuffix(KnownEncoding::Empty, s.into()) + } + } +} + +impl From for Encoding { + fn from(mut s: String) -> Self { + for (i, v) in 
consts::MIMES.iter().enumerate().skip(1) { + if s.starts_with(v) { + s.replace_range(..v.len(), ""); + if s.is_empty() { + return Encoding::Exact(unsafe { mem::transmute(i as u8) }); + } else { + return Encoding::WithSuffix(unsafe { mem::transmute(i as u8) }, s.into()); + } + } + } + if s.is_empty() { + Encoding::Exact(KnownEncoding::Empty) + } else { + Encoding::WithSuffix(KnownEncoding::Empty, s.into()) + } + } +} + +impl From<&KnownEncoding> for Encoding { + fn from(e: &KnownEncoding) -> Encoding { + Encoding::Exact(*e) + } +} + +impl From for Encoding { + fn from(e: KnownEncoding) -> Encoding { + Encoding::Exact(e) + } } impl Default for Encoding { fn default() -> Self { - Self::empty() + KnownEncoding::Empty.into() } } impl Encoding { #[cfg(feature = "test")] pub fn rand() -> Self { - use rand::Rng; + use rand::{ + distributions::{Alphanumeric, DistString}, + Rng, + }; const MIN: usize = 2; const MAX: usize = 16; let mut rng = rand::thread_rng(); - let id: EncodingId = rng.gen(); - let schema = rng - .gen_bool(0.5) - .then_some(ZSlice::rand(rng.gen_range(MIN..MAX))); - Encoding { id, schema } + let prefix: u8 = rng.gen_range(0..20); + let suffix: String = if rng.gen_bool(0.5) { + let len = rng.gen_range(MIN..MAX); + Alphanumeric.sample_string(&mut rng, len) + } else { + String::new() + }; + Encoding::new(prefix, suffix).unwrap() } } diff --git a/commons/zenoh-protocol/src/core/endpoint.rs b/commons/zenoh-protocol/src/core/endpoint.rs index a8fcb3ae98..5e921345e4 100644 --- a/commons/zenoh-protocol/src/core/endpoint.rs +++ b/commons/zenoh-protocol/src/core/endpoint.rs @@ -497,12 +497,7 @@ impl fmt::Debug for ConfigMut<'_> { } } -/// A string that respects the [`EndPoint`] canon form: `[#]`. -/// -/// `` is a valid [`Locator`] and `` is of the form `=;...;=` where keys are alphabetically sorted. -/// `` is optional and can be provided to configure some aspectes for an [`EndPoint`], e.g. the interface to listen on or connect to. 
-/// -/// A full [`EndPoint`] string is hence in the form of `/
[?][#config]`. +/// A `String` that respects the [`EndPoint`] canon form: `#`, such that `` is a valid [`Locator`] `` is of the form `=;...;=` where keys are alphabetically sorted. #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] diff --git a/commons/zenoh-protocol/src/core/locator.rs b/commons/zenoh-protocol/src/core/locator.rs index 50b909b12f..cdd3dfa64c 100644 --- a/commons/zenoh-protocol/src/core/locator.rs +++ b/commons/zenoh-protocol/src/core/locator.rs @@ -16,9 +16,9 @@ use alloc::{borrow::ToOwned, string::String}; use core::{convert::TryFrom, fmt, hash::Hash, str::FromStr}; use zenoh_result::{Error as ZError, ZResult}; -/// A string that respects the [`Locator`] canon form: `/
[?]`. -/// -/// `` is of the form `=;...;=` where keys are alphabetically sorted. +// Locator +/// A `String` that respects the [`Locator`] canon form: `/
[?]`, +/// such that `` is of the form `=;...;=` where keys are alphabetically sorted. #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] #[serde(into = "String")] #[serde(try_from = "String")] @@ -122,3 +122,67 @@ impl Locator { EndPoint::rand().into() } } + +// pub(crate) trait HasCanonForm { +// fn is_canon(&self) -> bool; + +// type Output; +// fn canonicalize(self) -> Self::Output; +// } + +// fn cmp(this: &str, than: &str) -> core::cmp::Ordering { +// let is_longer = this.len().cmp(&than.len()); +// let this = this.chars(); +// let than = than.chars(); +// let zip = this.zip(than); +// for (this, than) in zip { +// match this.cmp(&than) { +// core::cmp::Ordering::Equal => {} +// o => return o, +// } +// } +// is_longer +// } + +// impl<'a, T: Iterator + Clone, V> HasCanonForm for T { +// fn is_canon(&self) -> bool { +// let mut iter = self.clone(); +// let mut acc = if let Some((key, _)) = iter.next() { +// key +// } else { +// return true; +// }; +// for (key, _) in iter { +// if cmp(key, acc) != core::cmp::Ordering::Greater { +// return false; +// } +// acc = key; +// } +// true +// } + +// type Output = Vec<(&'a str, V)>; +// fn canonicalize(mut self) -> Self::Output { +// let mut result = Vec::new(); +// if let Some(v) = self.next() { +// result.push(v); +// } +// 'outer: for (k, v) in self { +// for (i, (x, _)) in result.iter().enumerate() { +// match cmp(k, x) { +// core::cmp::Ordering::Less => { +// result.insert(i, (k, v)); +// continue 'outer; +// } +// core::cmp::Ordering::Equal => { +// result[i].1 = v; +// continue 'outer; +// } +// core::cmp::Ordering::Greater => {} +// } +// } +// result.push((k, v)) +// } +// result +// } +// } diff --git a/commons/zenoh-protocol/src/core/mod.rs b/commons/zenoh-protocol/src/core/mod.rs index 20fcf85dd9..2547034c44 100644 --- a/commons/zenoh-protocol/src/core/mod.rs +++ b/commons/zenoh-protocol/src/core/mod.rs @@ -16,6 +16,7 @@ use alloc::{ boxed::Box, format, string::{String, ToString}, 
+ vec::Vec, }; use core::{ convert::{From, TryFrom, TryInto}, @@ -41,8 +42,8 @@ pub use wire_expr::*; mod cowstr; pub use cowstr::CowStr; -pub mod encoding; -pub use encoding::{Encoding, EncodingId}; +mod encoding; +pub use encoding::{Encoding, KnownEncoding}; pub mod locator; pub use locator::*; @@ -53,6 +54,43 @@ pub use endpoint::*; pub mod resolution; pub use resolution::*; +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Property { + pub key: u64, + pub value: Vec, +} + +/// The kind of a `Sample`. +#[repr(u8)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +pub enum SampleKind { + /// if the `Sample` was issued by a `put` operation. + #[default] + Put = 0, + /// if the `Sample` was issued by a `delete` operation. + Delete = 1, +} + +impl fmt::Display for SampleKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SampleKind::Put => write!(f, "PUT"), + SampleKind::Delete => write!(f, "DELETE"), + } + } +} + +impl TryFrom for SampleKind { + type Error = u64; + fn try_from(kind: u64) -> Result { + match kind { + 0 => Ok(SampleKind::Put), + 1 => Ok(SampleKind::Delete), + _ => Err(kind), + } + } +} + /// The global unique id of a zenoh peer. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] @@ -261,27 +299,6 @@ impl<'de> serde::Deserialize<'de> for ZenohId { } } -/// The unique id of a zenoh entity inside it's parent [`Session`]. -pub type EntityId = u32; - -/// The global unique id of a zenoh entity. 
-#[derive(Debug, Default, Clone, Eq, Hash, PartialEq)] -pub struct EntityGlobalId { - pub zid: ZenohId, - pub eid: EntityId, -} - -impl EntityGlobalId { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - Self { - zid: ZenohId::rand(), - eid: rand::thread_rng().gen(), - } - } -} - #[repr(u8)] #[derive(Debug, Default, Copy, Clone, Eq, Hash, PartialEq)] pub enum Priority { @@ -297,8 +314,6 @@ pub enum Priority { } impl Priority { - /// Default - pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -339,8 +354,6 @@ pub enum Reliability { } impl Reliability { - pub const DEFAULT: Self = Self::BestEffort; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -361,13 +374,6 @@ pub struct Channel { pub reliability: Reliability, } -impl Channel { - pub const DEFAULT: Self = Self { - priority: Priority::DEFAULT, - reliability: Reliability::DEFAULT, - }; -} - /// The kind of congestion control. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] #[repr(u8)] @@ -377,6 +383,51 @@ pub enum CongestionControl { Block = 1, } -impl CongestionControl { - pub const DEFAULT: Self = Self::Drop; +/// The subscription mode. +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +pub enum SubMode { + #[default] + Push = 0, + Pull = 1, +} + +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct SubInfo { + pub reliability: Reliability, + pub mode: SubMode, +} + +#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)] +pub struct QueryableInfo { + pub complete: u64, // Default 0: incomplete + pub distance: u64, // Default 0: no distance +} + +/// The kind of consolidation. +#[derive(Debug, Clone, PartialEq, Eq, Copy)] +pub enum ConsolidationMode { + /// No consolidation applied: multiple samples may be received for the same key-timestamp. 
+ None, + /// Monotonic consolidation immediately forwards samples, except if one with an equal or more recent timestamp + /// has already been sent with the same key. + /// + /// This optimizes latency while potentially reducing bandwidth. + /// + /// Note that this doesn't cause re-ordering, but drops the samples for which a more recent timestamp has already + /// been observed with the same key. + Monotonic, + /// Holds back samples to only send the set of samples that had the highest timestamp for their key. + Latest, +} + +/// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub enum QueryTarget { + #[default] + BestMatching, + All, + AllComplete, + #[cfg(feature = "complete_n")] + Complete(u64), } diff --git a/commons/zenoh-protocol/src/core/wire_expr.rs b/commons/zenoh-protocol/src/core/wire_expr.rs index a66b1aa212..7b0dee7471 100644 --- a/commons/zenoh-protocol/src/core/wire_expr.rs +++ b/commons/zenoh-protocol/src/core/wire_expr.rs @@ -71,10 +71,6 @@ impl<'a> WireExpr<'a> { } } - pub fn is_empty(&self) -> bool { - self.scope == 0 && self.suffix.as_ref().is_empty() - } - pub fn as_str(&'a self) -> &'a str { if self.scope == 0 { self.suffix.as_ref() @@ -261,7 +257,7 @@ impl WireExpr<'_> { WireExpr { scope, suffix: suffix.into(), - mapping: Mapping::DEFAULT, + mapping: Mapping::default(), } } } diff --git a/commons/zenoh-protocol/src/lib.rs b/commons/zenoh-protocol/src/lib.rs index 8d26f52ed9..2e1a2fa7cf 100644 --- a/commons/zenoh-protocol/src/lib.rs +++ b/commons/zenoh-protocol/src/lib.rs @@ -28,7 +28,7 @@ pub mod transport; pub mod zenoh; // Zenoh version -pub const VERSION: u8 = 0x09; +pub const VERSION: u8 = 0x08; // Zenoh protocol uses the following conventions for message definition and representation. 
// diff --git a/commons/zenoh-protocol/src/network/declare.rs b/commons/zenoh-protocol/src/network/declare.rs index 31e8adcc6e..76415d52f5 100644 --- a/commons/zenoh-protocol/src/network/declare.rs +++ b/commons/zenoh-protocol/src/network/declare.rs @@ -18,8 +18,7 @@ use crate::{ zextz64, zextzbuf, }; use alloc::borrow::Cow; -pub use common::*; -use core::sync::atomic::AtomicU32; +use core::ops::BitOr; pub use interest::*; pub use keyexpr::*; pub use queryable::*; @@ -27,65 +26,27 @@ pub use subscriber::*; pub use token::*; pub mod flag { - pub const I: u8 = 1 << 5; // 0x20 Interest if I==1 then the declare is in a response to an Interest with future==false - // pub const X: u8 = 1 << 6; // 0x40 Reserved + // pub const X: u8 = 1 << 5; // 0x20 Reserved + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// Flags: -/// - |: Mode The mode of the the declaration* -/// -/ +/// - X: Reserved +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|Mod| DECLARE | +/// |Z|X|X| DECLARE | /// +-+-+-+---------+ -/// ~ rid:z32 ~ if Mode != Push -/// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// ~ declaration ~ /// +---------------+ /// -/// *Mode of declaration: -/// - Mode 0b00: Push -/// - Mode 0b01: Response -/// - Mode 0b10: Request -/// - Mode 0b11: RequestContinuous - -/// The resolution of a RequestId -pub type DeclareRequestId = u32; -pub type AtomicDeclareRequestId = AtomicU32; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum DeclareMode { - Push, - Request(DeclareRequestId), - RequestContinuous(DeclareRequestId), - Response(DeclareRequestId), -} - -impl DeclareMode { - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - - let mut rng = rand::thread_rng(); - - match rng.gen_range(0..4) { - 0 => DeclareMode::Push, - 1 => DeclareMode::Request(rng.gen()), - 2 => 
DeclareMode::RequestContinuous(rng.gen()), - 3 => DeclareMode::Response(rng.gen()), - _ => unreachable!(), - } - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct Declare { - pub mode: DeclareMode, pub ext_qos: ext::QoSType, pub ext_tstamp: Option, pub ext_nodeid: ext::NodeIdType, @@ -122,8 +83,8 @@ pub mod id { pub const U_TOKEN: u8 = 0x07; pub const D_INTEREST: u8 = 0x08; - - pub const D_FINAL: u8 = 0x1A; + pub const F_INTEREST: u8 = 0x09; + pub const U_INTEREST: u8 = 0x0A; } #[derive(Debug, Clone, PartialEq, Eq)] @@ -137,7 +98,8 @@ pub enum DeclareBody { DeclareToken(DeclareToken), UndeclareToken(UndeclareToken), DeclareInterest(DeclareInterest), - DeclareFinal(DeclareFinal), + FinalInterest(FinalInterest), + UndeclareInterest(UndeclareInterest), } impl DeclareBody { @@ -147,7 +109,7 @@ impl DeclareBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..10) { + match rng.gen_range(0..11) { 0 => DeclareBody::DeclareKeyExpr(DeclareKeyExpr::rand()), 1 => DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr::rand()), 2 => DeclareBody::DeclareSubscriber(DeclareSubscriber::rand()), @@ -157,7 +119,8 @@ impl DeclareBody { 6 => DeclareBody::DeclareToken(DeclareToken::rand()), 7 => DeclareBody::UndeclareToken(UndeclareToken::rand()), 8 => DeclareBody::DeclareInterest(DeclareInterest::rand()), - 9 => DeclareBody::DeclareFinal(DeclareFinal::rand()), + 9 => DeclareBody::FinalInterest(FinalInterest::rand()), + 10 => DeclareBody::UndeclareInterest(UndeclareInterest::rand()), _ => unreachable!(), } } @@ -170,51 +133,50 @@ impl Declare { let mut rng = rand::thread_rng(); - let mode = DeclareMode::rand(); + let body = DeclareBody::rand(); let ext_qos = ext::QoSType::rand(); let ext_tstamp = rng.gen_bool(0.5).then(ext::TimestampType::rand); let ext_nodeid = ext::NodeIdType::rand(); - let body = DeclareBody::rand(); Self { - mode, + body, ext_qos, ext_tstamp, ext_nodeid, - body, } } } -pub mod common { - use super::*; +#[derive(Debug, Default, Copy, Clone, PartialEq, 
Eq)] +#[repr(u8)] +pub enum Mode { + #[default] + Push, + Pull, +} - /// ```text - /// Flags: - /// - X: Reserved - /// - X: Reserved - /// - Z: Extension If Z==1 then at least one extension is present - /// - /// 7 6 5 4 3 2 1 0 - /// +-+-+-+-+-+-+-+-+ - /// |Z|x|x| D_FINAL | - /// +---------------+ - /// ~ [final_exts] ~ if Z==1 - /// +---------------+ - /// ``` - #[derive(Debug, Clone, PartialEq, Eq)] - pub struct DeclareFinal; +impl Mode { + #[cfg(feature = "test")] + fn rand() -> Self { + use rand::Rng; - impl DeclareFinal { - #[cfg(feature = "test")] - pub fn rand() -> Self { - Self + let mut rng = rand::thread_rng(); + + if rng.gen_bool(0.5) { + Mode::Push + } else { + Mode::Pull } } +} + +pub mod common { + use super::*; pub mod ext { use super::*; + // WARNING: this is a temporary and mandatory extension used for undeclarations pub type WireExprExt = zextzbuf!(0x0f, true); #[derive(Debug, Clone, PartialEq, Eq)] pub struct WireExprType { @@ -232,10 +194,6 @@ pub mod common { } } - pub fn is_null(&self) -> bool { - self.wire_expr.is_empty() - } - #[cfg(feature = "test")] pub fn rand() -> Self { Self { @@ -327,11 +285,9 @@ pub mod keyexpr { } pub mod subscriber { - use crate::core::EntityId; - use super::*; - pub type SubscriberId = EntityId; + pub type SubscriberId = u32; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -358,7 +314,9 @@ pub mod subscriber { /// ~ [decl_exts] ~ if Z==1 /// +---------------+ /// - /// - if R==1 then the subscription is reliable, else it is best effort /// + /// - if R==1 then the subscription is reliable, else it is best effort + /// - if P==1 then the subscription is pull, else it is push + /// /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareSubscriber { @@ -379,35 +337,29 @@ pub mod subscriber { /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// % reserved |R% + /// % reserved |P|R% /// +---------------+ /// /// - if R==1 then the 
subscription is reliable, else it is best effort + /// - if P==1 then the subscription is pull, else it is push /// - rsv: Reserved /// ``` - #[derive(Debug, Clone, Copy, PartialEq, Eq)] + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub struct SubscriberInfo { pub reliability: Reliability, + pub mode: Mode, } impl SubscriberInfo { pub const R: u64 = 1; - - pub const DEFAULT: Self = Self { - reliability: Reliability::DEFAULT, - }; + pub const P: u64 = 1 << 1; #[cfg(feature = "test")] pub fn rand() -> Self { let reliability = Reliability::rand(); + let mode = Mode::rand(); - Self { reliability } - } - } - - impl Default for SubscriberInfo { - fn default() -> Self { - Self::DEFAULT + Self { reliability, mode } } } @@ -418,7 +370,12 @@ pub mod subscriber { } else { Reliability::BestEffort }; - Self { reliability } + let mode = if imsg::has_option(ext.value, SubscriberInfo::P) { + Mode::Pull + } else { + Mode::Push + }; + Self { reliability, mode } } } @@ -428,6 +385,9 @@ pub mod subscriber { if ext.reliability == Reliability::Reliable { v |= SubscriberInfo::R; } + if ext.mode == Mode::Pull { + v |= SubscriberInfo::P; + } Info::new(v) } } @@ -469,6 +429,7 @@ pub mod subscriber { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareSubscriber { pub id: SubscriberId, + // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -487,11 +448,9 @@ pub mod subscriber { } pub mod queryable { - use crate::core::EntityId; - use super::*; - pub type QueryableId = EntityId; + pub type QueryableId = u32; pub mod flag { pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix @@ -527,52 +486,54 @@ pub mod queryable { pub struct DeclareQueryable { pub id: QueryableId, pub wire_expr: WireExpr<'static>, - pub ext_info: ext::QueryableInfoType, + pub ext_info: ext::QueryableInfo, } pub mod ext { use super::*; - pub type QueryableInfo = zextz64!(0x01, false); + pub type Info = 
zextz64!(0x01, false); - pub mod flag { - pub const C: u8 = 1; // 0x01 Complete if C==1 then the queryable is complete - } - /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// |Z|0_1| ID | /// +-+-+-+---------+ - /// |x|x|x|x|x|x|x|C| + /// ~ complete_n ~ /// +---------------+ - /// ~ distance ~ + /// ~ distance ~ /// +---------------+ - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub struct QueryableInfoType { - pub complete: bool, // Default false: incomplete - pub distance: u16, // Default 0: no distance + #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] + pub struct QueryableInfo { + pub complete: u8, // Default 0: incomplete // @TODO: maybe a bitflag + pub distance: u32, // Default 0: no distance } - impl QueryableInfoType { - pub const DEFAULT: Self = Self { - complete: false, - distance: 0, - }; - + impl QueryableInfo { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); - let complete: bool = rng.gen_bool(0.5); - let distance: u16 = rng.gen(); + let complete: u8 = rng.gen(); + let distance: u32 = rng.gen(); + + Self { complete, distance } + } + } + + impl From for QueryableInfo { + fn from(ext: Info) -> Self { + let complete = ext.value as u8; + let distance = (ext.value >> 8) as u32; Self { complete, distance } } } - impl Default for QueryableInfoType { - fn default() -> Self { - Self::DEFAULT + impl From for Info { + fn from(ext: QueryableInfo) -> Self { + let mut v: u64 = ext.complete as u64; + v |= (ext.distance as u64) << 8; + Info::new(v) } } } @@ -585,7 +546,7 @@ pub mod queryable { let id: QueryableId = rng.gen(); let wire_expr = WireExpr::rand(); - let ext_info = ext::QueryableInfoType::rand(); + let ext_info = ext::QueryableInfo::rand(); Self { id, @@ -613,6 +574,7 @@ pub mod queryable { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareQueryable { pub id: QueryableId, + // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: 
common::ext::WireExprType, } @@ -698,6 +660,7 @@ pub mod token { #[derive(Debug, Clone, PartialEq, Eq)] pub struct UndeclareToken { pub id: TokenId, + // WARNING: this is a temporary and mandatory extension used for undeclarations pub ext_wire_expr: common::ext::WireExprType, } @@ -716,81 +679,49 @@ pub mod token { } pub mod interest { - use core::{ - fmt::{self, Debug}, - ops::{Add, AddAssign, Sub, SubAssign}, - }; - use super::*; pub type InterestId = u32; pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const N: u8 = 1 << 5; // 0x20 Named if N==1 then the key expr has name/suffix + pub const M: u8 = 1 << 6; // 0x40 Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } /// # DeclareInterest message /// - /// The DECLARE INTEREST message is sent to request the transmission of current and optionally future - /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be - /// sent to request the transmisison of all current subscriptions matching `a/*`. - /// - /// The behaviour of a DECLARE INTEREST depends on the DECLARE MODE in the DECLARE MESSAGE: - /// - Push: invalid - /// - Request: only current declarations - /// - RequestContinous: current and future declarations - /// - Response: invalid - /// - /// E.g., the [`DeclareInterest`] message flow is the following for a Request: - /// - /// ```text - /// A B - /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::Request. - /// | | This is a DeclareInterest e.g. for subscriber declarations. 
- /// | | - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Response - /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response - /// ``` - /// + /// The DECLARE INTEREST message is sent to request the transmission of existing and future + /// declarations of a given kind matching a target keyexpr. E.g., a declare interest could be sent to + /// request the transmisison of all existing subscriptions matching `a/*`. A FINAL INTEREST is used to + /// mark the end of the transmission of exisiting matching declarations. /// - /// And the [`DeclareInterest`] message flow is the following for a RequestContinuous: + /// E.g., the [`DeclareInterest`]/[`FinalInterest`]/[`UndeclareInterest`] message flow is the following: /// /// ```text /// A B /// | DECL INTEREST | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This is a DeclareInterest e.g. for subscriber declarations/undeclarations. + /// |------------------>| -- This is a DeclareInterest e.g. for subscriber declarations/undeclarations. /// | | /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push + /// |<------------------| /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push + /// |<------------------| /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push + /// |<------------------| /// | | - /// | FINAL | - /// |<------------------| -- Sent in Declare::Response + /// | FINAL INTEREST | + /// |<------------------| -- The FinalInterest signals that all known subscribers have been transmitted. /// | | /// | DECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber declaration. + /// |<------------------| -- This is a new subscriber declaration. 
/// | UNDECL SUBSCRIBER | - /// |<------------------| -- Sent in Declare::Push. This is a new subscriber undeclaration. + /// |<------------------| -- This is a new subscriber undeclaration. /// | | /// | ... | /// | | - /// | FINAL | - /// |------------------>| -- Sent in Declare::RequestContinuous. - /// | | This stops the transmission of subscriber declarations/undeclarations. + /// | UNDECL INTEREST | + /// |------------------>| -- This is an UndeclareInterest to stop receiving subscriber declarations/undeclarations. /// | | /// ``` /// @@ -798,19 +729,21 @@ pub mod interest { /// /// ```text /// Flags: - /// - X: Reserved - /// - X: Reserved + /// - N: Named If N==1 then the key expr has name/suffix + /// - M: Mapping if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ - /// |Z|X|X| D_INT | + /// |Z|M|N| D_INT | /// +---------------+ - /// |A|M|N|R|T|Q|S|K| (*) + /// ~ intst_id:z32 ~ /// +---------------+ - /// ~ key_scope:z16 ~ if R==1 + /// ~ key_scope:z16 ~ + /// +---------------+ + /// ~ key_suffix ~ if N==1 -- /// +---------------+ - /// ~ key_suffix ~ if R==1 && N==1 -- + /// |A|F|C|X|T|Q|S|K| (*) /// +---------------+ /// ~ [decl_exts] ~ if Z==1 /// +---------------+ @@ -819,108 +752,63 @@ pub mod interest { /// - if S==1 then the interest refers to subscribers /// - if Q==1 then the interest refers to queryables /// - if T==1 then the interest refers to tokens - /// - if R==1 then the interest is restricted to the matching key expression, else it is for all key expressions. - /// - if N==1 then the key expr has name/suffix. If R==0 then N should be set to 0. - /// - if M==1 then key expr mapping is the one declared by the sender, else it is the one declared by the receiver. - /// If R==0 then M should be set to 0. + /// - if C==1 then the interest refers to the current declarations. 
+ /// - if F==1 then the interest refers to the future declarations. Note that if F==0 then: + /// - replies SHOULD NOT be sent after the FinalInterest; + /// - UndeclareInterest SHOULD NOT be sent after the FinalInterest. /// - if A==1 then the replies SHOULD be aggregated /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct DeclareInterest { + pub id: InterestId, + pub wire_expr: WireExpr<'static>, pub interest: Interest, - pub wire_expr: Option>, } - impl DeclareInterest { - pub fn options(&self) -> u8 { - let mut interest = self.interest; - if let Some(we) = self.wire_expr.as_ref() { - interest += Interest::RESTRICTED; - if we.has_suffix() { - interest += Interest::NAMED; - } - if let Mapping::Sender = we.mapping { - interest += Interest::MAPPING; - } - } - interest.options - } - - #[cfg(feature = "test")] - pub fn rand() -> Self { - use rand::Rng; - let mut rng = rand::thread_rng(); - - let wire_expr = rng.gen_bool(0.5).then_some(WireExpr::rand()); - let interest = Interest::rand(); - - Self { - wire_expr, - interest, - } - } - } - - #[derive(Clone, Copy)] - pub struct Interest { - options: u8, - } + #[repr(transparent)] + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct Interest(u8); impl Interest { - // Flags - pub const KEYEXPRS: Interest = Interest::options(1); - pub const SUBSCRIBERS: Interest = Interest::options(1 << 1); - pub const QUERYABLES: Interest = Interest::options(1 << 2); - pub const TOKENS: Interest = Interest::options(1 << 3); - const RESTRICTED: Interest = Interest::options(1 << 4); - const NAMED: Interest = Interest::options(1 << 5); - const MAPPING: Interest = Interest::options(1 << 6); - pub const AGGREGATE: Interest = Interest::options(1 << 7); - pub const ALL: Interest = Interest::options( - Interest::KEYEXPRS.options - | Interest::SUBSCRIBERS.options - | Interest::QUERYABLES.options - | Interest::TOKENS.options, - ); - - const fn options(options: u8) -> Self { - Self { options } - } - - pub const fn empty() -> Self { - Self { 
options: 0 } - } + pub const KEYEXPRS: Interest = Interest(1); + pub const SUBSCRIBERS: Interest = Interest(1 << 1); + pub const QUERYABLES: Interest = Interest(1 << 2); + pub const TOKENS: Interest = Interest(1 << 3); + // pub const X: Interest = Interest(1 << 4); + pub const CURRENT: Interest = Interest(1 << 5); + pub const FUTURE: Interest = Interest(1 << 6); + pub const AGGREGATE: Interest = Interest(1 << 7); pub const fn keyexprs(&self) -> bool { - imsg::has_flag(self.options, Self::KEYEXPRS.options) + imsg::has_flag(self.0, Self::KEYEXPRS.0) } pub const fn subscribers(&self) -> bool { - imsg::has_flag(self.options, Self::SUBSCRIBERS.options) + imsg::has_flag(self.0, Self::SUBSCRIBERS.0) } pub const fn queryables(&self) -> bool { - imsg::has_flag(self.options, Self::QUERYABLES.options) + imsg::has_flag(self.0, Self::QUERYABLES.0) } pub const fn tokens(&self) -> bool { - imsg::has_flag(self.options, Self::TOKENS.options) + imsg::has_flag(self.0, Self::TOKENS.0) } - pub const fn restricted(&self) -> bool { - imsg::has_flag(self.options, Self::RESTRICTED.options) + pub const fn current(&self) -> bool { + imsg::has_flag(self.0, Self::CURRENT.0) } - pub const fn named(&self) -> bool { - imsg::has_flag(self.options, Self::NAMED.options) + pub const fn future(&self) -> bool { + imsg::has_flag(self.0, Self::FUTURE.0) } - pub const fn mapping(&self) -> bool { - imsg::has_flag(self.options, Self::MAPPING.options) + pub const fn aggregate(&self) -> bool { + imsg::has_flag(self.0, Self::AGGREGATE.0) } - pub const fn aggregate(&self) -> bool { - imsg::has_flag(self.options, Self::AGGREGATE.options) + pub const fn as_u8(&self) -> u8 { + self.0 } #[cfg(feature = "test")] @@ -928,105 +816,108 @@ pub mod interest { use rand::Rng; let mut rng = rand::thread_rng(); - let mut s = Self::empty(); - if rng.gen_bool(0.5) { - s += Interest::KEYEXPRS; - } - if rng.gen_bool(0.5) { - s += Interest::SUBSCRIBERS; - } - if rng.gen_bool(0.5) { - s += Interest::TOKENS; - } - if 
rng.gen_bool(0.5) { - s += Interest::AGGREGATE; - } - s + let inner: u8 = rng.gen(); + + Self(inner) } } - impl PartialEq for Interest { - fn eq(&self, other: &Self) -> bool { - self.keyexprs() == other.keyexprs() - && self.subscribers() == other.subscribers() - && self.queryables() == other.queryables() - && self.tokens() == other.tokens() - && self.aggregate() == other.aggregate() + impl BitOr for Interest { + type Output = Self; + + fn bitor(self, rhs: Self) -> Self::Output { + Self(self.0 | rhs.0) } } - impl Debug for Interest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Interest {{ ")?; - if self.keyexprs() { - write!(f, "K:Y, ")?; - } else { - write!(f, "K:N, ")?; - } - if self.subscribers() { - write!(f, "S:Y, ")?; - } else { - write!(f, "S:N, ")?; - } - if self.queryables() { - write!(f, "Q:Y, ")?; - } else { - write!(f, "Q:N, ")?; - } - if self.tokens() { - write!(f, "T:Y, ")?; - } else { - write!(f, "T:N, ")?; - } - if self.aggregate() { - write!(f, "A:Y")?; - } else { - write!(f, "A:N")?; - } - write!(f, " }}")?; - Ok(()) + impl From for Interest { + fn from(v: u8) -> Self { + Self(v) } } - impl Eq for Interest {} + impl DeclareInterest { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); - impl Add for Interest { - type Output = Self; + let id: InterestId = rng.gen(); + let wire_expr = WireExpr::rand(); + let interest = Interest::rand(); - #[allow(clippy::suspicious_arithmetic_impl)] // Allows to implement Add & Sub for Interest - fn add(self, rhs: Self) -> Self::Output { Self { - options: self.options | rhs.options, + id, + wire_expr, + interest, } } } - impl AddAssign for Interest { - #[allow(clippy::suspicious_op_assign_impl)] // Allows to implement Add & Sub for Interest - fn add_assign(&mut self, rhs: Self) { - self.options |= rhs.options; - } + /// ```text + /// Flags: + /// - X: Reserved + /// - X: Reserved + /// - Z: Extension If Z==1 then at least one 
extension is present + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |Z|X|X| F_INT | + /// +---------------+ + /// ~ intst_id:z32 ~ + /// +---------------+ + /// ~ [decl_exts] ~ if Z==1 + /// +---------------+ + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct FinalInterest { + pub id: InterestId, } - impl Sub for Interest { - type Output = Self; + impl FinalInterest { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); - fn sub(self, rhs: Self) -> Self::Output { - Self { - options: self.options & !rhs.options, - } + let id: InterestId = rng.gen(); + + Self { id } } } - impl SubAssign for Interest { - fn sub_assign(&mut self, rhs: Self) { - self.options &= !rhs.options; - } + /// ```text + /// Flags: + /// - X: Reserved + /// - X: Reserved + /// - Z: Extension If Z==1 then at least one extension is present + /// + /// 7 6 5 4 3 2 1 0 + /// +-+-+-+-+-+-+-+-+ + /// |Z|X|X| U_INT | + /// +---------------+ + /// ~ intst_id:z32 ~ + /// +---------------+ + /// ~ [decl_exts] ~ if Z==1 + /// +---------------+ + /// ``` + #[derive(Debug, Clone, PartialEq, Eq)] + pub struct UndeclareInterest { + pub id: InterestId, + // WARNING: this is a temporary and mandatory extension used for undeclarations + pub ext_wire_expr: common::ext::WireExprType, } - impl From for Interest { - fn from(options: u8) -> Self { - Self { options } + impl UndeclareInterest { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let id: InterestId = rng.gen(); + let ext_wire_expr = common::ext::WireExprType::rand(); + + Self { id, ext_wire_expr } } } } diff --git a/commons/zenoh-protocol/src/network/mod.rs b/commons/zenoh-protocol/src/network/mod.rs index e60388f425..b2ae5deabe 100644 --- a/commons/zenoh-protocol/src/network/mod.rs +++ b/commons/zenoh-protocol/src/network/mod.rs @@ -20,9 +20,9 @@ pub mod response; use core::fmt; pub use declare::{ - Declare, DeclareBody, 
DeclareFinal, DeclareInterest, DeclareKeyExpr, DeclareMode, - DeclareQueryable, DeclareSubscriber, DeclareToken, UndeclareKeyExpr, UndeclareQueryable, - UndeclareSubscriber, UndeclareToken, + Declare, DeclareBody, DeclareInterest, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, + DeclareToken, UndeclareInterest, UndeclareKeyExpr, UndeclareQueryable, UndeclareSubscriber, + UndeclareToken, }; pub use oam::Oam; pub use push::Push; @@ -51,8 +51,6 @@ pub enum Mapping { } impl Mapping { - pub const DEFAULT: Self = Self::Receiver; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -110,18 +108,6 @@ impl NetworkMessage { true } - #[inline] - pub fn is_express(&self) -> bool { - match &self.body { - NetworkBody::Push(msg) => msg.ext_qos.is_express(), - NetworkBody::Request(msg) => msg.ext_qos.is_express(), - NetworkBody::Response(msg) => msg.ext_qos.is_express(), - NetworkBody::ResponseFinal(msg) => msg.ext_qos.is_express(), - NetworkBody::Declare(msg) => msg.ext_qos.is_express(), - NetworkBody::OAM(msg) => msg.ext_qos.is_express(), - } - } - #[inline] pub fn is_droppable(&self) -> bool { if !self.is_reliable() { @@ -129,11 +115,11 @@ impl NetworkMessage { } let cc = match &self.body { + NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Push(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Request(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::Response(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_congestion_control(), - NetworkBody::Declare(msg) => msg.ext_qos.get_congestion_control(), NetworkBody::OAM(msg) => msg.ext_qos.get_congestion_control(), }; @@ -143,11 +129,11 @@ impl NetworkMessage { #[inline] pub fn priority(&self) -> Priority { match &self.body { + NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::Push(msg) => msg.ext_qos.get_priority(), NetworkBody::Request(msg) => msg.ext_qos.get_priority(), 
NetworkBody::Response(msg) => msg.ext_qos.get_priority(), NetworkBody::ResponseFinal(msg) => msg.ext_qos.get_priority(), - NetworkBody::Declare(msg) => msg.ext_qos.get_priority(), NetworkBody::OAM(msg) => msg.ext_qos.get_priority(), } } @@ -212,7 +198,7 @@ impl From for NetworkMessage { pub mod ext { use crate::{ common::{imsg, ZExtZ64}, - core::{CongestionControl, EntityId, Priority, ZenohId}, + core::{CongestionControl, Priority, ZenohId}, }; use core::fmt; @@ -240,16 +226,6 @@ pub mod ext { const D_FLAG: u8 = 0b00001000; const E_FLAG: u8 = 0b00010000; - pub const DEFAULT: Self = Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false); - - pub const DECLARE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); - pub const PUSH: Self = Self::new(Priority::DEFAULT, CongestionControl::Drop, false); - pub const REQUEST: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); - pub const RESPONSE: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); - pub const RESPONSE_FINAL: Self = - Self::new(Priority::DEFAULT, CongestionControl::Block, false); - pub const OAM: Self = Self::new(Priority::DEFAULT, CongestionControl::Block, false); - pub const fn new( priority: Priority, congestion_control: CongestionControl, @@ -306,11 +282,35 @@ pub mod ext { let inner: u8 = rng.gen(); Self { inner } } + + pub fn declare_default() -> Self { + Self::new(Priority::default(), CongestionControl::Block, false) + } + + pub fn push_default() -> Self { + Self::new(Priority::default(), CongestionControl::Drop, false) + } + + pub fn request_default() -> Self { + Self::new(Priority::default(), CongestionControl::Block, false) + } + + pub fn response_default() -> Self { + Self::new(Priority::default(), CongestionControl::Block, false) + } + + pub fn response_final_default() -> Self { + Self::new(Priority::default(), CongestionControl::Block, false) + } + + pub fn oam_default() -> Self { + Self::new(Priority::default(), 
CongestionControl::Block, false) + } } impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::new(Priority::DEFAULT, CongestionControl::DEFAULT, false) + Self::new(Priority::default(), CongestionControl::default(), false) } } @@ -378,9 +378,6 @@ pub mod ext { } impl NodeIdType<{ ID }> { - // node_id == 0 means the message has been generated by the node itself - pub const DEFAULT: Self = Self { node_id: 0 }; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -392,7 +389,8 @@ pub mod ext { impl Default for NodeIdType<{ ID }> { fn default() -> Self { - Self::DEFAULT + // node_id == 0 means the message has been generated by the node itself + Self { node_id: 0 } } } @@ -419,19 +417,19 @@ pub mod ext { /// % eid % /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] - pub struct EntityGlobalIdType { + pub struct EntityIdType { pub zid: ZenohId, - pub eid: EntityId, + pub eid: u32, } - impl EntityGlobalIdType<{ ID }> { + impl EntityIdType<{ ID }> { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; let mut rng = rand::thread_rng(); let zid = ZenohId::rand(); - let eid: EntityId = rng.gen(); + let eid: u32 = rng.gen(); Self { zid, eid } } } diff --git a/commons/zenoh-protocol/src/network/request.rs b/commons/zenoh-protocol/src/network/request.rs index ff978744e8..9e0137ea3a 100644 --- a/commons/zenoh-protocol/src/network/request.rs +++ b/commons/zenoh-protocol/src/network/request.rs @@ -66,6 +66,7 @@ pub struct Request { pub mod ext { use crate::{ common::{ZExtZ64, ZExtZBuf}, + core::QueryTarget, zextz64, zextzbuf, }; use core::{num::NonZeroU32, time::Duration}; @@ -87,17 +88,9 @@ pub mod ext { /// +---------------+ /// /// The `zenoh::queryable::Queryable`s that should be target of a `zenoh::Session::get()`. 
- #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] - pub enum TargetType { - #[default] - BestMatching, - All, - AllComplete, - } + pub type TargetType = QueryTarget; impl TargetType { - pub const DEFAULT: Self = Self::BestMatching; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::*; @@ -107,6 +100,8 @@ pub mod ext { TargetType::All, TargetType::AllComplete, TargetType::BestMatching, + #[cfg(feature = "complete_n")] + TargetType::Complete(rng.gen()), ] .choose(&mut rng) .unwrap() diff --git a/commons/zenoh-protocol/src/network/response.rs b/commons/zenoh-protocol/src/network/response.rs index 6f0925429b..9ef2c26a10 100644 --- a/commons/zenoh-protocol/src/network/response.rs +++ b/commons/zenoh-protocol/src/network/response.rs @@ -67,7 +67,7 @@ pub mod ext { pub type TimestampType = crate::network::ext::TimestampType<{ Timestamp::ID }>; pub type ResponderId = zextzbuf!(0x3, false); - pub type ResponderIdType = crate::network::ext::EntityGlobalIdType<{ ResponderId::ID }>; + pub type ResponderIdType = crate::network::ext::EntityIdType<{ ResponderId::ID }>; } impl Response { diff --git a/commons/zenoh-protocol/src/transport/init.rs b/commons/zenoh-protocol/src/transport/init.rs index de517a353c..1327288471 100644 --- a/commons/zenoh-protocol/src/transport/init.rs +++ b/commons/zenoh-protocol/src/transport/init.rs @@ -165,7 +165,7 @@ impl InitSyn { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: BatchSize = rng.gen(); + let batch_size: u16 = rng.gen(); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); let ext_auth = rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); @@ -221,7 +221,7 @@ impl InitAck { } else { Resolution::rand() }; - let batch_size: BatchSize = rng.gen(); + let batch_size: u16 = rng.gen(); let cookie = ZSlice::rand(64); let ext_qos = rng.gen_bool(0.5).then_some(ZExtUnit::rand()); let ext_shm = 
rng.gen_bool(0.5).then_some(ZExtZBuf::rand()); diff --git a/commons/zenoh-protocol/src/transport/join.rs b/commons/zenoh-protocol/src/transport/join.rs index a5cf1422a6..c5fbb98430 100644 --- a/commons/zenoh-protocol/src/transport/join.rs +++ b/commons/zenoh-protocol/src/transport/join.rs @@ -141,7 +141,7 @@ impl Join { let whatami = WhatAmI::rand(); let zid = ZenohId::default(); let resolution = Resolution::rand(); - let batch_size: BatchSize = rng.gen(); + let batch_size: u16 = rng.gen(); let lease = if rng.gen_bool(0.5) { Duration::from_secs(rng.gen()) } else { diff --git a/commons/zenoh-protocol/src/transport/mod.rs b/commons/zenoh-protocol/src/transport/mod.rs index e92860f441..258b43baf6 100644 --- a/commons/zenoh-protocol/src/transport/mod.rs +++ b/commons/zenoh-protocol/src/transport/mod.rs @@ -39,7 +39,6 @@ use crate::network::NetworkMessage; /// the boundary of the serialized messages. The length is encoded as little-endian. /// In any case, the length of a message must not exceed 65_535 bytes. 
pub type BatchSize = u16; -pub type AtomicBatchSize = core::sync::atomic::AtomicU16; pub mod batch_size { use super::BatchSize; @@ -85,18 +84,13 @@ pub enum TransportBodyLowLatency { pub type TransportSn = u32; -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] pub struct PrioritySn { pub reliable: TransportSn, pub best_effort: TransportSn, } impl PrioritySn { - pub const DEFAULT: Self = Self { - reliable: TransportSn::MIN, - best_effort: TransportSn::MIN, - }; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; @@ -267,8 +261,7 @@ pub mod ext { } impl QoSType<{ ID }> { - const P_MASK: u8 = 0b00000111; - pub const DEFAULT: Self = Self::new(Priority::DEFAULT); + pub const P_MASK: u8 = 0b00000111; pub const fn new(priority: Priority) -> Self { Self { @@ -292,7 +285,7 @@ pub mod ext { impl Default for QoSType<{ ID }> { fn default() -> Self { - Self::DEFAULT + Self::new(Priority::default()) } } diff --git a/commons/zenoh-protocol/src/zenoh/ack.rs b/commons/zenoh-protocol/src/zenoh/ack.rs new file mode 100644 index 0000000000..d40bf58791 --- /dev/null +++ b/commons/zenoh-protocol/src/zenoh/ack.rs @@ -0,0 +1,84 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::common::ZExtUnknown; +use alloc::vec::Vec; +use uhlc::Timestamp; + +/// # Ack message +/// +/// ```text +/// Flags: +/// - T: Timestamp If T==1 then the timestamp if present +/// - X: Reserved +/// - Z: Extension If Z==1 then at least one extension is present +/// +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// |Z|X|T| ACK | +/// +-+-+-+---------+ +/// ~ ts: ~ if T==1 +/// +---------------+ +/// ~ [err_exts] ~ if Z==1 +/// +---------------+ +/// ``` +pub mod flag { + pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present + // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Ack { + pub timestamp: Option, + pub ext_sinfo: Option, + pub ext_unknown: Vec, +} + +pub mod ext { + use crate::{common::ZExtZBuf, zextzbuf}; + + /// # SourceInfo extension + /// Used to carry additional information about the source of data + pub type SourceInfo = zextzbuf!(0x1, false); + pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; +} + +impl Ack { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use crate::{common::iext, core::ZenohId}; + use rand::Rng; + let mut rng = rand::thread_rng(); + + let timestamp = rng.gen_bool(0.5).then_some({ + let time = uhlc::NTP64(rng.gen()); + let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + Timestamp::new(time, id) + }); + let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + let mut ext_unknown = Vec::new(); + for _ in 0..rng.gen_range(0..4) { + ext_unknown.push(ZExtUnknown::rand2( + iext::mid(ext::SourceInfo::ID) + 1, + false, + )); + } + + Self { + timestamp, + ext_sinfo, + ext_unknown, + } + } +} diff --git a/commons/zenoh-protocol/src/zenoh/err.rs b/commons/zenoh-protocol/src/zenoh/err.rs 
index eacbb26596..648efff441 100644 --- a/commons/zenoh-protocol/src/zenoh/err.rs +++ b/commons/zenoh-protocol/src/zenoh/err.rs @@ -11,41 +11,43 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{common::ZExtUnknown, core::Encoding}; +use crate::common::ZExtUnknown; use alloc::vec::Vec; -use zenoh_buffers::ZBuf; +use uhlc::Timestamp; /// # Err message /// /// ```text /// Flags: -/// - X: Reserved -/// - E: Encoding If E==1 then the encoding is present +/// - T: Timestamp If T==1 then the timestamp if present +/// - I: Infrastructure If I==1 then the error is related to the infrastructure else to the user /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|E|X| ERR | +/// |Z|I|T| ERR | /// +-+-+-+---------+ -/// ~ encoding ~ if E==1 +/// % code:z16 % /// +---------------+ -/// ~ [err_exts] ~ if Z==1 +/// ~ ts: ~ if T==1 /// +---------------+ -/// ~ pl: ~ -- Payload +/// ~ [err_exts] ~ if Z==1 /// +---------------+ /// ``` pub mod flag { - // pub const X: u8 = 1 << 5; // 0x20 Reserved - pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present + pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present + pub const I: u8 = 1 << 6; // 0x40 Infrastructure if I==1 then the error is related to the infrastructure else to the user pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Err { - pub encoding: Encoding, + pub code: u16, + pub is_infrastructure: bool, + pub timestamp: Option, pub ext_sinfo: Option, + pub ext_body: Option, pub ext_unknown: Vec, - pub payload: ZBuf, } pub mod ext { @@ -55,31 +57,45 @@ pub mod ext { /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + + /// # ErrBody extension + /// Used to carry a body 
attached to the query + /// Shared Memory extension is automatically defined by ValueType extension if + /// #[cfg(feature = "shared-memory")] is defined. + pub type ErrBodyType = crate::zenoh::ext::ValueType<{ ZExtZBuf::<0x02>::id(false) }, 0x03>; } impl Err { #[cfg(feature = "test")] pub fn rand() -> Self { - use crate::common::iext; + use crate::{common::iext, core::ZenohId}; use rand::Rng; let mut rng = rand::thread_rng(); - let encoding = Encoding::rand(); + let code: u16 = rng.gen(); + let is_infrastructure = rng.gen_bool(0.5); + let timestamp = rng.gen_bool(0.5).then_some({ + let time = uhlc::NTP64(rng.gen()); + let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + Timestamp::new(time, id) + }); let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + let ext_body = rng.gen_bool(0.5).then_some(ext::ErrBodyType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { ext_unknown.push(ZExtUnknown::rand2( - iext::mid(ext::SourceInfo::ID) + 1, + iext::mid(ext::ErrBodyType::SID) + 1, false, )); } - let payload = ZBuf::rand(rng.gen_range(0..=64)); Self { - encoding, + code, + is_infrastructure, + timestamp, ext_sinfo, + ext_body, ext_unknown, - payload, } } } diff --git a/commons/zenoh-protocol/src/zenoh/mod.rs b/commons/zenoh-protocol/src/zenoh/mod.rs index 7bca48f3ba..e67576e673 100644 --- a/commons/zenoh-protocol/src/zenoh/mod.rs +++ b/commons/zenoh-protocol/src/zenoh/mod.rs @@ -11,15 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // +pub mod ack; pub mod del; pub mod err; +pub mod pull; pub mod put; pub mod query; pub mod reply; use crate::core::Encoding; +pub use ack::Ack; pub use del::Del; pub use err::Err; +pub use pull::Pull; pub use put::Put; pub use query::{Consolidation, Query}; pub use reply::Reply; @@ -31,6 +35,8 @@ pub mod id { pub const QUERY: u8 = 0x03; pub const REPLY: u8 = 0x04; pub const ERR: u8 = 0x05; + pub const ACK: u8 = 0x06; + pub const PULL: u8 = 0x07; } // DataInfo @@ -77,6 
+83,9 @@ impl From for PushBody { #[derive(Debug, Clone, PartialEq, Eq)] pub enum RequestBody { Query(Query), + Put(Put), + Del(Del), + Pull(Pull), } impl RequestBody { @@ -86,8 +95,10 @@ impl RequestBody { let mut rng = rand::thread_rng(); - match rng.gen_range(0..1) { + match rng.gen_range(0..3) { 0 => RequestBody::Query(Query::rand()), + 1 => RequestBody::Put(Put::rand()), + 2 => RequestBody::Del(Del::rand()), _ => unreachable!(), } } @@ -99,22 +110,39 @@ impl From for RequestBody { } } +impl From for RequestBody { + fn from(p: Put) -> RequestBody { + RequestBody::Put(p) + } +} + +impl From for RequestBody { + fn from(d: Del) -> RequestBody { + RequestBody::Del(d) + } +} + // Response #[derive(Debug, Clone, PartialEq, Eq)] pub enum ResponseBody { Reply(Reply), Err(Err), + Ack(Ack), + Put(Put), } impl ResponseBody { #[cfg(feature = "test")] pub fn rand() -> Self { use rand::Rng; + let mut rng = rand::thread_rng(); - match rng.gen_range(0..2) { + match rng.gen_range(0..4) { 0 => ResponseBody::Reply(Reply::rand()), 1 => ResponseBody::Err(Err::rand()), + 2 => ResponseBody::Ack(Ack::rand()), + 3 => ResponseBody::Put(Put::rand()), _ => unreachable!(), } } @@ -132,10 +160,16 @@ impl From for ResponseBody { } } +impl From for ResponseBody { + fn from(r: Ack) -> ResponseBody { + ResponseBody::Ack(r) + } +} + pub mod ext { use zenoh_buffers::ZBuf; - use crate::core::{Encoding, EntityGlobalId}; + use crate::core::{Encoding, ZenohId}; /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ @@ -149,7 +183,8 @@ pub mod ext { /// +---------------+ #[derive(Debug, Clone, PartialEq, Eq)] pub struct SourceInfoType { - pub id: EntityGlobalId, + pub zid: ZenohId, + pub eid: u32, pub sn: u32, } @@ -159,9 +194,10 @@ pub mod ext { use rand::Rng; let mut rng = rand::thread_rng(); - let id = EntityGlobalId::rand(); + let zid = ZenohId::rand(); + let eid: u32 = rng.gen(); let sn: u32 = rng.gen(); - Self { id, sn } + Self { zid, eid, sn } } } @@ -184,14 +220,12 @@ pub mod ext { } } - /// ```text /// 7 
6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ /// ~ encoding ~ /// +---------------+ /// ~ pl: [u8;z32] ~ -- Payload /// +---------------+ - /// ``` #[derive(Debug, Clone, PartialEq, Eq)] pub struct ValueType { #[cfg(feature = "shared-memory")] diff --git a/commons/zenoh-protocol/src/zenoh/pull.rs b/commons/zenoh-protocol/src/zenoh/pull.rs new file mode 100644 index 0000000000..eb4f7eb55e --- /dev/null +++ b/commons/zenoh-protocol/src/zenoh/pull.rs @@ -0,0 +1,56 @@ +// +// Copyright (c) 2022 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::common::ZExtUnknown; +use alloc::vec::Vec; + +/// # Pull message +/// +/// ```text +/// Flags: +/// - X: Reserved +/// - X: Reserved +/// - Z: Extension If Z==1 then at least one extension is present +/// +/// 7 6 5 4 3 2 1 0 +/// +-+-+-+-+-+-+-+-+ +/// |Z|X|X| PULL | +/// +-+-+-+---------+ +/// ~ [pull_exts] ~ if Z==1 +/// +---------------+ +/// ``` +pub mod flag { + // pub const X: u8 = 1 << 5; // 0x20 Reserved + // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Pull { + pub ext_unknown: Vec, +} + +impl Pull { + #[cfg(feature = "test")] + pub fn rand() -> Self { + use rand::Rng; + let mut rng = rand::thread_rng(); + + let mut ext_unknown = Vec::new(); + for _ in 0..rng.gen_range(0..4) { + ext_unknown.push(ZExtUnknown::rand2(1, false)); + } + + Self { ext_unknown } + } +} diff --git a/commons/zenoh-protocol/src/zenoh/query.rs b/commons/zenoh-protocol/src/zenoh/query.rs index f1baaebe20..7432840492 100644 --- 
a/commons/zenoh-protocol/src/zenoh/query.rs +++ b/commons/zenoh-protocol/src/zenoh/query.rs @@ -11,7 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::common::ZExtUnknown; +use crate::{common::ZExtUnknown, core::ConsolidationMode}; use alloc::{string::String, vec::Vec}; /// The kind of consolidation. @@ -33,21 +33,35 @@ pub enum Consolidation { Monotonic, /// Holds back samples to only send the set of samples that had the highest timestamp for their key. Latest, - // Remove the duplicates of any samples based on the their timestamp. - // Unique, + /// Remove the duplicates of any samples based on the their timestamp. + Unique, } impl Consolidation { - pub const DEFAULT: Self = Self::Auto; - #[cfg(feature = "test")] pub fn rand() -> Self { use rand::prelude::SliceRandom; let mut rng = rand::thread_rng(); - *[Self::None, Self::Monotonic, Self::Latest, Self::Auto] - .choose(&mut rng) - .unwrap() + *[ + Self::None, + Self::Monotonic, + Self::Latest, + Self::Unique, + Self::Auto, + ] + .choose(&mut rng) + .unwrap() + } +} + +impl From for Consolidation { + fn from(val: ConsolidationMode) -> Self { + match val { + ConsolidationMode::None => Consolidation::None, + ConsolidationMode::Monotonic => Consolidation::Monotonic, + ConsolidationMode::Latest => Consolidation::Latest, + } } } @@ -55,45 +69,50 @@ impl Consolidation { /// /// ```text /// Flags: -/// - C: Consolidation if C==1 then consolidation is present /// - P: Parameters If P==1 then the parameters are present +/// - X: Reserved /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|P|C| QUERY | +/// |Z|X|P| QUERY | /// +-+-+-+---------+ -/// % consolidation % if C==1 -/// +---------------+ /// ~ ps: ~ if P==1 /// +---------------+ /// ~ [qry_exts] ~ if Z==1 /// +---------------+ /// ``` pub mod flag { - pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present - pub const P: u8 = 1 << 6; // 0x40 
Parameters if P==1 then the parameters are present + pub const P: u8 = 1 << 5; // 0x20 Parameters if P==1 then the parameters are present + // pub const X: u8 = 1 << 6; // 0x40 Reserved pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Query { - pub consolidation: Consolidation, pub parameters: String, pub ext_sinfo: Option, + pub ext_consolidation: Consolidation, pub ext_body: Option, pub ext_attachment: Option, pub ext_unknown: Vec, } pub mod ext { - use crate::{common::ZExtZBuf, zextzbuf}; + use crate::{ + common::{ZExtZ64, ZExtZBuf}, + zextz64, zextzbuf, + }; /// # SourceInfo extension /// Used to carry additional information about the source of data pub type SourceInfo = zextzbuf!(0x1, false); pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + /// # Consolidation extension + pub type Consolidation = zextz64!(0x2, true); + pub type ConsolidationType = crate::zenoh::query::Consolidation; + /// # QueryBody extension /// Used to carry a body attached to the query /// Shared Memory extension is automatically defined by ValueType extension if @@ -118,7 +137,6 @@ impl Query { const MIN: usize = 2; const MAX: usize = 16; - let consolidation = Consolidation::rand(); let parameters: String = if rng.gen_bool(0.5) { let len = rng.gen_range(MIN..MAX); Alphanumeric.sample_string(&mut rng, len) @@ -126,6 +144,7 @@ impl Query { String::new() }; let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + let ext_consolidation = Consolidation::rand(); let ext_body = rng.gen_bool(0.5).then_some(ext::QueryBodyType::rand()); let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); @@ -137,9 +156,9 @@ impl Query { } Self { - consolidation, parameters, ext_sinfo, + ext_consolidation, ext_body, ext_attachment, ext_unknown, diff --git a/commons/zenoh-protocol/src/zenoh/reply.rs 
b/commons/zenoh-protocol/src/zenoh/reply.rs index 7cbab4ca0a..2395e1e9b2 100644 --- a/commons/zenoh-protocol/src/zenoh/reply.rs +++ b/commons/zenoh-protocol/src/zenoh/reply.rs @@ -11,61 +11,115 @@ // Contributors: // ZettaScale Zenoh Team, // -use crate::{ - common::ZExtUnknown, - zenoh::{query::Consolidation, PushBody}, -}; +use crate::{common::ZExtUnknown, core::Encoding}; use alloc::vec::Vec; +use uhlc::Timestamp; +use zenoh_buffers::ZBuf; /// # Reply message /// /// ```text /// Flags: -/// - C: Consolidation if C==1 then consolidation is present -/// - X: Reserved +/// - T: Timestamp If T==1 then the timestamp if present +/// - E: Encoding If E==1 then the encoding is present /// - Z: Extension If Z==1 then at least one extension is present /// /// 7 6 5 4 3 2 1 0 /// +-+-+-+-+-+-+-+-+ -/// |Z|X|C| REPLY | +/// |Z|E|T| REPLY | /// +-+-+-+---------+ -/// % consolidation % if C==1 +/// ~ ts: ~ if T==1 +/// +---------------+ +/// ~ encoding ~ if E==1 /// +---------------+ /// ~ [repl_exts] ~ if Z==1 /// +---------------+ -/// ~ ReplyBody ~ -- Payload +/// ~ pl: ~ -- Payload /// +---------------+ /// ``` pub mod flag { - pub const C: u8 = 1 << 5; // 0x20 Consolidation if C==1 then consolidation is present - // pub const X: u8 = 1 << 6; // 0x40 Reserved + pub const T: u8 = 1 << 5; // 0x20 Timestamp if T==0 then the timestamp if present + pub const E: u8 = 1 << 6; // 0x40 Encoding if E==1 then the encoding is present pub const Z: u8 = 1 << 7; // 0x80 Extensions if Z==1 then an extension will follow } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Reply { - pub consolidation: Consolidation, + pub timestamp: Option, + pub encoding: Encoding, + pub ext_sinfo: Option, + pub ext_consolidation: ext::ConsolidationType, + #[cfg(feature = "shared-memory")] + pub ext_shm: Option, + pub ext_attachment: Option, pub ext_unknown: Vec, - pub payload: ReplyBody, + pub payload: ZBuf, } -pub type ReplyBody = PushBody; +pub mod ext { + #[cfg(feature = "shared-memory")] + use 
crate::{common::ZExtUnit, zextunit}; + use crate::{ + common::{ZExtZ64, ZExtZBuf}, + zextz64, zextzbuf, + }; + + /// # SourceInfo extension + /// Used to carry additional information about the source of data + pub type SourceInfo = zextzbuf!(0x1, false); + pub type SourceInfoType = crate::zenoh::ext::SourceInfoType<{ SourceInfo::ID }>; + + /// # Consolidation extension + pub type Consolidation = zextz64!(0x2, true); + pub type ConsolidationType = crate::zenoh::query::ext::ConsolidationType; + + /// # Shared Memory extension + /// Used to carry additional information about the shared-memory layour of data + #[cfg(feature = "shared-memory")] + pub type Shm = zextunit!(0x3, true); + #[cfg(feature = "shared-memory")] + pub type ShmType = crate::zenoh::ext::ShmType<{ Shm::ID }>; + + /// # User attachment + pub type Attachment = zextzbuf!(0x4, false); + pub type AttachmentType = crate::zenoh::ext::AttachmentType<{ Attachment::ID }>; +} impl Reply { #[cfg(feature = "test")] pub fn rand() -> Self { + use crate::{common::iext, core::ZenohId, zenoh::Consolidation}; use rand::Rng; let mut rng = rand::thread_rng(); - let payload = ReplyBody::rand(); - let consolidation = Consolidation::rand(); + let timestamp = rng.gen_bool(0.5).then_some({ + let time = uhlc::NTP64(rng.gen()); + let id = uhlc::ID::try_from(ZenohId::rand().to_le_bytes()).unwrap(); + Timestamp::new(time, id) + }); + let encoding = Encoding::rand(); + let ext_sinfo = rng.gen_bool(0.5).then_some(ext::SourceInfoType::rand()); + let ext_consolidation = Consolidation::rand(); + #[cfg(feature = "shared-memory")] + let ext_shm = rng.gen_bool(0.5).then_some(ext::ShmType::rand()); + let ext_attachment = rng.gen_bool(0.5).then_some(ext::AttachmentType::rand()); let mut ext_unknown = Vec::new(); for _ in 0..rng.gen_range(0..4) { - ext_unknown.push(ZExtUnknown::rand2(1, false)); + ext_unknown.push(ZExtUnknown::rand2( + iext::mid(ext::Attachment::ID) + 1, + false, + )); } + let payload = ZBuf::rand(rng.gen_range(1..=64)); 
Self { - consolidation, + timestamp, + encoding, + ext_sinfo, + ext_consolidation, + #[cfg(feature = "shared-memory")] + ext_shm, + ext_attachment, ext_unknown, payload, } diff --git a/commons/zenoh-runtime/Cargo.toml b/commons/zenoh-runtime/Cargo.toml index b7aa15d634..e5bd64b8c5 100644 --- a/commons/zenoh-runtime/Cargo.toml +++ b/commons/zenoh-runtime/Cargo.toml @@ -13,6 +13,7 @@ description = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +futures = { workspace = true } lazy_static = { workspace = true } zenoh-result = { workspace = true, features = ["std"] } zenoh-collections = { workspace = true, features = ["std"] } diff --git a/commons/zenoh-runtime/src/lib.rs b/commons/zenoh-runtime/src/lib.rs index ac040af838..492e0a6665 100644 --- a/commons/zenoh-runtime/src/lib.rs +++ b/commons/zenoh-runtime/src/lib.rs @@ -22,6 +22,7 @@ use std::{ atomic::{AtomicUsize, Ordering}, OnceLock, }, + time::Duration, }; use tokio::runtime::{Handle, Runtime, RuntimeFlavor}; use zenoh_collections::Properties; @@ -147,6 +148,38 @@ impl ZRuntimePool { } } +// If there are any blocking tasks spawned by ZRuntimes, the function will block until they return. +impl Drop for ZRuntimePool { + fn drop(&mut self) { + let handles: Vec<_> = self + .0 + .drain() + .filter_map(|(_name, mut rt)| { + rt.take() + .map(|r| std::thread::spawn(move || r.shutdown_timeout(Duration::from_secs(1)))) + }) + .collect(); + + for hd in handles { + let _ = hd.join(); + } + } +} + +/// In order to prevent valgrind reporting memory leaks, +/// we use this guard to force drop ZRUNTIME_POOL since Rust does not drop static variables. 
+#[doc(hidden)] +pub struct ZRuntimePoolGuard; + +impl Drop for ZRuntimePoolGuard { + fn drop(&mut self) { + unsafe { + let ptr = &(*ZRUNTIME_POOL) as *const ZRuntimePool; + std::mem::drop(ptr.read()); + } + } +} + #[derive(Debug, Copy, Clone)] pub struct ZRuntimeConfig { pub application_threads: usize, diff --git a/commons/zenoh-task/Cargo.toml b/commons/zenoh-task/Cargo.toml new file mode 100644 index 0000000000..bf52f13735 --- /dev/null +++ b/commons/zenoh-task/Cargo.toml @@ -0,0 +1,33 @@ +# +# Copyright (c) 2024 ZettaScale Technology +# +# This program and the accompanying materials are made available under the +# terms of the Eclipse Public License 2.0 which is available at +# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +# which is available at https://www.apache.org/licenses/LICENSE-2.0. +# +# SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +# +# Contributors: +# ZettaScale Zenoh Team, +# +[package] +rust-version = { workspace = true } +name = "zenoh-task" +version = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +authors = {workspace = true } +edition = { workspace = true } +license = { workspace = true } +categories = { workspace = true } +description = "Internal crate for zenoh." 
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { workspace = true, features = ["default", "sync"] } +futures = { workspace = true } +log = { workspace = true } +zenoh-core = { workspace = true } +zenoh-runtime = { workspace = true } +tokio-util = { workspace = true, features = ["rt"] } \ No newline at end of file diff --git a/commons/zenoh-task/src/lib.rs b/commons/zenoh-task/src/lib.rs new file mode 100644 index 0000000000..7b305cee75 --- /dev/null +++ b/commons/zenoh-task/src/lib.rs @@ -0,0 +1,191 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) + +use futures::future::FutureExt; +use std::future::Future; +use std::time::Duration; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; +use tokio_util::task::TaskTracker; +use zenoh_core::{ResolveFuture, SyncResolve}; +use zenoh_runtime::ZRuntime; + +#[derive(Clone)] +pub struct TaskController { + tracker: TaskTracker, + token: CancellationToken, +} + +impl Default for TaskController { + fn default() -> Self { + TaskController { + tracker: TaskTracker::new(), + token: CancellationToken::new(), + } + } +} + +impl TaskController { + /// Spawns a task that can be later terminated by call to [`TaskController::terminate_all()`]. + /// Task output is ignored. 
+ pub fn spawn_abortable(&self, future: F) -> JoinHandle<()> + where + F: Future + Send + 'static, + T: Send + 'static, + { + let token = self.token.child_token(); + let task = async move { + tokio::select! { + _ = token.cancelled() => {}, + _ = future => {} + } + }; + self.tracker.spawn(task) + } + + /// Spawns a task using a specified runtime that can be later terminated by call to [`TaskController::terminate_all()`]. + /// Task output is ignored. + pub fn spawn_abortable_with_rt(&self, rt: ZRuntime, future: F) -> JoinHandle<()> + where + F: Future + Send + 'static, + T: Send + 'static, + { + let token = self.token.child_token(); + let task = async move { + tokio::select! { + _ = token.cancelled() => {}, + _ = future => {} + } + }; + self.tracker.spawn_on(task, &rt) + } + + pub fn get_cancellation_token(&self) -> CancellationToken { + self.token.child_token() + } + + /// Spawns a task that can be cancelled via cancellation of a token obtained by [`TaskController::get_cancellation_token()`], + /// or that can run to completion in finite amount of time. + /// It can be later terminated by call to [`TaskController::terminate_all()`]. + pub fn spawn(&self, future: F) -> JoinHandle<()> + where + F: Future + Send + 'static, + T: Send + 'static, + { + self.tracker.spawn(future.map(|_f| ())) + } + + /// Spawns a task that can be cancelled via cancellation of a token obtained by [`TaskController::get_cancellation_token()`], + /// or that can run to completion in finite amount of time, using a specified runtime. + /// It can be later aborted by call to [`TaskController::terminate_all()`]. 
+ pub fn spawn_with_rt(&self, rt: ZRuntime, future: F) -> JoinHandle<()> + where + F: Future + Send + 'static, + T: Send + 'static, + { + self.tracker.spawn_on(future.map(|_f| ()), &rt) + } + + /// Attempts tp terminate all previously spawned tasks + /// The caller must ensure that all tasks spawned with [`TaskController::spawn()`] + /// or [`TaskController::spawn_with_rt()`] can yield in finite amount of time either because they will run to completion + /// or due to cancellation of token acquired via [`TaskController::get_cancellation_token()`]. + /// Tasks spawned with [`TaskController::spawn_abortable()`] or [`TaskController::spawn_abortable_with_rt()`] will be aborted (i.e. terminated upon next await call). + /// The call blocks until all tasks yield or timeout duration expires. + /// Returns 0 in case of success, number of non terminated tasks otherwise. + pub fn terminate_all(&self, timeout: Duration) -> usize { + ResolveFuture::new(async move { self.terminate_all_async(timeout).await }).res_sync() + } + + /// Async version of [`TaskController::terminate_all()`]. + pub async fn terminate_all_async(&self, timeout: Duration) -> usize { + self.tracker.close(); + self.token.cancel(); + if tokio::time::timeout(timeout, self.tracker.wait()) + .await + .is_err() + { + log::error!("Failed to terminate {} tasks", self.tracker.len()); + return self.tracker.len(); + } + 0 + } +} + +pub struct TerminatableTask { + handle: JoinHandle<()>, + token: CancellationToken, +} + +impl TerminatableTask { + pub fn create_cancellation_token() -> CancellationToken { + CancellationToken::new() + } + + /// Spawns a task that can be later terminated by [`TerminatableTask::terminate()`]. + /// Prior to termination attempt the specified cancellation token will be cancelled. 
+ pub fn spawn(rt: ZRuntime, future: F, token: CancellationToken) -> TerminatableTask + where + F: Future + Send + 'static, + T: Send + 'static, + { + TerminatableTask { + handle: rt.spawn(future.map(|_f| ())), + token, + } + } + + /// Spawns a task that can be later aborted by [`TerminatableTask::terminate()`]. + pub fn spawn_abortable(rt: ZRuntime, future: F) -> TerminatableTask + where + F: Future + Send + 'static, + T: Send + 'static, + { + let token = CancellationToken::new(); + let token2 = token.clone(); + let task = async move { + tokio::select! { + _ = token2.cancelled() => {}, + _ = future => {} + } + }; + + TerminatableTask { + handle: rt.spawn(task), + token, + } + } + + /// Attempts to terminate the task. + /// Returns true if task completed / aborted within timeout duration, false otherwise. + pub fn terminate(self, timeout: Duration) -> bool { + ResolveFuture::new(async move { self.terminate_async(timeout).await }).res_sync() + } + + /// Async version of [`TerminatableTask::terminate()`]. 
+ pub async fn terminate_async(self, timeout: Duration) -> bool { + self.token.cancel(); + if tokio::time::timeout(timeout, self.handle).await.is_err() { + log::error!("Failed to terminate the task"); + return false; + }; + true + } +} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 4a4a4fef3e..b0cf6a0ece 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -27,7 +27,7 @@ readme = "README.md" publish = false [features] -shared-memory = ["zenoh-shm","zenoh/shared-memory"] +shared-memory = ["zenoh/shared-memory"] unstable = ["zenoh/unstable"] transport_unixpipe = ["zenoh/transport_unixpipe"] @@ -49,10 +49,8 @@ futures = { workspace = true } git-version = { workspace = true } json5 = { workspace = true } log = { workspace = true } -zenoh = { workspace = true } -zenoh-collections = { workspace = true } +zenoh = { workspace = true, default-features = true } zenoh-ext = { workspace = true } -zenoh-shm = { workspace = true, optional = true } [dev-dependencies] rand = { workspace = true, features = ["default"] } @@ -97,11 +95,6 @@ required-features = ["shared-memory"] name = "z_sub" path = "examples/z_sub.rs" -[[example]] -name = "z_sub_shm" -path = "examples/z_sub_shm.rs" -required-features = ["shared-memory"] - [[example]] name = "z_pull" path = "examples/z_pull.rs" diff --git a/examples/README.md b/examples/README.md index 1ecda78cc4..0d38e32185 100644 --- a/examples/README.md +++ b/examples/README.md @@ -79,8 +79,8 @@ ### z_pull - Declares a key expression and a pull subscriber. - On each pull, the pull subscriber will be notified of the last N `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. + Declares a key expression and a pull subscriber. + On each pull, the pull subscriber will be notified of the last `put` or `delete` made on each key expression matching the subscriber key expression, and will print this notification. 
Typical usage: @@ -89,7 +89,7 @@ ``` or ```bash - z_pull -k demo/** --size 3 + z_pull -k 'demo/**' ``` ### z_get diff --git a/examples/examples/z_get.rs b/examples/examples/z_get.rs index 8735ae8daa..0603b4f9fb 100644 --- a/examples/examples/z_get.rs +++ b/examples/examples/z_get.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; +use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -28,34 +29,23 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Sending Query '{selector}'..."); - let replies = session - .get(&selector) - .value(value) - .target(target) - .timeout(timeout) - .res() - .await - .unwrap(); + let replies = match value { + Some(value) => session.get(&selector).with_value(value), + None => session.get(&selector), + } + .target(target) + .timeout(timeout) + .res() + .await + .unwrap(); while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => { - let payload = sample - .payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!( - ">> Received ('{}': '{}')", - sample.key_expr().as_str(), - payload, - ); - } - Err(err) => { - let payload = err - .payload - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!(">> Received (ERROR: '{}')", payload); - } + Ok(sample) => println!( + ">> Received ('{}': '{}')", + sample.key_expr.as_str(), + sample.value, + ), + Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), } } } diff --git a/examples/examples/z_get_liveliness.rs b/examples/examples/z_get_liveliness.rs index 487f3c25d6..3538b7a05c 100644 --- a/examples/examples/z_get_liveliness.rs +++ b/examples/examples/z_get_liveliness.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use clap::Parser; +use std::convert::TryFrom; use std::time::Duration; use zenoh::config::Config; use zenoh::prelude::r#async::*; @@ -37,14 +38,8 @@ async fn main() { .unwrap(); 
while let Ok(reply) = replies.recv_async().await { match reply.sample { - Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr().as_str(),), - Err(err) => { - let payload = err - .payload - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!(">> Received (ERROR: '{}')", payload); - } + Ok(sample) => println!(">> Alive token ('{}')", sample.key_expr.as_str(),), + Err(err) => println!(">> Received (ERROR: '{}')", String::try_from(&err).unwrap()), } } } diff --git a/examples/examples/z_ping.rs b/examples/examples/z_ping.rs index a57c937e48..fe5ed4d46b 100644 --- a/examples/examples/z_ping.rs +++ b/examples/examples/z_ping.rs @@ -22,7 +22,7 @@ fn main() { // initiate logging env_logger::init(); - let (config, warmup, size, n, express) = parse_args(); + let (config, warmup, size, n) = parse_args(); let session = zenoh::open(config).res().unwrap(); // The key expression to publish data on @@ -35,11 +35,10 @@ fn main() { let publisher = session .declare_publisher(key_expr_ping) .congestion_control(CongestionControl::Block) - .express(express) .res() .unwrap(); - let data: Payload = (0usize..size) + let data: Value = (0usize..size) .map(|i| (i % 10) as u8) .collect::>() .into(); @@ -79,9 +78,6 @@ fn main() { #[derive(Parser)] struct Args { - /// express for sending data - #[arg(long, default_value = "false")] - no_express: bool, #[arg(short, long, default_value = "1")] /// The number of seconds to warm up (float) warmup: f64, @@ -94,13 +90,12 @@ struct Args { common: CommonArgs, } -fn parse_args() -> (Config, Duration, usize, usize, bool) { +fn parse_args() -> (Config, Duration, usize, usize) { let args = Args::parse(); ( args.common.into(), Duration::from_secs_f64(args.warmup), args.payload_size, args.samples, - !args.no_express, ) } diff --git a/examples/examples/z_pong.rs b/examples/examples/z_pong.rs index baa5683f62..53ff03b778 100644 --- a/examples/examples/z_pong.rs +++ b/examples/examples/z_pong.rs @@ -21,7 +21,7 @@ fn main() { // 
initiate logging env_logger::init(); - let (config, express) = parse_args(); + let config = parse_args(); let session = zenoh::open(config).res().unwrap().into_arc(); @@ -34,13 +34,12 @@ fn main() { let publisher = session .declare_publisher(key_expr_pong) .congestion_control(CongestionControl::Block) - .express(express) .res() .unwrap(); let _sub = session .declare_subscriber(key_expr_ping) - .callback(move |sample| publisher.put(sample.payload().clone()).res().unwrap()) + .callback(move |sample| publisher.put(sample.value).res().unwrap()) .res() .unwrap(); std::thread::park(); @@ -48,14 +47,11 @@ fn main() { #[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { - /// express for sending data - #[arg(long, default_value = "false")] - no_express: bool, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, bool) { +fn parse_args() -> Config { let args = Args::parse(); - (args.common.into(), !args.no_express) + args.common.into() } diff --git a/examples/examples/z_pub.rs b/examples/examples/z_pub.rs index 8cd3c4edba..4863387df0 100644 --- a/examples/examples/z_pub.rs +++ b/examples/examples/z_pub.rs @@ -37,12 +37,12 @@ async fn main() { println!("Putting Data ('{}': '{}')...", &key_expr, buf); let mut put = publisher.put(buf); if let Some(attachment) = &attachment { - put = put.attachment(Some( + put = put.with_attachment( attachment .split('&') .map(|pair| split_once(pair, '=')) .collect(), - )) + ) } put.res().await.unwrap(); } diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 4354ad2e68..89b8b9b55c 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -23,14 +23,14 @@ fn main() { env_logger::init(); let args = Args::parse(); - let mut prio = Priority::DEFAULT; + let mut prio = Priority::default(); if let Some(p) = args.priority { prio = p.try_into().unwrap(); } let payload_size = args.payload_size; - let data: Payload = (0..payload_size) + let data: Value = 
(0..payload_size) .map(|i| (i % 10) as u8) .collect::>() .into(); @@ -41,7 +41,6 @@ fn main() { .declare_publisher("test/thr") .congestion_control(CongestionControl::Block) .priority(prio) - .express(args.express) .res() .unwrap(); @@ -66,9 +65,6 @@ fn main() { #[derive(Parser, Clone, PartialEq, Eq, Hash, Debug)] struct Args { - /// express for sending data - #[arg(long, default_value = "false")] - express: bool, /// Priority for sending data #[arg(short, long)] priority: Option, diff --git a/examples/examples/z_pull.rs b/examples/examples/z_pull.rs index 4e44930f4f..bd59be7dee 100644 --- a/examples/examples/z_pull.rs +++ b/examples/examples/z_pull.rs @@ -13,7 +13,8 @@ // use clap::Parser; use std::time::Duration; -use zenoh::{config::Config, handlers::RingBuffer, prelude::r#async::*}; +use zenoh::config::Config; +use zenoh::prelude::r#async::*; use zenoh_examples::CommonArgs; #[tokio::main] @@ -21,69 +22,46 @@ async fn main() { // initiate logging env_logger::init(); - let (config, key_expr, size, interval) = parse_args(); + let (config, key_expr) = parse_args(); println!("Opening session..."); let session = zenoh::open(config).res().await.unwrap(); println!("Declaring Subscriber on '{key_expr}'..."); + let subscriber = session .declare_subscriber(&key_expr) - .with(RingBuffer::new(size)) + .pull_mode() + .callback(|sample| { + println!( + ">> [Subscriber] Received {} ('{}': '{}')", + sample.kind, + sample.key_expr.as_str(), + sample.value, + ); + }) .res() .await .unwrap(); - println!( - "Pulling data every {:#?} seconds. Press CTRL-C to quit...", - interval - ); - loop { - match subscriber.recv() { - Ok(Some(sample)) => { - let payload = sample - .payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!( - ">> [Subscriber] Pulled {} ('{}': '{}')", - sample.kind(), - sample.key_expr().as_str(), - payload, - ); - } - Ok(None) => { - println!( - ">> [Subscriber] Pulled nothing... 
sleep for {:#?}", - interval - ); - tokio::time::sleep(interval).await; - } - Err(e) => { - println!(">> [Subscriber] Pull error: {e}"); - return; - } - } + println!("Press CTRL-C to quit..."); + for idx in 0..u32::MAX { + tokio::time::sleep(Duration::from_secs(1)).await; + println!("[{idx:4}] Pulling..."); + subscriber.pull().res().await.unwrap(); } } -#[derive(clap::Parser, Clone, PartialEq, Debug)] +#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] struct SubArgs { #[arg(short, long, default_value = "demo/example/**")] /// The Key Expression to subscribe to. key: KeyExpr<'static>, - /// The size of the ringbuffer. - #[arg(long, default_value = "3")] - size: usize, - /// The interval for pulling the ringbuffer. - #[arg(long, default_value = "5.0")] - interval: f32, #[command(flatten)] common: CommonArgs, } -fn parse_args() -> (Config, KeyExpr<'static>, usize, Duration) { +fn parse_args() -> (Config, KeyExpr<'static>) { let args = SubArgs::parse(); - let interval = Duration::from_secs_f32(args.interval); - (args.common.into(), args.key, args.size, interval) + (args.common.into(), args.key) } diff --git a/examples/examples/z_queryable.rs b/examples/examples/z_queryable.rs index 83ac63ce1f..2feac12a8e 100644 --- a/examples/examples/z_queryable.rs +++ b/examples/examples/z_queryable.rs @@ -38,25 +38,20 @@ async fn main() { while let Ok(query) = queryable.recv_async().await { match query.value() { None => println!(">> [Queryable ] Received Query '{}'", query.selector()), - Some(value) => { - let payload = value - .payload - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); - println!( - ">> [Queryable ] Received Query '{}' with payload '{}'", - query.selector(), - payload - ) - } + Some(value) => println!( + ">> [Queryable ] Received Query '{}' with value '{}'", + query.selector(), + value + ), } println!( ">> [Queryable ] Responding ('{}': '{}')", key_expr.as_str(), value, ); + let reply = Ok(Sample::new(key_expr.clone(), value.clone())); query - 
.reply(key_expr.clone(), value.clone()) + .reply(reply) .res() .await .unwrap_or_else(|e| println!(">> [Queryable ] Error sending reply: {e}")); diff --git a/examples/examples/z_storage.rs b/examples/examples/z_storage.rs index cb2f40c125..161db6819f 100644 --- a/examples/examples/z_storage.rs +++ b/examples/examples/z_storage.rs @@ -48,12 +48,13 @@ async fn main() { select!( sample = subscriber.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload().deserialize::().unwrap_or_else(|e| format!("{}", e)); - println!(">> [Subscriber] Received {} ('{}': '{}')", sample.kind(), sample.key_expr().as_str(),payload); - match sample.kind() { - SampleKind::Delete => stored.remove(&sample.key_expr().to_string()), - SampleKind::Put => stored.insert(sample.key_expr().to_string(), sample), - }; + println!(">> [Subscriber] Received {} ('{}': '{}')", + sample.kind, sample.key_expr.as_str(), sample.value); + if sample.kind == SampleKind::Delete { + stored.remove(&sample.key_expr.to_string()); + } else { + stored.insert(sample.key_expr.to_string(), sample); + } }, query = queryable.recv_async() => { @@ -61,7 +62,7 @@ async fn main() { println!(">> [Queryable ] Received Query '{}'", query.selector()); for (stored_name, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe {keyexpr::from_str_unchecked(stored_name)}) { - query.reply(sample.key_expr().clone(), sample.payload().clone()).res().await.unwrap(); + query.reply(Ok(sample.clone())).res().await.unwrap(); } } } diff --git a/examples/examples/z_sub.rs b/examples/examples/z_sub.rs index 299f0c8f49..d2d86bea8b 100644 --- a/examples/examples/z_sub.rs +++ b/examples/examples/z_sub.rs @@ -32,19 +32,16 @@ async fn main() { let session = zenoh::open(config).res().await.unwrap(); println!("Declaring Subscriber on '{}'...", &key_expr); + let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); println!("Press CTRL-C to quit..."); while let Ok(sample) = 
subscriber.recv_async().await { - let payload = sample - .payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind(), - sample.key_expr().as_str(), - payload + sample.kind, + sample.key_expr.as_str(), + sample.value ); } } diff --git a/examples/examples/z_sub_liveliness.rs b/examples/examples/z_sub_liveliness.rs index 50ba40c7ac..0d0f9fc5ac 100644 --- a/examples/examples/z_sub_liveliness.rs +++ b/examples/examples/z_sub_liveliness.rs @@ -37,14 +37,14 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - match sample.kind() { + match sample.kind { SampleKind::Put => println!( ">> [LivelinessSubscriber] New alive token ('{}')", - sample.key_expr().as_str() + sample.key_expr.as_str() ), SampleKind::Delete => println!( ">> [LivelinessSubscriber] Dropped token ('{}')", - sample.key_expr().as_str() + sample.key_expr.as_str() ), } } diff --git a/examples/examples/z_sub_shm.rs b/examples/examples/z_sub_shm.rs deleted file mode 100644 index 630876f287..0000000000 --- a/examples/examples/z_sub_shm.rs +++ /dev/null @@ -1,66 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use clap::Parser; -use zenoh::config::Config; -use zenoh::prelude::r#async::*; -use zenoh_examples::CommonArgs; -use zenoh_shm::SharedMemoryBuf; - -#[tokio::main] -async fn main() { - // Initiate logging - env_logger::init(); - - let (mut config, key_expr) = parse_args(); - - // A probing procedure for shared memory is performed upon session opening. 
To enable `z_pub_shm` to operate - // over shared memory (and to not fallback on network mode), shared memory needs to be enabled also on the - // subscriber side. By doing so, the probing procedure will succeed and shared memory will operate as expected. - config.transport.shared_memory.set_enabled(true).unwrap(); - - println!("Opening session..."); - let session = zenoh::open(config).res().await.unwrap(); - - println!("Declaring Subscriber on '{}'...", &key_expr); - let subscriber = session.declare_subscriber(&key_expr).res().await.unwrap(); - - println!("Press CTRL-C to quit..."); - while let Ok(sample) = subscriber.recv_async().await { - match sample.payload().deserialize::() { - Ok(payload) => println!( - ">> [Subscriber] Received {} ('{}': '{:02x?}')", - sample.kind(), - sample.key_expr().as_str(), - payload.as_slice() - ), - Err(e) => { - println!(">> [Subscriber] Not a SharedMemoryBuf: {:?}", e); - } - } - } -} - -#[derive(clap::Parser, Clone, PartialEq, Eq, Hash, Debug)] -struct SubArgs { - #[arg(short, long, default_value = "demo/example/**")] - /// The Key Expression to subscribe to. 
- key: KeyExpr<'static>, - #[command(flatten)] - common: CommonArgs, -} - -fn parse_args() -> (Config, KeyExpr<'static>) { - let args = SubArgs::parse(); - (args.common.into(), args.key) -} diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 138726fd4f..f9ad7166ee 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -32,7 +32,6 @@ pub use multicast::*; use serde::Serialize; pub use unicast::*; use zenoh_protocol::core::Locator; -use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; /*************************************/ @@ -46,7 +45,7 @@ pub struct Link { pub src: Locator, pub dst: Locator, pub group: Option, - pub mtu: BatchSize, + pub mtu: u16, pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, diff --git a/io/zenoh-link-commons/src/multicast.rs b/io/zenoh-link-commons/src/multicast.rs index ccfe6842c1..65bc7195b6 100644 --- a/io/zenoh-link-commons/src/multicast.rs +++ b/io/zenoh-link-commons/src/multicast.rs @@ -22,7 +22,7 @@ use zenoh_buffers::{reader::HasReader, writer::HasWriter}; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_protocol::{ core::{EndPoint, Locator}, - transport::{BatchSize, TransportMessage}, + transport::TransportMessage, }; use zenoh_result::{zerror, ZResult}; @@ -44,7 +44,7 @@ pub struct LinkMulticast(pub Arc); #[async_trait] pub trait LinkMulticastTrait: Send + Sync { - fn get_mtu(&self) -> BatchSize; + fn get_mtu(&self) -> u16; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index c21f4a008c..fe87e70e94 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -19,10 +19,7 @@ use core::{ ops::Deref, }; use std::net::SocketAddr; -use zenoh_protocol::{ - core::{EndPoint, Locator}, - transport::BatchSize, -}; +use zenoh_protocol::core::{EndPoint, Locator}; use 
zenoh_result::ZResult; pub type LinkManagerUnicast = Arc; @@ -44,7 +41,7 @@ pub struct LinkUnicast(pub Arc); #[async_trait] pub trait LinkUnicastTrait: Send + Sync { - fn get_mtu(&self) -> BatchSize; + fn get_mtu(&self) -> u16; fn get_src(&self) -> &Locator; fn get_dst(&self) -> &Locator; fn is_reliable(&self) -> bool; diff --git a/io/zenoh-links/zenoh-link-quic/src/lib.rs b/io/zenoh-links/zenoh-link-quic/src/lib.rs index 4bcabaf5b6..c6d7e16087 100644 --- a/io/zenoh-links/zenoh-link-quic/src/lib.rs +++ b/io/zenoh-links/zenoh-link-quic/src/lib.rs @@ -28,12 +28,9 @@ use std::net::SocketAddr; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::{ - core::{ - endpoint::{Address, Parameters}, - Locator, - }, - transport::BatchSize, +use zenoh_protocol::core::{ + endpoint::{Address, Parameters}, + Locator, }; use zenoh_result::{bail, zerror, ZResult}; @@ -50,7 +47,7 @@ pub const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the QUIC MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const QUIC_MAX_MTU: BatchSize = BatchSize::MAX; +const QUIC_MAX_MTU: u16 = u16::MAX; pub const QUIC_LOCATOR_PREFIX: &str = "quic"; #[derive(Default, Clone, Copy, Debug)] @@ -140,7 +137,7 @@ impl ConfigurationInspector for QuicConfigurator { zconfigurable! { // Default MTU (QUIC PDU) in bytes. - static ref QUIC_DEFAULT_MTU: BatchSize = QUIC_MAX_MTU; + static ref QUIC_DEFAULT_MTU: u16 = QUIC_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. 
// More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 14a01861ca..33953d666d 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -34,7 +34,6 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastQuic { @@ -136,7 +135,7 @@ impl LinkUnicastTrait for LinkUnicastQuic { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *QUIC_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-serial/src/lib.rs b/io/zenoh-links/zenoh-link-serial/src/lib.rs index f7b0b7afeb..fb4d7fcc12 100644 --- a/io/zenoh-links/zenoh-link-serial/src/lib.rs +++ b/io/zenoh-links/zenoh-link-serial/src/lib.rs @@ -25,11 +25,10 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; // Maximum MTU (Serial PDU) in bytes. -const SERIAL_MAX_MTU: BatchSize = z_serial::MAX_MTU as BatchSize; +const SERIAL_MAX_MTU: u16 = z_serial::MAX_MTU as u16; const DEFAULT_BAUDRATE: u32 = 9_600; @@ -37,11 +36,11 @@ const DEFAULT_EXCLUSIVE: bool = true; pub const SERIAL_LOCATOR_PREFIX: &str = "serial"; -const SERIAL_MTU_LIMIT: BatchSize = SERIAL_MAX_MTU; +const SERIAL_MTU_LIMIT: u16 = SERIAL_MAX_MTU; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref SERIAL_DEFAULT_MTU: BatchSize = SERIAL_MTU_LIMIT; + static ref SERIAL_DEFAULT_MTU: u16 = SERIAL_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref SERIAL_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index 0a5bea3c18..0efa40ee90 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -30,7 +30,6 @@ use zenoh_link_commons::{ NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use z_serial::ZSerial; @@ -178,7 +177,7 @@ impl LinkUnicastTrait for LinkUnicastSerial { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *SERIAL_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-tcp/src/lib.rs b/io/zenoh-links/zenoh-link-tcp/src/lib.rs index 0b075d9bf8..1a7d6ae705 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/lib.rs @@ -22,7 +22,6 @@ use std::net::SocketAddr; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; mod unicast; @@ -34,7 +33,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const TCP_MAX_MTU: BatchSize = BatchSize::MAX; +const TCP_MAX_MTU: u16 = u16::MAX; pub const TCP_LOCATOR_PREFIX: &str = "tcp"; @@ -53,7 +52,7 @@ impl LocatorInspector for TcpLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref TCP_DEFAULT_MTU: BatchSize = TCP_MAX_MTU; + static ref TCP_DEFAULT_MTU: u16 = TCP_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. 
// More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index aaadcf3c23..361f4fe69e 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -25,7 +25,6 @@ use zenoh_link_commons::{ ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use super::{ @@ -146,7 +145,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *TCP_DEFAULT_MTU } @@ -172,7 +171,7 @@ impl LinkUnicastTrait for LinkUnicastTcp { // impl Drop for LinkUnicastTcp { // fn drop(&mut self) { // // Close the underlying TCP socket -// zenoh_runtime::ZRuntime::TX.block_in_place(async { +// zenoh_runtime::ZRuntime::Acceptor.block_in_place(async { // let _ = self.get_mut_socket().shutdown().await; // }); // } diff --git a/io/zenoh-links/zenoh-link-tls/src/lib.rs b/io/zenoh-links/zenoh-link-tls/src/lib.rs index 7faebb4cd9..95d59104b4 100644 --- a/io/zenoh-links/zenoh-link-tls/src/lib.rs +++ b/io/zenoh-links/zenoh-link-tls/src/lib.rs @@ -30,12 +30,9 @@ use std::{convert::TryFrom, net::SocketAddr}; use zenoh_config::Config; use zenoh_core::zconfigurable; use zenoh_link_commons::{ConfigurationInspector, LocatorInspector}; -use zenoh_protocol::{ - core::{ - endpoint::{self, Address}, - Locator, - }, - transport::BatchSize, +use zenoh_protocol::core::{ + endpoint::{self, Address}, + Locator, }; use zenoh_result::{bail, zerror, ZResult}; @@ -48,7 +45,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TLS MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). 
-const TLS_MAX_MTU: BatchSize = BatchSize::MAX; +const TLS_MAX_MTU: u16 = u16::MAX; pub const TLS_LOCATOR_PREFIX: &str = "tls"; #[derive(Default, Clone, Copy)] @@ -175,7 +172,7 @@ impl ConfigurationInspector for TlsConfigurator { zconfigurable! { // Default MTU (TLS PDU) in bytes. - static ref TLS_DEFAULT_MTU: BatchSize = TLS_MAX_MTU; + static ref TLS_DEFAULT_MTU: u16 = TLS_MAX_MTU; // The LINGER option causes the shutdown() call to block until (1) all application data is delivered // to the remote end or (2) a timeout expires. The timeout is expressed in seconds. // More info on the LINGER option and its dynamics can be found at: diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index a58e7372dd..b24ce4ac31 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -42,8 +42,8 @@ use zenoh_link_commons::{ get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; +use zenoh_protocol::core::endpoint::Config; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::{core::endpoint::Config, transport::BatchSize}; use zenoh_result::{bail, zerror, ZError, ZResult}; pub struct LinkUnicastTls { @@ -180,7 +180,7 @@ impl LinkUnicastTrait for LinkUnicastTls { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *TLS_DEFAULT_MTU } @@ -204,8 +204,8 @@ impl Drop for LinkUnicastTls { fn drop(&mut self) { // Close the underlying TCP stream let (tcp_stream, _) = self.get_sock_mut().get_mut(); - let _ = - zenoh_runtime::ZRuntime::TX.block_in_place(async move { tcp_stream.shutdown().await }); + let _ = zenoh_runtime::ZRuntime::Acceptor + .block_in_place(async move { tcp_stream.shutdown().await }); } } diff --git a/io/zenoh-links/zenoh-link-udp/src/lib.rs b/io/zenoh-links/zenoh-link-udp/src/lib.rs index 86db845d8f..91d02cc13d 100644 --- 
a/io/zenoh-links/zenoh-link-udp/src/lib.rs +++ b/io/zenoh-links/zenoh-link-udp/src/lib.rs @@ -27,7 +27,6 @@ pub use unicast::*; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; // NOTE: In case of using UDP in high-throughput scenarios, it is recommended to set the @@ -45,24 +44,24 @@ use zenoh_result::{zerror, ZResult}; // Although in IPv6 it is possible to have UDP datagrams of size greater than 65,535 bytes via // IPv6 Jumbograms, its usage in Zenoh is discouraged unless the consequences are very well // understood. -const UDP_MAX_MTU: BatchSize = 65_507; +const UDP_MAX_MTU: u16 = 65_507; pub const UDP_LOCATOR_PREFIX: &str = "udp"; #[cfg(any(target_os = "linux", target_os = "windows"))] // Linux default value of a maximum datagram size is set to UDP MAX MTU. -const UDP_MTU_LIMIT: BatchSize = UDP_MAX_MTU; +const UDP_MTU_LIMIT: u16 = UDP_MAX_MTU; #[cfg(target_os = "macos")] // Mac OS X default value of a maximum datagram size is set to 9216 bytes. -const UDP_MTU_LIMIT: BatchSize = 9_216; +const UDP_MTU_LIMIT: u16 = 9_216; #[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))] -const UDP_MTU_LIMIT: BatchSize = 8_192; +const UDP_MTU_LIMIT: u16 = 8_192; zconfigurable! { // Default MTU (UDP PDU) in bytes. - static ref UDP_DEFAULT_MTU: BatchSize = UDP_MTU_LIMIT; + static ref UDP_DEFAULT_MTU: u16 = UDP_MTU_LIMIT; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UDP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-udp/src/multicast.rs b/io/zenoh-links/zenoh-link-udp/src/multicast.rs index a6e7977052..bc894bd296 100644 --- a/io/zenoh-links/zenoh-link-udp/src/multicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/multicast.rs @@ -21,7 +21,6 @@ use std::{borrow::Cow, fmt}; use tokio::net::UdpSocket; use zenoh_link_commons::{LinkManagerMulticastTrait, LinkMulticast, LinkMulticastTrait}; use zenoh_protocol::core::{Config, EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; pub struct LinkMulticastUdp { @@ -120,7 +119,7 @@ impl LinkMulticastTrait for LinkMulticastUdp { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index 5021969bfa..1cd4a0b1ec 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -30,7 +30,6 @@ use zenoh_link_commons::{ LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; use zenoh_sync::Mvar; @@ -201,7 +200,7 @@ impl LinkUnicastTrait for LinkUnicastUdp { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *UDP_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 3026d4e4b0..eb8ee05d87 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -30,10 +30,10 @@ use std::sync::Arc; use tokio::fs::remove_file; use tokio::io::unix::AsyncFd; use tokio::io::Interest; +use tokio::task::JoinHandle; use 
tokio_util::sync::CancellationToken; -use zenoh_core::{zasyncread, zasyncwrite}; +use zenoh_core::{zasyncread, zasyncwrite, ResolveFuture, SyncResolve}; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_runtime::ZRuntime; use unix_named_pipe::{create, open_write}; @@ -46,7 +46,7 @@ use zenoh_result::{bail, ZResult}; use super::FILE_ACCESS_MASK; -const LINUX_PIPE_MAX_MTU: BatchSize = BatchSize::MAX; +const LINUX_PIPE_MAX_MTU: u16 = 65_535; const LINUX_PIPE_DEDICATE_TRIES: usize = 100; static PIPE_INVITATION: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF]; @@ -286,6 +286,7 @@ async fn handle_incoming_connections( struct UnicastPipeListener { uplink_locator: Locator, token: CancellationToken, + handle: JoinHandle<()>, } impl UnicastPipeListener { async fn listen(endpoint: EndPoint, manager: Arc) -> ZResult { @@ -301,7 +302,7 @@ impl UnicastPipeListener { // WARN: The spawn_blocking is mandatory verified by the ping/pong test // create listening task - tokio::task::spawn_blocking(move || { + let handle = tokio::task::spawn_blocking(move || { ZRuntime::Acceptor.block_on(async move { loop { tokio::select! 
{ @@ -323,11 +324,13 @@ impl UnicastPipeListener { Ok(Self { uplink_locator: local, token, + handle, }) } fn stop_listening(self) { self.token.cancel(); + let _ = ResolveFuture::new(self.handle).res_sync(); } } @@ -499,7 +502,7 @@ impl LinkUnicastTrait for UnicastPipe { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { LINUX_PIPE_MAX_MTU } diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs index ce067c1aa2..b6c180cd8d 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/lib.rs @@ -21,7 +21,6 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::ZResult; #[cfg(target_family = "unix")] mod unicast; @@ -34,13 +33,13 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the UNIXSOCKSTREAM MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). -const UNIXSOCKSTREAM_MAX_MTU: BatchSize = BatchSize::MAX; +const UNIXSOCKSTREAM_MAX_MTU: u16 = u16::MAX; pub const UNIXSOCKSTREAM_LOCATOR_PREFIX: &str = "unixsock-stream"; zconfigurable! { // Default MTU (UNIXSOCKSTREAM PDU) in bytes. - static ref UNIXSOCKSTREAM_DEFAULT_MTU: BatchSize = UNIXSOCKSTREAM_MAX_MTU; + static ref UNIXSOCKSTREAM_DEFAULT_MTU: u16 = UNIXSOCKSTREAM_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. 
static ref UNIXSOCKSTREAM_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index a961c1aebb..b85cee9c66 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -32,7 +32,6 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{zerror, ZResult}; use super::{get_unix_path_as_string, UNIXSOCKSTREAM_DEFAULT_MTU, UNIXSOCKSTREAM_LOCATOR_PREFIX}; @@ -120,7 +119,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *UNIXSOCKSTREAM_DEFAULT_MTU } @@ -145,7 +144,7 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { impl Drop for LinkUnicastUnixSocketStream { fn drop(&mut self) { // Close the underlying UnixSocketStream socket - let _ = zenoh_runtime::ZRuntime::TX + let _ = zenoh_runtime::ZRuntime::Acceptor .block_in_place(async move { self.get_mut_socket().shutdown().await }); } } diff --git a/io/zenoh-links/zenoh-link-vsock/src/lib.rs b/io/zenoh-links/zenoh-link-vsock/src/lib.rs index d58250fed3..7834050796 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/lib.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/lib.rs @@ -22,7 +22,7 @@ use async_trait::async_trait; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; -use zenoh_protocol::{core::Locator, transport::BatchSize}; +use zenoh_protocol::core::Locator; use zenoh_result::ZResult; #[cfg(target_os = "linux")] @@ -47,7 +47,7 @@ impl LocatorInspector for VsockLocatorInspector { zconfigurable! { // Default MTU in bytes. 
- static ref VSOCK_DEFAULT_MTU: BatchSize = BatchSize::MAX; + static ref VSOCK_DEFAULT_MTU: u16 = u16::MAX; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref VSOCK_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs index 59efa6f0e3..ced7b9dc15 100644 --- a/io/zenoh-links/zenoh-link-vsock/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-vsock/src/unicast.rs @@ -27,10 +27,8 @@ use zenoh_core::{zasyncread, zasyncwrite}; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; -use zenoh_protocol::{ - core::{endpoint::Address, EndPoint, Locator}, - transport::BatchSize, -}; +use zenoh_protocol::core::endpoint::Address; +use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, ZResult}; use super::{VSOCK_ACCEPT_THROTTLE_TIME, VSOCK_DEFAULT_MTU, VSOCK_LOCATOR_PREFIX}; @@ -172,7 +170,7 @@ impl LinkUnicastTrait for LinkUnicastVsock { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *VSOCK_DEFAULT_MTU } diff --git a/io/zenoh-links/zenoh-link-ws/src/lib.rs b/io/zenoh-links/zenoh-link-ws/src/lib.rs index d165b480a9..f68a20d15d 100644 --- a/io/zenoh-links/zenoh-link-ws/src/lib.rs +++ b/io/zenoh-links/zenoh-link-ws/src/lib.rs @@ -23,7 +23,6 @@ use url::Url; use zenoh_core::zconfigurable; use zenoh_link_commons::LocatorInspector; use zenoh_protocol::core::{endpoint::Address, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, ZResult}; mod unicast; pub use unicast::*; @@ -34,7 +33,7 @@ pub use unicast::*; // adopted in Zenoh and the usage of 16 bits in Zenoh to encode the // payload length in byte-streamed, the TCP MTU is constrained to // 2^16 - 1 bytes (i.e., 65535). 
-const WS_MAX_MTU: BatchSize = BatchSize::MAX; +const WS_MAX_MTU: u16 = u16::MAX; pub const WS_LOCATOR_PREFIX: &str = "ws"; @@ -52,7 +51,7 @@ impl LocatorInspector for WsLocatorInspector { zconfigurable! { // Default MTU (TCP PDU) in bytes. - static ref WS_DEFAULT_MTU: BatchSize = WS_MAX_MTU; + static ref WS_DEFAULT_MTU: u16 = WS_MAX_MTU; // Amount of time in microseconds to throttle the accept loop upon an error. // Default set to 100 ms. static ref TCP_ACCEPT_THROTTLE_TIME: u64 = 100_000; diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index acf568f78c..1a6d0fecf3 100644 --- a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -34,7 +34,6 @@ use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_protocol::transport::BatchSize; use zenoh_result::{bail, zerror, ZResult}; use super::{get_ws_addr, get_ws_url, TCP_ACCEPT_THROTTLE_TIME, WS_DEFAULT_MTU, WS_LOCATOR_PREFIX}; @@ -201,7 +200,7 @@ impl LinkUnicastTrait for LinkUnicastWs { } #[inline(always)] - fn get_mtu(&self) -> BatchSize { + fn get_mtu(&self) -> u16 { *WS_DEFAULT_MTU } @@ -225,7 +224,7 @@ impl LinkUnicastTrait for LinkUnicastWs { impl Drop for LinkUnicastWs { fn drop(&mut self) { - zenoh_runtime::ZRuntime::TX.block_in_place(async { + zenoh_runtime::ZRuntime::Acceptor.block_in_place(async { let mut guard = zasynclock!(self.send); // Close the underlying TCP socket guard.close().await.unwrap_or_else(|e| { diff --git a/io/zenoh-transport/Cargo.toml b/io/zenoh-transport/Cargo.toml index 6f18f7cc5c..5304a9fa17 100644 --- a/io/zenoh-transport/Cargo.toml +++ b/io/zenoh-transport/Cargo.toml @@ -83,6 +83,7 @@ zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } +zenoh-task = { workspace = true } diff --git 
a/io/zenoh-transport/src/common/batch.rs b/io/zenoh-transport/src/common/batch.rs index e923a7e1af..4139a65a05 100644 --- a/io/zenoh-transport/src/common/batch.rs +++ b/io/zenoh-transport/src/common/batch.rs @@ -460,7 +460,7 @@ impl RBatch { let mut into = (buff)(); let n = lz4_flex::block::decompress_into(payload, into.as_mut_slice()) .map_err(|_| zerror!("Decompression error"))?; - let zslice = ZSlice::new(Arc::new(into), 0, n) + let zslice = ZSlice::make(Arc::new(into), 0, n) .map_err(|_| zerror!("Invalid decompression buffer length"))?; Ok(zslice) } @@ -574,12 +574,12 @@ mod tests { let tmsg: TransportMessage = KeepAlive.into(); let nmsg: NetworkMessage = Push { wire_expr: WireExpr::empty(), - ext_qos: ext::QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), + ext_qos: ext::QoSType::new(Priority::default(), CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -601,7 +601,7 @@ mod tests { let mut frame = FrameHeader { reliability: Reliability::Reliable, sn: 0, - ext_qos: frame::ext::QoSType::DEFAULT, + ext_qos: frame::ext::QoSType::default(), }; // Serialize with a frame diff --git a/io/zenoh-transport/src/common/pipeline.rs b/io/zenoh-transport/src/common/pipeline.rs index b74fa2990c..9df7632f7a 100644 --- a/io/zenoh-transport/src/common/pipeline.rs +++ b/io/zenoh-transport/src/common/pipeline.rs @@ -22,7 +22,7 @@ use ringbuffer_spsc::{RingBuffer, RingBufferReader, RingBufferWriter}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use std::{ - sync::atomic::{AtomicBool, Ordering}, + sync::atomic::{AtomicBool, AtomicU16, Ordering}, time::Instant, }; use zenoh_buffers::{ @@ -40,7 +40,7 @@ use zenoh_protocol::{ transport::{ fragment::FragmentHeader, frame::{self, FrameHeader}, 
- AtomicBatchSize, BatchSize, TransportMessage, + BatchSize, TransportMessage, }, }; @@ -75,7 +75,7 @@ impl StageInRefill { struct StageInOut { n_out_w: Sender<()>, s_out_w: RingBufferWriter, - bytes: Arc, + bytes: Arc, backoff: Arc, } @@ -178,18 +178,12 @@ impl StageIn { } macro_rules! zretok { - ($batch:expr, $msg:expr) => {{ - if $msg.is_express() { - // Move out existing batch - self.s_out.move_batch($batch); - return true; - } else { - let bytes = $batch.len(); - *c_guard = Some($batch); - drop(c_guard); - self.s_out.notify(bytes); - return true; - } + ($batch:expr) => {{ + let bytes = $batch.len(); + *c_guard = Some($batch); + drop(c_guard); + self.s_out.notify(bytes); + return true; }}; } @@ -197,7 +191,7 @@ impl StageIn { let mut batch = zgetbatch_rets!(false, {}); // Attempt the serialization on the current batch let e = match batch.encode(&*msg) { - Ok(_) => zretok!(batch, msg), + Ok(_) => zretok!(batch), Err(e) => e, }; @@ -217,7 +211,7 @@ impl StageIn { if let BatchError::NewFrame = e { // Attempt a serialization with a new frame if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch, msg); + zretok!(batch); } } @@ -229,7 +223,7 @@ impl StageIn { // Attempt a second serialization on fully empty batch if batch.encode((&*msg, &frame)).is_ok() { - zretok!(batch, msg); + zretok!(batch); } // The second serialization attempt has failed. 
This means that the message is @@ -355,12 +349,12 @@ enum Pull { struct Backoff { retry_time: NanoSeconds, last_bytes: BatchSize, - bytes: Arc, + bytes: Arc, backoff: Arc, } impl Backoff { - fn new(bytes: Arc, backoff: Arc) -> Self { + fn new(bytes: Arc, backoff: Arc) -> Self { Self { retry_time: 0, last_bytes: 0, @@ -522,7 +516,7 @@ impl TransmissionPipeline { let mut stage_in = vec![]; let mut stage_out = vec![]; - let default_queue_size = [config.queue_size[Priority::DEFAULT as usize]]; + let default_queue_size = [config.queue_size[Priority::default() as usize]]; let size_iter = if priority.len() == 1 { default_queue_size.iter() } else { @@ -552,7 +546,7 @@ impl TransmissionPipeline { // This is a SPSC ring buffer let (s_out_w, s_out_r) = RingBuffer::::init(); let current = Arc::new(Mutex::new(None)); - let bytes = Arc::new(AtomicBatchSize::new(0)); + let bytes = Arc::new(AtomicU16::new(0)); let backoff = Arc::new(AtomicBool::new(false)); stage_in.push(Mutex::new(StageIn { @@ -613,7 +607,7 @@ impl TransmissionPipelineProducer { let priority = msg.priority(); (priority as usize, priority) } else { - (0, Priority::DEFAULT) + (0, Priority::default()) }; // If message is droppable, compute a deadline after which the sample could be dropped let deadline_before_drop = if msg.is_droppable() { @@ -781,10 +775,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -909,10 +903,10 @@ mod tests { wire_expr: key, ext_qos: ext::QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - 
encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -1026,10 +1020,10 @@ mod tests { false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/src/common/stats.rs b/io/zenoh-transport/src/common/stats.rs index aaf39641c0..f095a58273 100644 --- a/io/zenoh-transport/src/common/stats.rs +++ b/io/zenoh-transport/src/common/stats.rs @@ -208,10 +208,6 @@ stats_struct! { # TYPE "counter" pub tx_z_del_msgs DiscriminatedStats, - # HELP "Counter of received bytes in zenoh del message attachments." - # TYPE "counter" - pub tx_z_del_pl_bytes DiscriminatedStats, - # HELP "Counter of sent zenoh query messages." # TYPE "counter" pub tx_z_query_msgs DiscriminatedStats, @@ -256,10 +252,6 @@ stats_struct! { # TYPE "counter" pub rx_z_del_msgs DiscriminatedStats, - # HELP "Counter of received bytes in zenoh del message attachments." - # TYPE "counter" - pub rx_z_del_pl_bytes DiscriminatedStats, - # HELP "Counter of received zenoh query messages." 
# TYPE "counter" pub rx_z_query_msgs DiscriminatedStats, diff --git a/io/zenoh-transport/src/manager.rs b/io/zenoh-transport/src/manager.rs index 2d7961ed2b..a52a35af83 100644 --- a/io/zenoh-transport/src/manager.rs +++ b/io/zenoh-transport/src/manager.rs @@ -24,7 +24,6 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex as AsyncMutex; -use tokio_util::sync::CancellationToken; use zenoh_config::{Config, LinkRxConf, QueueConf, QueueSizeConf}; use zenoh_crypto::{BlockCipher, PseudoRng}; use zenoh_link::NewLinkChannelSender; @@ -34,6 +33,7 @@ use zenoh_protocol::{ VERSION, }; use zenoh_result::{bail, ZResult}; +use zenoh_task::TaskController; /// # Examples /// ``` @@ -93,7 +93,7 @@ pub struct TransportManagerConfig { pub zid: ZenohId, pub whatami: WhatAmI, pub resolution: Resolution, - pub batch_size: BatchSize, + pub batch_size: u16, pub wait_before_drop: Duration, pub queue_size: [usize; Priority::NUM], pub queue_backoff: Duration, @@ -122,7 +122,7 @@ pub struct TransportManagerBuilder { zid: ZenohId, whatami: WhatAmI, resolution: Resolution, - batch_size: BatchSize, + batch_size: u16, wait_before_drop: Duration, queue_size: QueueSizeConf, queue_backoff: Duration, @@ -151,7 +151,7 @@ impl TransportManagerBuilder { self } - pub fn batch_size(mut self, batch_size: BatchSize) -> Self { + pub fn batch_size(mut self, batch_size: u16) -> Self { self.batch_size = batch_size; self } @@ -335,7 +335,7 @@ pub struct TransportManager { pub(crate) new_unicast_link_sender: NewLinkChannelSender, #[cfg(feature = "stats")] pub(crate) stats: Arc, - pub(crate) token: CancellationToken, + pub(crate) task_controller: TaskController, } impl TransportManager { @@ -357,32 +357,27 @@ impl TransportManager { new_unicast_link_sender, #[cfg(feature = "stats")] stats: std::sync::Arc::new(crate::stats::TransportStats::default()), - token: CancellationToken::new(), + task_controller: TaskController::default(), }; // @TODO: this should be moved 
into the unicast module - zenoh_runtime::ZRuntime::Net.spawn({ - let this = this.clone(); - let token = this.token.clone(); - async move { - // while let Ok(link) = new_unicast_link_receiver.recv_async().await { - // this.handle_new_link_unicast(link).await; - // } - loop { - tokio::select! { - res = new_unicast_link_receiver.recv_async() => { - if let Ok(link) = res { - this.handle_new_link_unicast(link).await; - } - } - - _ = token.cancelled() => { - break; + let cancellation_token = this.task_controller.get_cancellation_token(); + this.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let this = this.clone(); + async move { + loop { + tokio::select! { + res = new_unicast_link_receiver.recv_async() => { + if let Ok(link) = res { + this.handle_new_link_unicast(link).await; + } + } + _ = cancellation_token.cancelled() => { break; } } } } - } - }); + }); this } @@ -402,10 +397,9 @@ impl TransportManager { pub async fn close(&self) { self.close_unicast().await; - // TODO: Check this - self.token.cancel(); - // WARN: depends on the auto-close of tokio runtime after dropped - // self.tx_executor.runtime.shutdown_background(); + self.task_controller + .terminate_all_async(Duration::from_secs(10)) + .await; } /*************************************/ @@ -444,8 +438,8 @@ impl TransportManager { // TODO(yuyuan): Can we make this async as above? 
pub fn get_locators(&self) -> Vec { - let mut lsu = zenoh_runtime::ZRuntime::TX.block_in_place(self.get_locators_unicast()); - let mut lsm = zenoh_runtime::ZRuntime::TX.block_in_place(self.get_locators_multicast()); + let mut lsu = zenoh_runtime::ZRuntime::Net.block_in_place(self.get_locators_unicast()); + let mut lsm = zenoh_runtime::ZRuntime::Net.block_in_place(self.get_locators_multicast()); lsu.append(&mut lsm); lsu } diff --git a/io/zenoh-transport/src/multicast/link.rs b/io/zenoh-transport/src/multicast/link.rs index 577d3046fb..bfbdd3af61 100644 --- a/io/zenoh-transport/src/multicast/link.rs +++ b/io/zenoh-transport/src/multicast/link.rs @@ -211,7 +211,7 @@ impl TransportLinkMulticastRx { let mut into = (buff)(); let (n, locator) = self.inner.link.read(into.as_mut_slice()).await?; - let buffer = ZSlice::new(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; + let buffer = ZSlice::make(Arc::new(into), 0, n).map_err(|_| zerror!("Error"))?; let mut batch = RBatch::new(self.inner.config.batch, buffer); batch.initialize(buff).map_err(|_| zerror!("{ERR}{self}"))?; Ok((batch, locator.into_owned())) @@ -491,7 +491,7 @@ async fn tx_task( .collect::>(); let (next_sn, ext_qos) = if next_sns.len() == Priority::NUM { let tmp: [PrioritySn; Priority::NUM] = next_sns.try_into().unwrap(); - (PrioritySn::DEFAULT, Some(Box::new(tmp))) + (PrioritySn::default(), Some(Box::new(tmp))) } else { (next_sns[0], None) }; diff --git a/io/zenoh-transport/src/multicast/rx.rs b/io/zenoh-transport/src/multicast/rx.rs index 5e31cea9d1..5cf714210f 100644 --- a/io/zenoh-transport/src/multicast/rx.rs +++ b/io/zenoh-transport/src/multicast/rx.rs @@ -145,7 +145,7 @@ impl TransportMulticastInner { let priority = ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::DEFAULT { + } else if priority == Priority::default() { &peer.priority_rx[0] } else { bail!( @@ -181,7 +181,7 @@ impl TransportMulticastInner { let priority = 
ext_qos.priority(); let c = if self.is_qos() { &peer.priority_rx[priority as usize] - } else if priority == Priority::DEFAULT { + } else if priority == Priority::default() { &peer.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/src/multicast/transport.rs b/io/zenoh-transport/src/multicast/transport.rs index c647730390..2e7f54098d 100644 --- a/io/zenoh-transport/src/multicast/transport.rs +++ b/io/zenoh-transport/src/multicast/transport.rs @@ -39,6 +39,7 @@ use zenoh_protocol::{ transport::{close, Join}, }; use zenoh_result::{bail, ZResult}; +use zenoh_task::TaskController; // use zenoh_util::{Timed, TimedEvent, TimedHandle, Timer}; /*************************************/ @@ -82,8 +83,8 @@ pub(crate) struct TransportMulticastInner { pub(super) link: Arc>>, // The callback pub(super) callback: Arc>>>, - // token for safe cancellation - token: CancellationToken, + // Task controller for safe task cancellation + task_controller: TaskController, // Transport statistics #[cfg(feature = "stats")] pub(super) stats: Arc, @@ -115,7 +116,7 @@ impl TransportMulticastInner { locator: config.link.link.get_dst().to_owned(), link: Arc::new(RwLock::new(None)), callback: Arc::new(RwLock::new(None)), - token: CancellationToken::new(), + task_controller: TaskController::default(), #[cfg(feature = "stats")] stats, }; @@ -183,8 +184,9 @@ impl TransportMulticastInner { cb.closed(); } - // TODO(yuyuan): use CancellationToken to unify the termination with the above - self.token.cancel(); + self.task_controller + .terminate_all_async(Duration::from_secs(10)) + .await; Ok(()) } @@ -369,7 +371,7 @@ impl TransportMulticastInner { // TODO(yuyuan): refine the clone behaviors let is_active = Arc::new(AtomicBool::new(false)); let c_is_active = is_active.clone(); - let token = self.token.child_token(); + let token = self.task_controller.get_cancellation_token(); let c_token = token.clone(); let c_self = self.clone(); let c_locator = locator.clone(); @@ -389,8 +391,8 @@ impl 
TransportMulticastInner { let _ = c_self.del_peer(&c_locator, close::reason::EXPIRED); }; - // TODO(yuyuan): Put it into TaskTracker or store as JoinHandle - zenoh_runtime::ZRuntime::Acceptor.spawn(task); + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Acceptor, task); // TODO(yuyuan): Integrate the above async task into TransportMulticastPeer // Store the new peer diff --git a/io/zenoh-transport/src/shm.rs b/io/zenoh-transport/src/shm.rs index bf569d0345..7b87f038e5 100644 --- a/io/zenoh-transport/src/shm.rs +++ b/io/zenoh-transport/src/shm.rs @@ -18,10 +18,9 @@ use zenoh_core::{zasyncread, zasyncwrite, zerror}; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{ - err::Err, + err::{ext::ErrBodyType, Err}, ext::ShmType, query::{ext::QueryBodyType, Query}, - reply::ReplyBody, PushBody, Put, Reply, RequestBody, ResponseBody, }, }; @@ -106,28 +105,48 @@ impl MapShm for Query { // Impl - Reply impl MapShm for Reply { fn map_to_shminfo(&mut self) -> ZResult { - match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shminfo(), - _ => Ok(false), - } + let Self { + payload, ext_shm, .. + } = self; + map_to_shminfo!(payload, ext_shm) } fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { - match &mut self.payload { - ReplyBody::Put(b) => b.map_to_shmbuf(shmr), - _ => Ok(false), - } + let Self { + payload, ext_shm, .. + } = self; + map_to_shmbuf!(payload, ext_shm, shmr) } } // Impl - Err impl MapShm for Err { fn map_to_shminfo(&mut self) -> ZResult { - Ok(false) + if let Self { + ext_body: Some(ErrBodyType { + payload, ext_shm, .. + }), + .. + } = self + { + map_to_shminfo!(payload, ext_shm) + } else { + Ok(false) + } } - fn map_to_shmbuf(&mut self, _shmr: &RwLock) -> ZResult { - Ok(false) + fn map_to_shmbuf(&mut self, shmr: &RwLock) -> ZResult { + if let Self { + ext_body: Some(ErrBodyType { + payload, ext_shm, .. + }), + .. 
+ } = self + { + map_to_shmbuf!(payload, ext_shm, shmr) + } else { + Ok(false) + } } } @@ -140,10 +159,14 @@ pub fn map_zmsg_to_shminfo(msg: &mut NetworkMessage) -> ZResult { }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shminfo(), + RequestBody::Put(b) => b.map_to_shminfo(), + RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. }) => match payload { ResponseBody::Reply(b) => b.map_to_shminfo(), + ResponseBody::Put(b) => b.map_to_shminfo(), ResponseBody::Err(b) => b.map_to_shminfo(), + ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } @@ -191,10 +214,14 @@ pub fn map_zmsg_to_shmbuf( }, NetworkBody::Request(Request { payload, .. }) => match payload { RequestBody::Query(b) => b.map_to_shmbuf(shmr), + RequestBody::Put(b) => b.map_to_shmbuf(shmr), + RequestBody::Del(_) | RequestBody::Pull(_) => Ok(false), }, NetworkBody::Response(Response { payload, .. 
}) => match payload { - ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + ResponseBody::Put(b) => b.map_to_shmbuf(shmr), ResponseBody::Err(b) => b.map_to_shmbuf(shmr), + ResponseBody::Reply(b) => b.map_to_shmbuf(shmr), + ResponseBody::Ack(_) => Ok(false), }, NetworkBody::ResponseFinal(_) | NetworkBody::Declare(_) | NetworkBody::OAM(_) => Ok(false), } diff --git a/io/zenoh-transport/src/unicast/establishment/accept.rs b/io/zenoh-transport/src/unicast/establishment/accept.rs index e3341a8505..c1a1a8c16c 100644 --- a/io/zenoh-transport/src/unicast/establishment/accept.rs +++ b/io/zenoh-transport/src/unicast/establishment/accept.rs @@ -167,11 +167,9 @@ impl<'a, 'b: 'a> AcceptFsm for &'a mut AcceptLink<'b> { // Check if the version is supported if init_syn.version != input.mine_version { let e = zerror!( - "Rejecting InitSyn on {} because of unsupported Zenoh protocol version (expected: {}, received: {}) from: {}", + "Rejecting InitSyn on {} because of unsupported Zenoh version from peer: {}", self.link, - input.mine_version, - init_syn.version, - init_syn.zid, + init_syn.zid ); return Err((e.into(), Some(close::reason::INVALID))); } diff --git a/io/zenoh-transport/src/unicast/establishment/cookie.rs b/io/zenoh-transport/src/unicast/establishment/cookie.rs index 6f0295601c..e9916be7e6 100644 --- a/io/zenoh-transport/src/unicast/establishment/cookie.rs +++ b/io/zenoh-transport/src/unicast/establishment/cookie.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +// use super::properties::EstablishmentProperties; use crate::unicast::establishment::ext; use std::convert::TryFrom; use zenoh_buffers::{ @@ -19,17 +20,14 @@ use zenoh_buffers::{ }; use zenoh_codec::{RCodec, WCodec, Zenoh080}; use zenoh_crypto::{BlockCipher, PseudoRng}; -use zenoh_protocol::{ - core::{Resolution, WhatAmI, ZenohId}, - transport::BatchSize, -}; +use zenoh_protocol::core::{Resolution, WhatAmI, ZenohId}; #[derive(Debug, PartialEq)] pub(crate) struct Cookie { pub(crate) zid: ZenohId, 
pub(crate) whatami: WhatAmI, pub(crate) resolution: Resolution, - pub(crate) batch_size: BatchSize, + pub(crate) batch_size: u16, pub(crate) nonce: u64, // Extensions pub(crate) ext_qos: ext::qos::StateAccept, @@ -85,7 +83,7 @@ where let whatami = WhatAmI::try_from(wai).map_err(|_| DidntRead)?; let resolution: u8 = self.read(&mut *reader)?; let resolution = Resolution::from(resolution); - let batch_size: BatchSize = self.read(&mut *reader)?; + let batch_size: u16 = self.read(&mut *reader)?; let nonce: u64 = self.read(&mut *reader)?; // Extensions let ext_qos: ext::qos::StateAccept = self.read(&mut *reader)?; diff --git a/io/zenoh-transport/src/unicast/establishment/properties.rs b/io/zenoh-transport/src/unicast/establishment/properties.rs new file mode 100644 index 0000000000..e259b650ab --- /dev/null +++ b/io/zenoh-transport/src/unicast/establishment/properties.rs @@ -0,0 +1,132 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use std::{ + convert::TryFrom, + ops::{Deref, DerefMut}, +}; +use zenoh_buffers::{reader::HasReader, writer::HasWriter, ZBuf}; +use zenoh_codec::{RCodec, WCodec, Zenoh080}; +use zenoh_protocol::core::Property; +use zenoh_result::{bail, zerror, Error as ZError, ZResult}; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct EstablishmentProperties(Vec); + +impl Deref for EstablishmentProperties { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for EstablishmentProperties { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl EstablishmentProperties { + pub(super) fn new() -> Self { + EstablishmentProperties(vec![]) + } + + pub(super) fn insert(&mut self, p: Property) -> ZResult<()> { + if self.0.iter().any(|x| x.key == p.key) { + bail!("Property {} already exists", p.key) + } + self.0.push(p); + Ok(()) + } + + pub(super) fn remove(&mut self, key: u64) -> Option { + self.0 + .iter() + .position(|x| x.key == key) + .map(|i| self.0.remove(i)) + } +} + +impl TryFrom<&EstablishmentProperties> for Attachment { + type Error = ZError; + + fn try_from(eps: &EstablishmentProperties) -> Result { + if eps.is_empty() { + bail!("Can not create an attachment with zero properties") + } + + let mut zbuf = ZBuf::empty(); + let mut writer = zbuf.writer(); + let codec = Zenoh080::new(); + + codec + .write(&mut writer, eps.0.as_slice()) + .map_err(|_| zerror!(""))?; + + let attachment = Attachment::new(zbuf); + Ok(attachment) + } +} + +impl TryFrom> for EstablishmentProperties { + type Error = ZError; + + fn try_from(mut ps: Vec) -> Result { + let mut eps = EstablishmentProperties::new(); + for p in ps.drain(..) 
{ + eps.insert(p)?; + } + + Ok(eps) + } +} + +impl TryFrom<&Attachment> for EstablishmentProperties { + type Error = ZError; + + fn try_from(att: &Attachment) -> Result { + let mut reader = att.buffer.reader(); + let codec = Zenoh080::new(); + + let ps: Vec = codec.read(&mut reader).map_err(|_| zerror!(""))?; + EstablishmentProperties::try_from(ps) + } +} + +impl EstablishmentProperties { + #[cfg(test)] + pub fn rand() -> Self { + use rand::Rng; + + const MIN: usize = 1; + const MAX: usize = 8; + + let mut rng = rand::thread_rng(); + + let mut eps = EstablishmentProperties::new(); + for _ in MIN..=MAX { + loop { + let key: u64 = rng.gen(); + let mut value = vec![0u8; rng.gen_range(MIN..=MAX)]; + rng.fill(&mut value[..]); + let p = Property { key, value }; + if eps.insert(p).is_ok() { + break; + } + } + } + + eps + } +} diff --git a/io/zenoh-transport/src/unicast/link.rs b/io/zenoh-transport/src/unicast/link.rs index daa6c3e5a5..bd756d6396 100644 --- a/io/zenoh-transport/src/unicast/link.rs +++ b/io/zenoh-transport/src/unicast/link.rs @@ -232,7 +232,7 @@ impl TransportLinkUnicastRx { // log::trace!("RBytes: {:02x?}", &into.as_slice()[0..end]); - let buffer = ZSlice::new(Arc::new(into), 0, end) + let buffer = ZSlice::make(Arc::new(into), 0, end) .map_err(|_| zerror!("{ERR}{self}. 
ZSlice index(es) out of bounds"))?; let mut batch = RBatch::new(self.batch, buffer); batch diff --git a/io/zenoh-transport/src/unicast/lowlatency/link.rs b/io/zenoh-transport/src/unicast/lowlatency/link.rs index 826e5107d5..43e4516aa5 100644 --- a/io/zenoh-transport/src/unicast/lowlatency/link.rs +++ b/io/zenoh-transport/src/unicast/lowlatency/link.rs @@ -180,7 +180,7 @@ impl TransportUnicastLowlatency { } // Deserialize all the messages from the current ZBuf - let zslice = ZSlice::new(Arc::new(buffer), 0, bytes).unwrap(); + let zslice = ZSlice::make(Arc::new(buffer), 0, bytes).unwrap(); c_transport.read_messages(zslice, &link_rx.link).await?; } diff --git a/io/zenoh-transport/src/unicast/manager.rs b/io/zenoh-transport/src/unicast/manager.rs index eaf25cd2a3..8a63f4f630 100644 --- a/io/zenoh-transport/src/unicast/manager.rs +++ b/io/zenoh-transport/src/unicast/manager.rs @@ -744,17 +744,18 @@ impl TransportManager { // Spawn a task to accept the link let c_manager = self.clone(); - zenoh_runtime::ZRuntime::Acceptor.spawn(async move { - if let Err(e) = tokio::time::timeout( - c_manager.config.unicast.accept_timeout, - super::establishment::accept::accept_link(link, &c_manager), - ) - .await - { - log::debug!("{}", e); - } - incoming_counter.fetch_sub(1, SeqCst); - }); + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Acceptor, async move { + if let Err(e) = tokio::time::timeout( + c_manager.config.unicast.accept_timeout, + super::establishment::accept::accept_link(link, &c_manager), + ) + .await + { + log::debug!("{}", e); + } + incoming_counter.fetch_sub(1, SeqCst); + }); } } diff --git a/io/zenoh-transport/src/unicast/universal/link.rs b/io/zenoh-transport/src/unicast/universal/link.rs index 93a6c717dd..fe4e8c8691 100644 --- a/io/zenoh-transport/src/unicast/universal/link.rs +++ b/io/zenoh-transport/src/unicast/universal/link.rs @@ -102,7 +102,7 @@ impl TransportLinkUnicastUniversal { // to finish in the close() joining its handle // TODO(yuyuan): 
do more study to check which ZRuntime should be used or refine the // termination - zenoh_runtime::ZRuntime::TX + zenoh_runtime::ZRuntime::Net .spawn(async move { transport.del_link(tx.inner.link()).await }); } }; @@ -260,7 +260,8 @@ async fn rx_task( let batch = batch.map_err(|_| zerror!("{}: expired after {} milliseconds", link, lease.as_millis()))??; #[cfg(feature = "stats")] { - transport.stats.inc_rx_bytes(2 + n); // Account for the batch len encoding (16 bits) + + transport.stats.inc_rx_bytes(2 + batch.len()); // Account for the batch len encoding (16 bits) } transport.read_messages(batch, &l)?; } diff --git a/io/zenoh-transport/src/unicast/universal/rx.rs b/io/zenoh-transport/src/unicast/universal/rx.rs index 538aa581d4..9dfe075956 100644 --- a/io/zenoh-transport/src/unicast/universal/rx.rs +++ b/io/zenoh-transport/src/unicast/universal/rx.rs @@ -77,7 +77,7 @@ impl TransportUnicastUniversal { let priority = ext_qos.priority(); let c = if self.is_qos() { &self.priority_rx[priority as usize] - } else if priority == Priority::DEFAULT { + } else if priority == Priority::default() { &self.priority_rx[0] } else { bail!( @@ -120,7 +120,7 @@ impl TransportUnicastUniversal { let c = if self.is_qos() { &self.priority_rx[qos.priority() as usize] - } else if qos.priority() == Priority::DEFAULT { + } else if qos.priority() == Priority::default() { &self.priority_rx[0] } else { bail!( diff --git a/io/zenoh-transport/tests/multicast_compression.rs b/io/zenoh-transport/tests/multicast_compression.rs index 011b7878a2..5d0c9ef9ae 100644 --- a/io/zenoh-transport/tests/multicast_compression.rs +++ b/io/zenoh-transport/tests/multicast_compression.rs @@ -262,11 +262,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: 
Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -352,7 +352,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/multicast_transport.rs b/io/zenoh-transport/tests/multicast_transport.rs index 3387a53949..96525c263c 100644 --- a/io/zenoh-transport/tests/multicast_transport.rs +++ b/io/zenoh-transport/tests/multicast_transport.rs @@ -258,11 +258,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -348,7 +348,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_compression.rs b/io/zenoh-transport/tests/unicast_compression.rs index acc5707fe1..7707da57de 100644 --- a/io/zenoh-transport/tests/unicast_compression.rs +++ b/io/zenoh-transport/tests/unicast_compression.rs @@ -290,11 +290,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -432,7 +432,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: 
Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -454,7 +454,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -479,7 +479,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -501,7 +501,7 @@ mod tests { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { diff --git a/io/zenoh-transport/tests/unicast_concurrent.rs b/io/zenoh-transport/tests/unicast_concurrent.rs index fe92314897..ae17ae3f99 100644 --- a/io/zenoh-transport/tests/unicast_concurrent.rs +++ b/io/zenoh-transport/tests/unicast_concurrent.rs @@ -186,13 +186,13 @@ async fn transport_concurrent(endpoint01: Vec, endpoint02: Vec, endpoint02: Vec, client_transport: TransportUn wire_expr: "test".into(), ext_qos: QoSType::new(*p, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; *ms].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_shm.rs b/io/zenoh-transport/tests/unicast_shm.rs index 8f32139f86..6796f803ca 100644 --- a/io/zenoh-transport/tests/unicast_shm.rs +++ b/io/zenoh-transport/tests/unicast_shm.rs @@ -261,13 +261,13 @@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), 
ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, ext_shm: None, ext_attachment: None, @@ -309,13 +309,13 @@ mod tests { let message: NetworkMessage = Push { wire_expr: "test".into(), - ext_qos: QoSType::new(Priority::DEFAULT, CongestionControl::Block, false), + ext_qos: QoSType::new(Priority::default(), CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: sbuf.into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, ext_shm: None, ext_attachment: None, diff --git a/io/zenoh-transport/tests/unicast_simultaneous.rs b/io/zenoh-transport/tests/unicast_simultaneous.rs index 8740c8e2b0..83c3d98dce 100644 --- a/io/zenoh-transport/tests/unicast_simultaneous.rs +++ b/io/zenoh-transport/tests/unicast_simultaneous.rs @@ -70,11 +70,11 @@ mod tests { wire_expr: "test".into(), ext_qos: QoSType::new(Priority::Control, CongestionControl::Block, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; MSG_SIZE].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/io/zenoh-transport/tests/unicast_time.rs b/io/zenoh-transport/tests/unicast_time.rs deleted file mode 100644 index 75d3ae1d98..0000000000 --- a/io/zenoh-transport/tests/unicast_time.rs +++ /dev/null @@ -1,521 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at 
https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use std::{ - convert::TryFrom, - sync::Arc, - time::{Duration, Instant}, -}; -use zenoh_core::ztimeout; -use zenoh_link::EndPoint; -use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_result::ZResult; -use zenoh_transport::{ - multicast::TransportMulticast, - unicast::{test_helpers::make_transport_manager_builder, TransportUnicast}, - DummyTransportPeerEventHandler, TransportEventHandler, TransportManager, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, -}; - -const TIMEOUT: Duration = Duration::from_secs(60); -const TIMEOUT_EXPECTED: Duration = Duration::from_secs(5); -const SLEEP: Duration = Duration::from_millis(100); - -macro_rules! ztimeout_expected { - ($f:expr) => { - tokio::time::timeout(TIMEOUT_EXPECTED, $f).await.unwrap() - }; -} - -#[cfg(test)] -#[derive(Default)] -struct SHRouterOpenClose; - -impl TransportEventHandler for SHRouterOpenClose { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - Ok(Arc::new(DummyTransportPeerEventHandler)) - } - - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } -} - -// Transport Handler for the client -struct SHClientOpenClose {} - -impl SHClientOpenClose { - fn new() -> Self { - Self {} - } -} - -impl TransportEventHandler for SHClientOpenClose { - fn new_unicast( - &self, - _peer: TransportPeer, - _transport: TransportUnicast, - ) -> ZResult> { - Ok(Arc::new(DummyTransportPeerEventHandler)) - } - - fn new_multicast( - &self, - _transport: TransportMulticast, - ) -> ZResult> { - panic!(); - } -} - -async fn time_transport( - listen_endpoint: &EndPoint, - connect_endpoint: &EndPoint, - lowlatency_transport: bool, -) { - if lowlatency_transport { - println!(">>> Low latency transport"); - } else { - println!(">>> Universal transport"); - } - 
/* [ROUTER] */ - let router_id = ZenohId::try_from([1]).unwrap(); - - let router_handler = Arc::new(SHRouterOpenClose); - // Create the router transport manager - let unicast = make_transport_manager_builder( - #[cfg(feature = "transport_multilink")] - 1, - #[cfg(feature = "shared-memory")] - false, - lowlatency_transport, - ) - .max_sessions(1); - let router_manager = TransportManager::builder() - .whatami(WhatAmI::Router) - .zid(router_id) - .unicast(unicast) - .build(router_handler.clone()) - .unwrap(); - - /* [CLIENT] */ - let client01_id = ZenohId::try_from([2]).unwrap(); - - // Create the transport transport manager for the first client - let unicast = make_transport_manager_builder( - #[cfg(feature = "transport_multilink")] - 1, - #[cfg(feature = "shared-memory")] - false, - lowlatency_transport, - ) - .max_sessions(1); - let client01_manager = TransportManager::builder() - .whatami(WhatAmI::Client) - .zid(client01_id) - .unicast(unicast) - .build(Arc::new(SHClientOpenClose::new())) - .unwrap(); - - /* [1] */ - // Add the locator on the router - let start = Instant::now(); - ztimeout!(router_manager.add_listener(listen_endpoint.clone())).unwrap(); - println!("Add listener {}: {:#?}", listen_endpoint, start.elapsed()); - - // Open a transport from the client to the router - let start = Instant::now(); - let c_ses1 = - ztimeout_expected!(client01_manager.open_transport_unicast(connect_endpoint.clone())) - .unwrap(); - println!( - "Open transport {}: {:#?}", - connect_endpoint, - start.elapsed() - ); - - // Verify that the transport has been open on the router - ztimeout!(async { - loop { - let transports = ztimeout!(router_manager.get_transports_unicast()); - let s = transports - .iter() - .find(|s| s.get_zid().unwrap() == client01_id); - - match s { - Some(s) => { - let links = s.get_links().unwrap(); - assert_eq!(links.len(), 1); - break; - } - None => tokio::time::sleep(SLEEP).await, - } - } - }); - - /* [2] */ - // Close the open transport on the client - 
let start = Instant::now(); - ztimeout!(c_ses1.close()).unwrap(); - println!( - "Close transport {}: {:#?}", - connect_endpoint, - start.elapsed() - ); - - // Verify that the transport has been closed also on the router - ztimeout!(async { - loop { - let transports = ztimeout!(router_manager.get_transports_unicast()); - let index = transports - .iter() - .find(|s| s.get_zid().unwrap() == client01_id); - if index.is_none() { - break; - } - tokio::time::sleep(SLEEP).await; - } - }); - - /* [3] */ - let start = Instant::now(); - ztimeout!(router_manager.del_listener(listen_endpoint)).unwrap(); - println!( - "Delete listener {}: {:#?}", - listen_endpoint, - start.elapsed() - ); - - ztimeout!(async { - while !router_manager.get_listeners().await.is_empty() { - tokio::time::sleep(SLEEP).await; - } - }); - - // Wait a little bit - tokio::time::sleep(SLEEP).await; - - ztimeout!(router_manager.close()); - ztimeout!(client01_manager.close()); - - // Wait a little bit - tokio::time::sleep(SLEEP).await; -} - -async fn time_universal_transport(endpoint: &EndPoint) { - time_transport(endpoint, endpoint, false).await -} - -async fn time_lowlatency_transport(endpoint: &EndPoint) { - time_transport(endpoint, endpoint, true).await -} - -#[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_tcp_only() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13000).parse().unwrap(); - time_universal_transport(&endpoint).await; -} - -#[cfg(feature = "transport_tcp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_tcp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("tcp/127.0.0.1:{}", 13100).parse().unwrap(); - time_lowlatency_transport(&endpoint).await; -} - -#[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_udp_only() { - 
let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13010).parse().unwrap(); - time_universal_transport(&endpoint).await; -} - -#[cfg(feature = "transport_udp")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_udp_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("udp/127.0.0.1:{}", 13110).parse().unwrap(); - time_lowlatency_transport(&endpoint).await; -} - -#[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_ws_only() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13020).parse().unwrap(); - time_universal_transport(&endpoint).await; -} - -#[cfg(feature = "transport_ws")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_ws_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = format!("ws/127.0.0.1:{}", 13120).parse().unwrap(); - time_lowlatency_transport(&endpoint).await; -} - -#[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_unixpipe_only() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = "unixpipe/time_unixpipe_only".parse().unwrap(); - time_universal_transport(&endpoint).await; -} - -#[cfg(feature = "transport_unixpipe")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_unixpipe_only_with_lowlatency_transport() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = "unixpipe/time_unixpipe_only_with_lowlatency_transport" - .parse() - .unwrap(); - time_lowlatency_transport(&endpoint).await; -} - -#[cfg(all(feature = "transport_unixsock-stream", target_family = "unix"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_unix_only() { - let _ = env_logger::try_init(); - let f1 = 
"zenoh-test-unix-socket-9.sock"; - let _ = std::fs::remove_file(f1); - let endpoint: EndPoint = format!("unixsock-stream/{f1}").parse().unwrap(); - time_universal_transport(&endpoint).await; - let _ = std::fs::remove_file(f1); - let _ = std::fs::remove_file(format!("{f1}.lock")); -} - -#[cfg(feature = "transport_tls")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_tls_only() { - use zenoh_link::tls::config::*; - - let _ = env_logger::try_init(); - // NOTE: this an auto-generated pair of certificate and key. - // The target domain is localhost, so it has no real - // mapping to any existing domain. The certificate and key - // have been generated using: https://github.com/jsha/minica - let key = "-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi -qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy -aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU -cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha -Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr -Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 -2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 -lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU -WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX -Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI -9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak -r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn -qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY -CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu -fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj -4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih -XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 -38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT 
-Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV -jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R -5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H -n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm -qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD -tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v -F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== ------END RSA PRIVATE KEY-----"; - - let cert = "-----BEGIN CERTIFICATE----- -MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw -MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT -He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ -Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj -eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 -eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M -tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud -DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T -AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE -DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 -3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD -wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q -GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC -9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ -p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ -abY= ------END CERTIFICATE-----"; - - // Configure the client - let ca = "-----BEGIN CERTIFICATE----- -MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw -MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi 
-MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ -uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs -DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc -8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 -T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b -QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB -AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr -BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 -/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq -hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ -e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc -yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 -8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 -Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN -R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ------END CERTIFICATE-----"; - - let mut endpoint: EndPoint = format!("tls/localhost:{}", 13030).parse().unwrap(); - endpoint - .config_mut() - .extend( - [ - (TLS_ROOT_CA_CERTIFICATE_RAW, ca), - (TLS_SERVER_PRIVATE_KEY_RAW, key), - (TLS_SERVER_CERTIFICATE_RAW, cert), - ] - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) - .unwrap(); - - time_universal_transport(&endpoint).await; -} - -#[cfg(feature = "transport_quic")] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_quic_only() { - use zenoh_link::quic::config::*; - - // NOTE: this an auto-generated pair of certificate and key. - // The target domain is localhost, so it has no real - // mapping to any existing domain. 
The certificate and key - // have been generated using: https://github.com/jsha/minica - let key = "-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi -qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy -aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU -cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha -Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr -Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 -2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 -lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU -WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX -Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI -9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak -r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn -qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY -CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu -fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj -4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih -XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 -38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT -Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV -jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R -5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H -n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm -qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD -tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v -F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== ------END RSA PRIVATE KEY-----"; - - let cert = "-----BEGIN CERTIFICATE----- -MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw 
-MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT -He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ -Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj -eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 -eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M -tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud -DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T -AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE -DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 -3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD -wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q -GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC -9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ -p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ -abY= ------END CERTIFICATE-----"; - - // Configure the client - let ca = "-----BEGIN CERTIFICATE----- -MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw -MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ -uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs -DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc -8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 -T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b -QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB -AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr -BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 -/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq -hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ 
-e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc -yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 -8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 -Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN -R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== ------END CERTIFICATE-----"; - - // Define the locator - let mut endpoint: EndPoint = format!("quic/localhost:{}", 13040).parse().unwrap(); - endpoint - .config_mut() - .extend( - [ - (TLS_ROOT_CA_CERTIFICATE_RAW, ca), - (TLS_SERVER_PRIVATE_KEY_RAW, key), - (TLS_SERVER_CERTIFICATE_RAW, cert), - ] - .iter() - .map(|(k, v)| ((*k).to_owned(), (*v).to_owned())), - ) - .unwrap(); - - time_universal_transport(&endpoint).await; -} - -#[cfg(all(feature = "transport_vsock", target_os = "linux"))] -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] -async fn time_vsock_only() { - let _ = env_logger::try_init(); - let endpoint: EndPoint = "vsock/VMADDR_CID_LOCAL:17000".parse().unwrap(); - time_lowlatency_transport(&endpoint).await; -} diff --git a/io/zenoh-transport/tests/unicast_transport.rs b/io/zenoh-transport/tests/unicast_transport.rs index c0af98eb46..38534a1a17 100644 --- a/io/zenoh-transport/tests/unicast_transport.rs +++ b/io/zenoh-transport/tests/unicast_transport.rs @@ -461,11 +461,11 @@ async fn test_transport( wire_expr: "test".into(), ext_qos: QoSType::new(channel.priority, cctrl, false), ext_tstamp: None, - ext_nodeid: NodeIdType::DEFAULT, + ext_nodeid: NodeIdType::default(), payload: Put { payload: vec![0u8; msg_size].into(), timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -604,7 +604,7 @@ async fn transport_unicast_tcp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -626,7 +626,7 @@ 
async fn transport_unicast_tcp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -651,7 +651,7 @@ async fn transport_unicast_udp_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -673,7 +673,7 @@ async fn transport_unicast_udp_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -697,7 +697,7 @@ async fn transport_unicast_unix_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -723,7 +723,7 @@ async fn transport_unicast_unix_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -750,11 +750,11 @@ async fn transport_unicast_ws_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -780,11 +780,11 @@ async fn transport_unicast_ws_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: 
Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -813,7 +813,7 @@ async fn transport_unicast_unixpipe_only() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -839,7 +839,7 @@ async fn transport_unicast_unixpipe_only_with_lowlatency_transport() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { @@ -866,7 +866,7 @@ async fn transport_unicast_tcp_udp() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -898,7 +898,7 @@ async fn transport_unicast_tcp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -932,7 +932,7 @@ async fn transport_unicast_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -969,7 +969,7 @@ async fn transport_unicast_tcp_udp_unix() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -1008,11 +1008,11 @@ async fn transport_unicast_tls_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -1053,11 
+1053,11 @@ async fn transport_unicast_quic_only_server() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -1117,11 +1117,11 @@ async fn transport_unicast_tls_only_mutual_success() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -1182,11 +1182,11 @@ async fn transport_unicast_tls_only_mutual_no_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { @@ -1261,11 +1261,11 @@ fn transport_unicast_tls_only_mutual_wrong_client_certs_failure() { // Define the reliability and congestion control let channel = [ Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::Reliable, }, Channel { - priority: Priority::DEFAULT, + priority: Priority::default(), reliability: Reliability::BestEffort, }, Channel { diff --git a/plugins/zenoh-backend-example/src/lib.rs b/plugins/zenoh-backend-example/src/lib.rs index f81231a498..602d29f375 100644 --- a/plugins/zenoh-backend-example/src/lib.rs +++ b/plugins/zenoh-backend-example/src/lib.rs @@ -13,8 +13,11 @@ // use async_std::sync::RwLock; use async_trait::async_trait; -use std::collections::{hash_map::Entry, HashMap}; -use zenoh::{prelude::OwnedKeyExpr, time::Timestamp, value::Value}; +use std::{ + 
collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; +use zenoh::{prelude::OwnedKeyExpr, sample::Sample, time::Timestamp, value::Value}; use zenoh_backend_traits::{ config::{StorageConfig, VolumeConfig}, Capability, History, Persistence, Storage, StorageInsertionResult, StoredData, Volume, @@ -68,6 +71,12 @@ impl Volume for ExampleBackend { async fn create_storage(&self, _props: StorageConfig) -> ZResult> { Ok(Box::::default()) } + fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { + None + } + fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { + None + } } #[async_trait] diff --git a/plugins/zenoh-backend-traits/Cargo.toml b/plugins/zenoh-backend-traits/Cargo.toml index b3926ab955..f2b8a4a1eb 100644 --- a/plugins/zenoh-backend-traits/Cargo.toml +++ b/plugins/zenoh-backend-traits/Cargo.toml @@ -31,7 +31,7 @@ async-std = { workspace = true, features = ["default"] } async-trait = { workspace = true } derive_more = { workspace = true } serde_json = { workspace = true } -zenoh = { workspace = true, features = ["unstable"] } +zenoh = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } schemars = { workspace = true } diff --git a/plugins/zenoh-backend-traits/src/lib.rs b/plugins/zenoh-backend-traits/src/lib.rs index 40d022f1ec..8b9fa359e0 100644 --- a/plugins/zenoh-backend-traits/src/lib.rs +++ b/plugins/zenoh-backend-traits/src/lib.rs @@ -68,6 +68,16 @@ //! // The properties are the ones passed via a PUT in the admin space for Storage creation. //! Ok(Box::new(MyStorage::new(properties).await?)) //! } +//! +//! fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { +//! // No interception point for incoming data (on PUT operations) +//! None +//! } +//! +//! fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { +//! // No interception point for outgoing data (on GET operations) +//! None +//! } //! } //! //! 
// Your Storage implementation @@ -125,7 +135,9 @@ use async_trait::async_trait; use const_format::concatcp; -use zenoh::prelude::OwnedKeyExpr; +use std::sync::Arc; +use zenoh::prelude::{KeyExpr, OwnedKeyExpr, Sample, Selector}; +use zenoh::queryable::ReplyBuilder; use zenoh::time::Timestamp; use zenoh::value::Value; pub use zenoh::Result as ZResult; @@ -198,6 +210,14 @@ pub trait Volume: Send + Sync { /// Creates a storage configured with some properties. async fn create_storage(&self, props: StorageConfig) -> ZResult>; + + /// Returns an interceptor that will be called before pushing any data + /// into a storage created by this backend. `None` can be returned for no interception point. + fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>>; + + /// Returns an interceptor that will be called before sending any reply + /// to a query from a storage created by this backend. `None` can be returned for no interception point. + fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>>; } pub type VolumeInstance = Box; @@ -262,3 +282,49 @@ pub trait Storage: Send + Sync { /// Remember to fetch the entry corresponding to the `None` key async fn get_all_entries(&self) -> ZResult, Timestamp)>>; } + +/// A wrapper around the [`zenoh::queryable::Query`] allowing to call the +/// OutgoingDataInterceptor (if any) before to send the reply +pub struct Query { + q: zenoh::queryable::Query, + interceptor: Option Sample + Send + Sync>>, +} + +impl Query { + pub fn new( + q: zenoh::queryable::Query, + interceptor: Option Sample + Send + Sync>>, + ) -> Query { + Query { q, interceptor } + } + + /// The full [`Selector`] of this Query. + #[inline(always)] + pub fn selector(&self) -> Selector<'_> { + self.q.selector() + } + + /// The key selector part of this Query. + #[inline(always)] + pub fn key_expr(&self) -> &KeyExpr<'static> { + self.q.key_expr() + } + + /// This Query's selector parameters. 
+ #[inline(always)] + pub fn parameters(&self) -> &str { + self.q.parameters() + } + + /// Sends a Sample as a reply to this Query + pub fn reply(&self, sample: Sample) -> ReplyBuilder<'_> { + // Call outgoing intercerceptor + let sample = if let Some(ref interceptor) = self.interceptor { + interceptor(sample) + } else { + sample + }; + // Send reply + self.q.reply(Ok(sample)) + } +} diff --git a/plugins/zenoh-plugin-example/src/lib.rs b/plugins/zenoh-plugin-example/src/lib.rs index ad254278e3..c2f083827d 100644 --- a/plugins/zenoh-plugin-example/src/lib.rs +++ b/plugins/zenoh-plugin-example/src/lib.rs @@ -15,7 +15,6 @@ use futures::select; use log::{debug, info}; -use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::sync::{ @@ -165,9 +164,8 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { // on sample received by the Subscriber sample = sub.recv_async() => { let sample = sample.unwrap(); - let payload = sample.payload().deserialize::>().unwrap_or_else(|e| Cow::from(e.to_string())); - info!("Received data ('{}': '{}')", sample.key_expr(), payload); - stored.insert(sample.key_expr().to_string(), sample); + info!("Received data ('{}': '{}')", sample.key_expr, sample.value); + stored.insert(sample.key_expr.to_string(), sample); }, // on query received by the Queryable query = queryable.recv_async() => { @@ -175,7 +173,7 @@ async fn run(runtime: Runtime, selector: KeyExpr<'_>, flag: Arc) { info!("Handling query '{}'", query.selector()); for (key_expr, sample) in stored.iter() { if query.selector().key_expr.intersects(unsafe{keyexpr::from_str_unchecked(key_expr)}) { - query.reply_sample(sample.clone()).res().await.unwrap(); + query.reply(Ok(sample.clone())).res().await.unwrap(); } } } diff --git a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs index bb76005d6e..0c6eb4357b 100644 --- a/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs +++ 
b/plugins/zenoh-plugin-rest/examples/z_serve_sse.rs @@ -49,7 +49,11 @@ async fn main() { let receiver = queryable.receiver.clone(); async move { while let Ok(request) = receiver.recv_async().await { - request.reply(key, HTML).res().await.unwrap(); + request + .reply(Ok(Sample::new(key, HTML))) + .res() + .await + .unwrap(); } } }); @@ -71,7 +75,11 @@ async fn main() { println!("Data updates are accessible through HTML5 SSE at http://:8000/{key}"); loop { - publisher.put(value).res().await.unwrap(); + publisher + .put(Value::from(value).encoding(KnownEncoding::TextPlain.into())) + .res() + .await + .unwrap(); async_std::task::sleep(Duration::from_secs(1)).await; } } diff --git a/plugins/zenoh-plugin-rest/src/lib.rs b/plugins/zenoh-plugin-rest/src/lib.rs index 43c3f33776..6f4e80f4eb 100644 --- a/plugins/zenoh-plugin-rest/src/lib.rs +++ b/plugins/zenoh-plugin-rest/src/lib.rs @@ -18,20 +18,18 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use async_std::prelude::FutureExt; -use base64::Engine; +use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; use futures::StreamExt; use http_types::Method; -use serde::{Deserialize, Serialize}; -use std::borrow::Cow; use std::convert::TryFrom; use std::str::FromStr; use std::sync::Arc; use tide::http::Mime; use tide::sse::Sender; use tide::{Request, Response, Server, StatusCode}; -use zenoh::payload::StringOrBase64; use zenoh::plugins::{RunningPluginTrait, ZenohPlugin}; use zenoh::prelude::r#async::*; +use zenoh::properties::Properties; use zenoh::query::{QueryConsolidation, Reply}; use zenoh::runtime::Runtime; use zenoh::selector::TIME_RANGE_KEY; @@ -48,59 +46,57 @@ lazy_static::lazy_static! 
{ } const RAW_KEY: &str = "_raw"; -#[derive(Serialize, Deserialize)] -struct JSONSample { - key: String, - value: serde_json::Value, - encoding: String, - time: Option, -} - -pub fn base64_encode(data: &[u8]) -> String { - use base64::engine::general_purpose; - general_purpose::STANDARD.encode(data) -} - -fn payload_to_json(payload: &Payload, encoding: &Encoding) -> serde_json::Value { - match payload.is_empty() { - // If the value is empty return a JSON null - true => serde_json::Value::Null, - // if it is not check the encoding - false => { - match encoding { - // If it is a JSON try to deserialize as json, if it fails fallback to base64 - &Encoding::APPLICATION_JSON | &Encoding::TEXT_JSON | &Encoding::TEXT_JSON5 => { - payload - .deserialize::() - .unwrap_or_else(|_| { - serde_json::Value::String(StringOrBase64::from(payload).into_string()) - }) - } - // otherwise convert to JSON string - _ => serde_json::Value::String(StringOrBase64::from(payload).into_string()), - } +fn value_to_json(value: Value) -> String { + // @TODO: transcode to JSON when implemented in Value + match &value.encoding { + p if p.starts_with(KnownEncoding::TextPlain) + || p.starts_with(KnownEncoding::AppXWwwFormUrlencoded) => + { + // convert to Json string for special characters escaping + serde_json::json!(value.to_string()).to_string() + } + p if p.starts_with(KnownEncoding::AppProperties) => { + // convert to Json string for special characters escaping + serde_json::json!(*Properties::from(value.to_string())).to_string() + } + p if p.starts_with(KnownEncoding::AppJson) + || p.starts_with(KnownEncoding::AppInteger) + || p.starts_with(KnownEncoding::AppFloat) => + { + value.to_string() + } + _ => { + format!(r#""{}""#, b64_std_engine.encode(value.payload.contiguous())) } } } -fn sample_to_json(sample: &Sample) -> JSONSample { - JSONSample { - key: sample.key_expr().as_str().to_string(), - value: payload_to_json(sample.payload(), sample.encoding()), - encoding: 
sample.encoding().to_string(), - time: sample.timestamp().map(|ts| ts.to_string()), - } +fn sample_to_json(sample: Sample) -> String { + let encoding = sample.value.encoding.to_string(); + format!( + r#"{{ "key": "{}", "value": {}, "encoding": "{}", "time": "{}" }}"#, + sample.key_expr.as_str(), + value_to_json(sample.value), + encoding, + if let Some(ts) = sample.timestamp { + ts.to_string() + } else { + "None".to_string() + } + ) } -fn result_to_json(sample: Result) -> JSONSample { +fn result_to_json(sample: Result) -> String { match sample { - Ok(sample) => sample_to_json(&sample), - Err(err) => JSONSample { - key: "ERROR".into(), - value: payload_to_json(&err.payload, &err.encoding), - encoding: err.encoding.to_string(), - time: None, - }, + Ok(sample) => sample_to_json(sample), + Err(err) => { + let encoding = err.encoding.to_string(); + format!( + r#"{{ "key": "ERROR", "value": {}, "encoding": "{}"}}"#, + value_to_json(err), + encoding, + ) + } } } @@ -108,10 +104,10 @@ async fn to_json(results: flume::Receiver) -> String { let values = results .stream() .filter_map(move |reply| async move { Some(result_to_json(reply.sample)) }) - .collect::>() - .await; - - serde_json::to_string(&values).unwrap_or("[]".into()) + .collect::>() + .await + .join(",\n"); + format!("[\n{values}\n]\n") } async fn to_json_response(results: flume::Receiver) -> Response { @@ -125,11 +121,8 @@ async fn to_json_response(results: flume::Receiver) -> Response { fn sample_to_html(sample: Sample) -> String { format!( "
{}
\n
{}
\n", - sample.key_expr().as_str(), - sample - .payload() - .deserialize::>() - .unwrap_or_default() + sample.key_expr.as_str(), + String::from_utf8_lossy(&sample.payload.contiguous()) ) } @@ -139,7 +132,7 @@ fn result_to_html(sample: Result) -> String { Err(err) => { format!( "
ERROR
\n
{}
\n", - err.payload.deserialize::>().unwrap_or_default() + String::from_utf8_lossy(&err.payload.contiguous()) ) } } @@ -164,16 +157,13 @@ async fn to_raw_response(results: flume::Receiver) -> Response { Ok(reply) => match reply.sample { Ok(sample) => response( StatusCode::Ok, - Cow::from(sample.encoding()).as_ref(), - &sample - .payload() - .deserialize::>() - .unwrap_or_default(), + sample.value.encoding.to_string().as_ref(), + String::from_utf8_lossy(&sample.payload.contiguous()).as_ref(), ), Err(value) => response( StatusCode::Ok, - Cow::from(&value.encoding).as_ref(), - &value.payload.deserialize::>().unwrap_or_default(), + value.encoding.to_string().as_ref(), + String::from_utf8_lossy(&value.payload.contiguous()).as_ref(), ), }, Err(_) => response(StatusCode::Ok, "", ""), @@ -352,11 +342,8 @@ async fn query(mut req: Request<(Arc, String)>) -> tide::Result, String)>) -> tide::Result { @@ -454,19 +441,21 @@ async fn write(mut req: Request<(Arc, String)>) -> tide::Result session.put(&key_expr, bytes).encoding(encoding).res().await, - SampleKind::Delete => session.delete(&key_expr).res().await, - }; - match res { + match req + .state() + .0 + .put(&key_expr, bytes) + .encoding(encoding) + .kind(method_to_kind(req.method())) + .res() + .await + { Ok(_) => Ok(Response::new(StatusCode::Ok)), Err(e) => Ok(response( StatusCode::InternalServerError, diff --git a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs index 90a6ae6250..aa7260e868 100644 --- a/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs +++ b/plugins/zenoh-plugin-storage-manager/src/backends_mgt.rs @@ -14,6 +14,7 @@ use super::storages_mgt::*; use flume::Sender; use std::sync::Arc; +use zenoh::prelude::r#async::*; use zenoh::Session; use zenoh_backend_traits::config::StorageConfig; use zenoh_backend_traits::{Capability, VolumeInstance}; @@ -22,12 +23,16 @@ use zenoh_result::ZResult; pub struct StoreIntercept { pub storage: Box, pub 
capability: Capability, + pub in_interceptor: Option Sample + Send + Sync>>, + pub out_interceptor: Option Sample + Send + Sync>>, } pub(crate) async fn create_and_start_storage( admin_key: String, config: StorageConfig, backend: &VolumeInstance, + in_interceptor: Option Sample + Send + Sync>>, + out_interceptor: Option Sample + Send + Sync>>, zenoh: Arc, ) -> ZResult> { log::trace!("Create storage '{}'", &admin_key); @@ -36,6 +41,8 @@ pub(crate) async fn create_and_start_storage( let store_intercept = StoreIntercept { storage, capability, + in_interceptor, + out_interceptor, }; start_storage(store_intercept, config, admin_key, zenoh).await diff --git a/plugins/zenoh-plugin-storage-manager/src/lib.rs b/plugins/zenoh-plugin-storage-manager/src/lib.rs index 91df2f108d..0db30bbd6a 100644 --- a/plugins/zenoh-plugin-storage-manager/src/lib.rs +++ b/plugins/zenoh-plugin-storage-manager/src/lib.rs @@ -239,10 +239,14 @@ impl StorageRuntimeInner { volume_id, backend.name() ); + let in_interceptor = backend.instance().incoming_data_interceptor(); + let out_interceptor = backend.instance().outgoing_data_interceptor(); let stopper = async_std::task::block_on(create_and_start_storage( admin_key, storage.clone(), backend.instance(), + in_interceptor, + out_interceptor, self.session.clone(), ))?; self.storages diff --git a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs index 4e333b8592..ebb4922c9d 100644 --- a/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/memory_backend/mod.rs @@ -61,6 +61,26 @@ impl Volume for MemoryBackend { log::debug!("Create Memory Storage with configuration: {:?}", properties); Ok(Box::new(MemoryStorage::new(properties).await?)) } + + fn incoming_data_interceptor(&self) -> Option Sample + Send + Sync>> { + // By default: no interception point + None + // To test interceptors, uncomment this line: + // 
Some(Arc::new(|sample| { + // trace!(">>>> IN INTERCEPTOR FOR {:?}", sample); + // sample + // })) + } + + fn outgoing_data_interceptor(&self) -> Option Sample + Send + Sync>> { + // By default: no interception point + None + // To test interceptors, uncomment this line: + // Some(Arc::new(|sample| { + // trace!("<<<< OUT INTERCEPTOR FOR {:?}", sample); + // sample + // })) + } } impl Drop for MemoryBackend { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs index 1ce6a1cb16..7295367a06 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/align_queryable.rs @@ -18,7 +18,6 @@ use std::cmp::Ordering; use std::collections::{BTreeSet, HashMap, HashSet}; use std::str; use std::str::FromStr; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -95,43 +94,29 @@ impl AlignQueryable { for value in values { match value { AlignData::Interval(i, c) => { - query - .reply( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ) - .res() - .await - .unwrap(); + let sample = Sample::new( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ); + query.reply(Ok(sample)).res().await.unwrap(); } AlignData::Subinterval(i, c) => { - query - .reply( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ) - .res() - .await - .unwrap(); + let sample = Sample::new( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ); + query.reply(Ok(sample)).res().await.unwrap(); } AlignData::Content(i, c) => { - query - .reply( - query.key_expr().clone(), - serde_json::to_string(&(i, c)).unwrap(), - ) - .res() - .await - .unwrap(); + let sample = Sample::new( + query.key_expr().clone(), + serde_json::to_string(&(i, c)).unwrap(), + ); + 
query.reply(Ok(sample)).res().await.unwrap(); } AlignData::Data(k, (v, ts)) => { - query - .reply(k, v.payload) - .encoding(v.encoding) - .timestamp(ts) - .res() - .await - .unwrap(); + let sample = Sample::new(k, v).with_timestamp(ts); + query.reply(Ok(sample)).res().await.unwrap(); } } } @@ -179,8 +164,8 @@ impl AlignQueryable { if entry.is_some() { let entry = entry.unwrap(); result.push(AlignData::Data( - OwnedKeyExpr::from(entry.key_expr().clone()), - (Value::from(entry), each.timestamp), + OwnedKeyExpr::from(entry.key_expr), + (entry.value, each.timestamp), )); } } @@ -235,10 +220,10 @@ impl AlignQueryable { Ok(sample) => { log::trace!( "[ALIGN QUERYABLE] Received ('{}': '{}')", - sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()) + sample.key_expr.as_str(), + sample.value ); - if let Some(timestamp) = sample.timestamp() { + if let Some(timestamp) = sample.timestamp { match timestamp.cmp(&logentry.timestamp) { Ordering::Greater => return None, Ordering::Less => { diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs index 64d5cfa1cd..041567ae27 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/aligner.rs @@ -19,9 +19,7 @@ use flume::{Receiver, Sender}; use std::collections::{HashMap, HashSet}; use std::str; use zenoh::key_expr::{KeyExpr, OwnedKeyExpr}; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; -use zenoh::sample::builder::SampleBuilder; use zenoh::time::Timestamp; use zenoh::Session; @@ -106,13 +104,7 @@ impl Aligner { log::trace!("[ALIGNER] Received queried samples: {missing_data:?}"); for (key, (ts, value)) in missing_data { - let Value { - payload, encoding, .. 
- } = value; - let sample = SampleBuilder::put(key, payload) - .encoding(encoding) - .timestamp(ts) - .into(); + let sample = Sample::new(key, value).with_timestamp(ts); log::debug!("[ALIGNER] Adding {:?} to storage", sample); self.tx_sample.send_async(sample).await.unwrap_or_else(|e| { log::error!("[ALIGNER] Error adding sample to storage: {}", e) @@ -143,8 +135,8 @@ impl Aligner { for sample in replies { result.insert( - sample.key_expr().clone().into(), - (*sample.timestamp().unwrap(), Value::from(sample)), + sample.key_expr.into(), + (sample.timestamp.unwrap(), sample.value), ); } (result, no_err) @@ -210,9 +202,9 @@ impl Aligner { let properties = format!("timestamp={}&{}=cold", other.timestamp, ERA); let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_intervals: HashMap = HashMap::new(); - // expecting sample.payload to be a vec of intervals with their checksum + // expecting sample.value to be a vec of intervals with their checksum for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_str(&each.value.to_string()) { Ok((i, c)) => { other_intervals.insert(i, c); } @@ -254,11 +246,11 @@ impl Aligner { INTERVALS, diff_string.join(",") ); - // expecting sample.payload to be a vec of subintervals with their checksum + // expecting sample.value to be a vec of subintervals with their checksum let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_subintervals: HashMap = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_str(&each.value.to_string()) { Ok((i, c)) => { other_subintervals.insert(i, c); } @@ -295,11 +287,11 @@ impl Aligner { SUBINTERVALS, diff_string.join(",") ); - // expecting sample.payload to be a vec of log entries with their checksum + // expecting sample.value to be a vec of log entries with their checksum 
let (reply_content, mut no_err) = self.perform_query(other_rep, properties).await; let mut other_content: HashMap> = HashMap::new(); for each in reply_content { - match serde_json::from_str(&StringOrBase64::from(each.payload())) { + match serde_json::from_str(&each.value.to_string()) { Ok((i, c)) => { other_content.insert(i, c); } @@ -339,14 +331,14 @@ impl Aligner { Ok(sample) => { log::trace!( "[ALIGNER] Received ('{}': '{}')", - sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()) + sample.key_expr.as_str(), + sample.value ); return_val.push(sample); } Err(err) => { log::error!( - "[ALIGNER] Received error for query on selector {} :{:?}", + "[ALIGNER] Received error for query on selector {} :{}", selector, err ); diff --git a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs index 5dda032029..b743a70451 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/mod.rs @@ -26,7 +26,6 @@ use std::str; use std::str::FromStr; use std::time::{Duration, SystemTime}; use urlencoding::encode; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::time::Timestamp; use zenoh::Session; @@ -220,17 +219,16 @@ impl Replica { continue; } }; - let from = &sample.key_expr().as_str() + let from = &sample.key_expr.as_str() [Replica::get_digest_key(&self.key_expr, ALIGN_PREFIX).len() + 1..]; log::trace!( "[DIGEST_SUB] From {} Received {} ('{}': '{}')", from, - sample.kind(), - sample.key_expr().as_str(), - StringOrBase64::from(sample.payload()) + sample.kind, + sample.key_expr.as_str(), + sample.value ); - let digest: Digest = match serde_json::from_str(&StringOrBase64::from(sample.payload())) - { + let digest: Digest = match serde_json::from_str(&format!("{}", sample.value)) { Ok(digest) => digest, Err(e) => { log::error!("[DIGEST_SUB] Error in decoding the digest: {}", e); diff --git 
a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs index 06c5882408..9c419c6d31 100644 --- a/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs +++ b/plugins/zenoh-plugin-storage-manager/src/replica/storage.rs @@ -21,14 +21,10 @@ use futures::select; use std::collections::{HashMap, HashSet}; use std::str::{self, FromStr}; use std::time::{SystemTime, UNIX_EPOCH}; -use zenoh::buffers::buffer::SplitBuffer; use zenoh::buffers::ZBuf; use zenoh::prelude::r#async::*; -use zenoh::query::{ConsolidationMode, QueryTarget}; -use zenoh::sample::builder::SampleBuilder; -use zenoh::sample::{Sample, SampleKind}; -use zenoh::time::{new_reception_timestamp, Timestamp, NTP64}; -use zenoh::value::Value; +use zenoh::query::ConsolidationMode; +use zenoh::time::{Timestamp, NTP64}; use zenoh::{Result as ZResult, Session}; use zenoh_backend_traits::config::{GarbageCollectionConfig, StorageConfig}; use zenoh_backend_traits::{Capability, History, Persistence, StorageInsertionResult, StoredData}; @@ -64,6 +60,8 @@ pub struct StorageService { capability: Capability, tombstones: Arc>>, wildcard_updates: Arc>>, + in_interceptor: Option Sample + Send + Sync>>, + out_interceptor: Option Sample + Send + Sync>>, replication: Option, } @@ -87,6 +85,8 @@ impl StorageService { capability: store_intercept.capability, tombstones: Arc::new(RwLock::new(KeBoxTree::default())), wildcard_updates: Arc::new(RwLock::new(KeBoxTree::default())), + in_interceptor: store_intercept.in_interceptor, + out_interceptor: store_intercept.out_interceptor, replication, }; if storage_service @@ -179,8 +179,8 @@ impl StorageService { }; // log error if the sample is not timestamped // This is to reduce down the line inconsistencies of having duplicate samples stored - if sample.timestamp().is_none() { - log::error!("Sample {:?} is not timestamped. 
Please timestamp samples meant for replicated storage.", sample); + if sample.get_timestamp().is_none() { + log::error!("Sample {} is not timestamped. Please timestamp samples meant for replicated storage.", sample); } else { self.process_sample(sample).await; @@ -223,15 +223,14 @@ impl StorageService { select!( // on sample for key_expr sample = storage_sub.recv_async() => { - let sample = match sample { + let mut sample = match sample { Ok(sample) => sample, Err(e) => { log::error!("Error in sample: {}", e); continue; } }; - let timestamp = sample.timestamp().cloned().unwrap_or(new_reception_timestamp()); - let sample = SampleBuilder::from(sample).timestamp(timestamp).into(); + sample.ensure_timestamp(); self.process_sample(sample).await; }, // on query on key_expr @@ -263,60 +262,66 @@ impl StorageService { // The storage should only simply save the key, sample pair while put and retrieve the same during get // the trimming during PUT and GET should be handled by the plugin async fn process_sample(&self, sample: Sample) { - log::trace!("[STORAGE] Processing sample: {:?}", sample); + log::trace!("[STORAGE] Processing sample: {}", sample); + // Call incoming data interceptor (if any) + let sample = if let Some(ref interceptor) = self.in_interceptor { + interceptor(sample) + } else { + sample + }; + // if wildcard, update wildcard_updates - if sample.key_expr().is_wild() { + if sample.key_expr.is_wild() { self.register_wildcard_update(sample.clone()).await; } - let matching_keys = if sample.key_expr().is_wild() { - self.get_matching_keys(sample.key_expr()).await + let matching_keys = if sample.key_expr.is_wild() { + self.get_matching_keys(&sample.key_expr).await } else { - vec![sample.key_expr().clone().into()] + vec![sample.key_expr.clone().into()] }; log::trace!( "The list of keys matching `{}` is : {:?}", - sample.key_expr(), + sample.key_expr, matching_keys ); for k in matching_keys { if !self - .is_deleted(&k.clone(), sample.timestamp().unwrap()) + 
.is_deleted(&k.clone(), sample.get_timestamp().unwrap()) .await && (self.capability.history.eq(&History::All) || (self.capability.history.eq(&History::Latest) - && self.is_latest(&k, sample.timestamp().unwrap()).await)) + && self.is_latest(&k, sample.get_timestamp().unwrap()).await)) { log::trace!( - "Sample `{:?}` identified as neded processing for key {}", + "Sample `{}` identified as neded processing for key {}", sample, k ); // there might be the case that the actual update was outdated due to a wild card update, but not stored yet in the storage. // get the relevant wild card entry and use that value and timestamp to update the storage - let sample_to_store: Sample = if let Some(update) = self - .ovderriding_wild_update(&k, sample.timestamp().unwrap()) + let sample_to_store = match self + .ovderriding_wild_update(&k, sample.get_timestamp().unwrap()) .await { - match update.kind { - SampleKind::Put => { - SampleBuilder::put(KeyExpr::from(k.clone()), update.data.value.payload) - .encoding(update.data.value.encoding) - .timestamp(update.data.timestamp) - .into() - } - SampleKind::Delete => SampleBuilder::delete(KeyExpr::from(k.clone())) - .timestamp(update.data.timestamp) - .into(), + Some(overriding_update) => { + let mut sample_to_store = + Sample::new(KeyExpr::from(k.clone()), overriding_update.data.value) + .with_timestamp(overriding_update.data.timestamp); + sample_to_store.kind = overriding_update.kind; + sample_to_store + } + None => { + let mut sample_to_store = + Sample::new(KeyExpr::from(k.clone()), sample.value.clone()) + .with_timestamp(sample.timestamp.unwrap()); + sample_to_store.kind = sample.kind; + sample_to_store } - } else { - SampleBuilder::from(sample.clone()) - .keyexpr(k.clone()) - .into() }; - let stripped_key = match self.strip_prefix(sample_to_store.key_expr()) { + let stripped_key = match self.strip_prefix(&sample_to_store.key_expr) { Ok(stripped) => stripped, Err(e) => { log::error!("{}", e); @@ -324,25 +329,23 @@ impl StorageService { 
} }; let mut storage = self.storage.lock().await; - let result = match sample.kind() { - SampleKind::Put => { - storage - .put( - stripped_key, - Value::new(sample_to_store.payload().clone()) - .encoding(sample_to_store.encoding().clone()), - *sample_to_store.timestamp().unwrap(), - ) - .await - } - SampleKind::Delete => { - // register a tombstone - self.mark_tombstone(&k, *sample_to_store.timestamp().unwrap()) - .await; - storage - .delete(stripped_key, *sample_to_store.timestamp().unwrap()) - .await - } + let result = if sample.kind == SampleKind::Put { + storage + .put( + stripped_key, + sample_to_store.value.clone(), + sample_to_store.timestamp.unwrap(), + ) + .await + } else if sample.kind == SampleKind::Delete { + // register a tombstone + self.mark_tombstone(&k, sample_to_store.timestamp.unwrap()) + .await; + storage + .delete(stripped_key, sample_to_store.timestamp.unwrap()) + .await + } else { + Err("sample kind not implemented".into()) }; drop(storage); if self.replication.is_some() @@ -354,7 +357,7 @@ impl StorageService { .as_ref() .unwrap() .log_propagation - .send((k.clone(), *sample_to_store.timestamp().unwrap())); + .send((k.clone(), *sample_to_store.get_timestamp().unwrap())); match sending { Ok(_) => (), Err(e) => { @@ -387,16 +390,15 @@ impl StorageService { async fn register_wildcard_update(&self, sample: Sample) { // @TODO: change into a better store that does incremental writes - let key = sample.key_expr().clone(); + let key = sample.clone().key_expr; let mut wildcards = self.wildcard_updates.write().await; - let timestamp = *sample.timestamp().unwrap(); wildcards.insert( &key, Update { - kind: sample.kind(), + kind: sample.kind, data: StoredData { - value: Value::from(sample), - timestamp, + value: sample.value, + timestamp: sample.timestamp.unwrap(), }, }, ); @@ -513,13 +515,15 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - if let Err(e) = q - 
.reply(key.clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let sample = Sample::new(key.clone(), entry.value) + .with_timestamp(entry.timestamp); + // apply outgoing interceptor on results + let sample = if let Some(ref interceptor) = self.out_interceptor { + interceptor(sample) + } else { + sample + }; + if let Err(e) = q.reply(Ok(sample)).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -545,13 +549,15 @@ impl StorageService { match storage.get(stripped_key, q.parameters()).await { Ok(stored_data) => { for entry in stored_data { - if let Err(e) = q - .reply(q.key_expr().clone(), entry.value.payload) - .encoding(entry.value.encoding) - .timestamp(entry.timestamp) - .res() - .await - { + let sample = Sample::new(q.key_expr().clone(), entry.value) + .with_timestamp(entry.timestamp); + // apply outgoing interceptor on results + let sample = if let Some(ref interceptor) = self.out_interceptor { + interceptor(sample) + } else { + sample + }; + if let Err(e) = q.reply(Ok(sample)).res().await { log::warn!( "Storage '{}' raised an error replying a query: {}", self.name, @@ -561,16 +567,7 @@ impl StorageService { } } Err(e) => { - let err_message = - format!("Storage '{}' raised an error on query: {}", self.name, e); - log::warn!("{}", err_message); - if let Err(e) = q.reply_err(err_message).res().await { - log::warn!( - "Storage '{}' raised an error replying a query: {}", - self.name, - e - ) - } + log::warn!("Storage '{}' raised an error on query: {e}", self.name); } }; } @@ -661,7 +658,7 @@ impl StorageService { self.process_sample(sample).await; } Err(e) => log::warn!( - "Storage '{}' received an error to align query: {:?}", + "Storage '{}' received an error to align query: {}", self.name, e ), @@ -672,37 +669,25 @@ impl StorageService { } fn serialize_update(update: &Update) -> String { - let Update { - kind, - data: - StoredData { - value: Value { - 
payload, encoding, .. - }, - timestamp, - }, - } = update; - let zbuf: ZBuf = payload.into(); - let result = ( - kind.to_string(), - timestamp.to_string(), - encoding.to_string(), - zbuf.slices().collect::>(), + update.kind.to_string(), + update.data.timestamp.to_string(), + update.data.value.encoding.to_string(), + update.data.value.payload.slices().collect::>(), ); serde_json::to_string_pretty(&result).unwrap() } fn construct_update(data: String) -> Update { - let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); // @TODO: remove the unwrap() + let result: (String, String, String, Vec<&[u8]>) = serde_json::from_str(&data).unwrap(); let mut payload = ZBuf::default(); for slice in result.3 { payload.push_zslice(slice.to_vec().into()); } - let value = Value::new(payload).encoding(result.2); + let value = Value::new(payload).encoding(Encoding::from(result.2)); let data = StoredData { value, - timestamp: Timestamp::from_str(&result.1).unwrap(), // @TODO: remove the unwrap() + timestamp: Timestamp::from_str(&result.1).unwrap(), }; let kind = if result.0.eq(&(SampleKind::Put).to_string()) { SampleKind::Put diff --git a/plugins/zenoh-plugin-storage-manager/tests/operations.rs b/plugins/zenoh-plugin-storage-manager/tests/operations.rs index 36162f01c2..a4293f31f1 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/operations.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/operations.rs @@ -20,7 +20,6 @@ use std::str::FromStr; use std::thread::sleep; use async_std::task; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -101,7 +100,7 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/a").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "1"); + assert_eq!(format!("{}", data[0].value), "1"); put_data( &session, @@ -117,7 +116,7 @@ 
async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(format!("{}", data[0].value), "2"); delete_data( &session, @@ -136,8 +135,8 @@ async fn test_updates_in_order() { // expects exactly one sample let data = get_data(&session, "operation/test/b").await; assert_eq!(data.len(), 1); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); - assert_eq!(data[0].key_expr().as_str(), "operation/test/b"); + assert_eq!(format!("{}", data[0].value), "2"); + assert_eq!(data[0].key_expr.as_str(), "operation/test/b"); drop(storage); } diff --git a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs index 5a71dc23f0..60970b2247 100644 --- a/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs +++ b/plugins/zenoh-plugin-storage-manager/tests/wildcard.rs @@ -21,7 +21,6 @@ use std::thread::sleep; // use std::collections::HashMap; use async_std::task; -use zenoh::payload::StringOrBase64; use zenoh::prelude::r#async::*; use zenoh::query::Reply; use zenoh::{prelude::Config, time::Timestamp}; @@ -117,8 +116,8 @@ async fn test_wild_card_in_order() { // expected single entry let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 1); - assert_eq!(data[0].key_expr().as_str(), "wild/test/a"); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "2"); + assert_eq!(data[0].key_expr.as_str(), "wild/test/a"); + assert_eq!(format!("{}", data[0].value), "2"); put_data( &session, @@ -134,10 +133,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); - assert!(["2", 
"3"].contains(&StringOrBase64::from(data[0].payload()).as_str())); - assert!(["2", "3"].contains(&StringOrBase64::from(data[1].payload()).as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); + assert!(["2", "3"].contains(&format!("{}", data[0].value).as_str())); + assert!(["2", "3"].contains(&format!("{}", data[1].value).as_str())); put_data( &session, @@ -153,10 +152,10 @@ async fn test_wild_card_in_order() { // expected two entries let data = get_data(&session, "wild/test/*").await; assert_eq!(data.len(), 2); - assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr().as_str())); - assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr().as_str())); - assert_eq!(StringOrBase64::from(data[0].payload()).as_str(), "4"); - assert_eq!(StringOrBase64::from(data[1].payload()).as_str(), "4"); + assert!(["wild/test/a", "wild/test/b"].contains(&data[0].key_expr.as_str())); + assert!(["wild/test/a", "wild/test/b"].contains(&data[1].key_expr.as_str())); + assert_eq!(format!("{}", data[0].value).as_str(), "4"); + assert_eq!(format!("{}", data[1].value).as_str(), "4"); delete_data( &session, diff --git a/zenoh-ext/Cargo.toml b/zenoh-ext/Cargo.toml index 65f1d47af1..6a0488cb54 100644 --- a/zenoh-ext/Cargo.toml +++ b/zenoh-ext/Cargo.toml @@ -37,10 +37,7 @@ env_logger = { workspace = true } flume = { workspace = true } futures = { workspace = true } log = { workspace = true } -phf = { workspace = true } serde = { workspace = true, features = ["default"] } -serde_cbor = { workspace = true } -serde_json = { workspace = true } zenoh = { workspace = true, features = ["unstable"], default-features = false } zenoh-core = { workspace = true } zenoh-macros = { workspace = true } @@ -48,6 +45,7 @@ zenoh-result = { workspace = true } zenoh-sync = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } +zenoh-task = { 
workspace = true } [dev-dependencies] clap = { workspace = true, features = ["derive"] } diff --git a/zenoh-ext/examples/z_query_sub.rs b/zenoh-ext/examples/z_query_sub.rs index d88519789b..570d15ac15 100644 --- a/zenoh-ext/examples/z_query_sub.rs +++ b/zenoh-ext/examples/z_query_sub.rs @@ -53,15 +53,11 @@ async fn main() { println!("Press CTRL-C to quit..."); while let Ok(sample) = subscriber.recv_async().await { - let payload = sample - .payload() - .deserialize::() - .unwrap_or_else(|e| format!("{}", e)); println!( ">> [Subscriber] Received {} ('{}': '{}')", - sample.kind(), - sample.key_expr().as_str(), - payload + sample.kind, + sample.key_expr.as_str(), + sample.value ); } } diff --git a/zenoh-ext/src/group.rs b/zenoh-ext/src/group.rs index 8a7823ed72..3595ccad08 100644 --- a/zenoh-ext/src/group.rs +++ b/zenoh-ext/src/group.rs @@ -25,7 +25,6 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tokio::task::JoinHandle; -use zenoh::payload::PayloadReader; use zenoh::prelude::r#async::*; use zenoh::publication::Publisher; use zenoh::query::ConsolidationMode; @@ -238,7 +237,11 @@ async fn query_handler(z: Arc, state: Arc) { while let Ok(query) = queryable.recv_async().await { log::trace!("Serving query for: {}", &qres); - query.reply(qres.clone(), buf.clone()).res().await.unwrap(); + query + .reply(Ok(Sample::new(qres.clone(), buf.clone()))) + .res() + .await + .unwrap(); } } @@ -249,7 +252,7 @@ async fn net_event_handler(z: Arc, state: Arc) { .await .unwrap(); while let Ok(s) = sub.recv_async().await { - match bincode::deserialize_from::(s.payload().reader()) { + match bincode::deserialize::(&(s.value.payload.contiguous())) { Ok(evt) => match evt { GroupNetEvent::Join(je) => { log::debug!("Member join: {:?}", &je.member); @@ -308,8 +311,8 @@ async fn net_event_handler(z: Arc, state: Arc) { while let Ok(reply) = receiver.recv_async().await { match reply.sample { Ok(sample) => { - match bincode::deserialize_from::( - 
sample.payload().reader(), + match bincode::deserialize::( + &sample.payload.contiguous(), ) { Ok(m) => { let mut expiry = Instant::now(); @@ -339,7 +342,7 @@ async fn net_event_handler(z: Arc, state: Arc) { } } Err(e) => { - log::warn!("Error received: {:?}", e); + log::warn!("Error received: {}", e); } } } diff --git a/zenoh-ext/src/lib.rs b/zenoh-ext/src/lib.rs index 7ac880fd8c..7440d80a53 100644 --- a/zenoh-ext/src/lib.rs +++ b/zenoh-ext/src/lib.rs @@ -23,9 +23,6 @@ pub use querying_subscriber::{ pub use session_ext::SessionExt; pub use subscriber_ext::SubscriberBuilderExt; pub use subscriber_ext::SubscriberForward; -use zenoh::query::Reply; -use zenoh::{sample::Sample, Result as ZResult}; -use zenoh_core::zerror; /// The space of keys to use in a [`FetchingSubscriber`]. pub enum KeySpace { @@ -54,13 +51,3 @@ impl From for KeySpace { KeySpace::Liveliness } } - -pub trait ExtractSample { - fn extract(self) -> ZResult; -} - -impl ExtractSample for Reply { - fn extract(self) -> ZResult { - self.sample.map_err(|e| zerror!("{:?}", e).into()) - } -} diff --git a/zenoh-ext/src/publication_cache.rs b/zenoh-ext/src/publication_cache.rs index 03f0814e5c..aede6a2ee4 100644 --- a/zenoh-ext/src/publication_cache.rs +++ b/zenoh-ext/src/publication_cache.rs @@ -11,16 +11,17 @@ // Contributors: // ZettaScale Zenoh Team, // -use flume::{bounded, Sender}; use std::collections::{HashMap, VecDeque}; use std::convert::TryInto; use std::future::Ready; +use std::time::Duration; use zenoh::prelude::r#async::*; use zenoh::queryable::{Query, Queryable}; use zenoh::subscriber::FlumeSubscriber; use zenoh::SessionRef; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::{bail, ZResult}; +use zenoh_task::TerminatableTask; use zenoh_util::core::ResolveFuture; /// The builder of PublicationCache, allowing to configure it. 
@@ -110,7 +111,7 @@ impl<'a> AsyncResolve for PublicationCacheBuilder<'a, '_, '_> { pub struct PublicationCache<'a> { local_sub: FlumeSubscriber<'a>, _queryable: Queryable<'a, flume::Receiver>, - _stoptx: Sender, + task: TerminatableTask, } impl<'a> PublicationCache<'a> { @@ -166,84 +167,86 @@ impl<'a> PublicationCache<'a> { let history = conf.history; // TODO(yuyuan): use CancellationToken to manage it - let (stoptx, stoprx) = bounded::(1); - zenoh_runtime::ZRuntime::TX.spawn(async move { - let mut cache: HashMap> = - HashMap::with_capacity(resources_limit.unwrap_or(32)); - let limit = resources_limit.unwrap_or(usize::MAX); + let token = TerminatableTask::create_cancellation_token(); + let token2 = token.clone(); + let task = TerminatableTask::spawn( + zenoh_runtime::ZRuntime::Application, + async move { + let mut cache: HashMap> = + HashMap::with_capacity(resources_limit.unwrap_or(32)); + let limit = resources_limit.unwrap_or(usize::MAX); + loop { + tokio::select! { + // on publication received by the local subscriber, store it + sample = sub_recv.recv_async() => { + if let Ok(sample) = sample { + let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { + prefix.join(&sample.key_expr).unwrap().into() + } else { + sample.key_expr.clone() + }; - loop { - tokio::select! 
{ - // on publication received by the local subscriber, store it - sample = sub_recv.recv_async() => { - if let Ok(sample) = sample { - let queryable_key_expr: KeyExpr<'_> = if let Some(prefix) = &queryable_prefix { - prefix.join(sample.key_expr()).unwrap().into() - } else { - sample.key_expr().clone() - }; - - if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { - if queue.len() >= history { - queue.pop_front(); + if let Some(queue) = cache.get_mut(queryable_key_expr.as_keyexpr()) { + if queue.len() >= history { + queue.pop_front(); + } + queue.push_back(sample); + } else if cache.len() >= limit { + log::error!("PublicationCache on {}: resource_limit exceeded - can't cache publication for a new resource", + pub_key_expr); + } else { + let mut queue: VecDeque = VecDeque::new(); + queue.push_back(sample); + cache.insert(queryable_key_expr.into(), queue); } - queue.push_back(sample); - } else if cache.len() >= limit { - log::error!("PublicationCache on {}: resource_limit exceeded - can't cache publication for a new resource", - pub_key_expr); - } else { - let mut queue: VecDeque = VecDeque::new(); - queue.push_back(sample); - cache.insert(queryable_key_expr.into(), queue); } - } - }, + }, - // on query, reply with cache content - query = quer_recv.recv_async() => { - if let Ok(query) = query { - if !query.selector().key_expr.as_str().contains('*') { - if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { - for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { - if !time_range.contains(timestamp.get_time().to_system_time()){ - continue; - } - } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { - log::warn!("Error replying to query: {}", e); - } - } - } - } else { - for (key_expr, queue) in cache.iter() { - if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + // on query, reply with cache content + 
query = quer_recv.recv_async() => { + if let Ok(query) = query { + if !query.selector().key_expr.as_str().contains('*') { + if let Some(queue) = cache.get(query.selector().key_expr.as_keyexpr()) { for sample in queue { - if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp()) { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { if !time_range.contains(timestamp.get_time().to_system_time()){ continue; } } - if let Err(e) = query.reply_sample(sample.clone()).res_async().await { + if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { log::warn!("Error replying to query: {}", e); } } } + } else { + for (key_expr, queue) in cache.iter() { + if query.selector().key_expr.intersects(unsafe{ keyexpr::from_str_unchecked(key_expr) }) { + for sample in queue { + if let (Ok(Some(time_range)), Some(timestamp)) = (query.selector().time_range(), sample.timestamp) { + if !time_range.contains(timestamp.get_time().to_system_time()){ + continue; + } + } + if let Err(e) = query.reply(Ok(sample.clone())).res_async().await { + log::warn!("Error replying to query: {}", e); + } + } + } + } } } - } - }, - - // When stoptx is dropped, stop the task - _ = stoprx.recv_async() => return + }, + _ = token2.cancelled() => return + } } - } - }); + }, + token, + ); Ok(PublicationCache { local_sub, _queryable: queryable, - _stoptx: stoptx, + task, }) } @@ -254,11 +257,11 @@ impl<'a> PublicationCache<'a> { let PublicationCache { _queryable, local_sub, - _stoptx, + task, } = self; _queryable.undeclare().res_async().await?; local_sub.undeclare().res_async().await?; - drop(_stoptx); + task.terminate(Duration::from_secs(10)); Ok(()) }) } diff --git a/zenoh-ext/src/querying_subscriber.rs b/zenoh-ext/src/querying_subscriber.rs index d749a94ed9..978d348da1 100644 --- a/zenoh-ext/src/querying_subscriber.rs +++ b/zenoh-ext/src/querying_subscriber.rs @@ -20,15 +20,12 @@ use std::time::Duration; use 
zenoh::handlers::{locked, DefaultHandler}; use zenoh::prelude::r#async::*; use zenoh::query::{QueryConsolidation, QueryTarget, ReplyKeyExpr}; -use zenoh::sample::builder::SampleBuilder; use zenoh::subscriber::{Reliability, Subscriber}; -use zenoh::time::{new_reception_timestamp, Timestamp}; +use zenoh::time::Timestamp; use zenoh::Result as ZResult; use zenoh::SessionRef; use zenoh_core::{zlock, AsyncResolve, Resolvable, SyncResolve}; -use crate::ExtractSample; - /// The builder of [`FetchingSubscriber`], allowing to configure it. #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] pub struct QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> { @@ -106,7 +103,7 @@ impl<'a, 'b, KeySpace> QueryingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandle handler: Handler, ) -> QueryingSubscriberBuilder<'a, 'b, KeySpace, Handler> where - Handler: zenoh::prelude::IntoHandler<'static, Sample>, + Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, { let QueryingSubscriberBuilder { session, @@ -215,17 +212,17 @@ impl<'a, 'b, KeySpace, Handler> QueryingSubscriberBuilder<'a, 'b, KeySpace, Hand impl<'a, KeySpace, Handler> Resolvable for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where - Handler: IntoHandler<'static, Sample>, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample>, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for QueryingSubscriberBuilder<'_, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { let session = self.session.clone(); @@ -273,8 +270,8 @@ where impl<'a, KeySpace, Handler> AsyncResolve for QueryingSubscriberBuilder<'a, '_, KeySpace, Handler> where KeySpace: Into + Clone, - Handler: 
IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { type Future = Ready; @@ -305,8 +302,8 @@ impl MergeQueue { } fn push(&mut self, sample: Sample) { - if let Some(ts) = sample.timestamp() { - self.timstamped.entry(*ts).or_insert(sample); + if let Some(ts) = sample.timestamp { + self.timstamped.entry(ts).or_insert(sample); } else { self.untimestamped.push_back(sample); } @@ -353,7 +350,8 @@ pub struct FetchingSubscriberBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { pub(crate) session: SessionRef<'a>, pub(crate) key_expr: ZResult>, @@ -374,7 +372,8 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { fn with_static_keys( self, @@ -400,7 +399,8 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, KeySpace, DefaultHandler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { /// Add callback to [`FetchingSubscriber`]. #[inline] @@ -463,7 +463,7 @@ where handler: Handler, ) -> FetchingSubscriberBuilder<'a, 'b, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: zenoh::prelude::IntoHandler<'static, Sample>, + Handler: zenoh::prelude::IntoCallbackReceiverPair<'static, Sample>, { let FetchingSubscriberBuilder { session, @@ -496,7 +496,8 @@ impl< TryIntoSample, > FetchingSubscriberBuilder<'a, 'b, crate::UserSpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { /// Change the subscription reliability. 
#[inline] @@ -537,11 +538,12 @@ impl< TryIntoSample, > Resolvable for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where - Handler: IntoHandler<'static, Sample>, - Handler::Handler: Send, - TryIntoSample: ExtractSample, + Handler: IntoCallbackReceiverPair<'static, Sample>, + Handler::Receiver: Send, + TryIntoSample: TryInto, + >::Error: Into, { - type To = ZResult>; + type To = ZResult>; } impl< @@ -552,9 +554,10 @@ impl< > SyncResolve for FetchingSubscriberBuilder<'_, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, - TryIntoSample: ExtractSample + Send + Sync, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, + TryIntoSample: TryInto + Send + Sync, + >::Error: Into, { fn res_sync(self) -> ::To { FetchingSubscriber::new(self.with_static_keys()) @@ -570,9 +573,10 @@ impl< > AsyncResolve for FetchingSubscriberBuilder<'a, '_, KeySpace, Handler, Fetch, TryIntoSample> where KeySpace: Into, - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, - TryIntoSample: ExtractSample + Send + Sync, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, + TryIntoSample: TryInto + Send + Sync, + >::Error: Into, { type Future = Ready; @@ -645,19 +649,20 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { ) -> ZResult where KeySpace: Into, - Handler: IntoHandler<'static, Sample, Handler = Receiver> + Send, - TryIntoSample: ExtractSample + Send + Sync, + Handler: IntoCallbackReceiverPair<'static, Sample, Receiver = Receiver> + Send, + TryIntoSample: TryInto + Send + Sync, + >::Error: Into, { let state = Arc::new(Mutex::new(InnerState { pending_fetches: 0, merge_queue: MergeQueue::new(), })); - let (callback, receiver) = conf.handler.into_handler(); + let (callback, receiver) = conf.handler.into_cb_receiver_pair(); let sub_callback = { let state = state.clone(); let 
callback = callback.clone(); - move |s| { + move |mut s| { let state = &mut zlock!(state); if state.pending_fetches == 0 { callback(s); @@ -665,10 +670,8 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { log::trace!("Sample received while fetch in progress: push it to merge_queue"); // ensure the sample has a timestamp, thus it will always be sorted into the MergeQueue // after any timestamped Sample possibly coming from a fetch reply. - let timestamp = s.timestamp().cloned().unwrap_or(new_reception_timestamp()); - state - .merge_queue - .push(SampleBuilder::from(s).timestamp(timestamp).into()); + s.ensure_timestamp(); + state.merge_queue.push(s); } } }; @@ -768,7 +771,8 @@ impl<'a, Receiver> FetchingSubscriber<'a, Receiver> { fetch: Fetch, ) -> impl Resolve> where - TryIntoSample: ExtractSample + Send + Sync, + TryIntoSample: TryInto + Send + Sync, + >::Error: Into, { FetchBuilder { fetch, @@ -845,7 +849,8 @@ pub struct FetchBuilder< Fetch: FnOnce(Box) -> ZResult<()>, TryIntoSample, > where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { fetch: Fetch, phantom: std::marker::PhantomData, @@ -856,7 +861,8 @@ pub struct FetchBuilder< impl) -> ZResult<()>, TryIntoSample> Resolvable for FetchBuilder where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { type To = ZResult<()>; } @@ -864,7 +870,8 @@ where impl) -> ZResult<()>, TryIntoSample> SyncResolve for FetchBuilder where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { fn res_sync(self) -> ::To { let handler = register_handler(self.state, self.callback); @@ -875,7 +882,8 @@ where impl) -> ZResult<()>, TryIntoSample> AsyncResolve for FetchBuilder where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { type Future = Ready; @@ -901,15 +909,16 @@ fn run_fetch< handler: RepliesHandler, ) -> ZResult<()> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { 
log::debug!("Fetch data for FetchingSubscriber"); - (fetch)(Box::new(move |s: TryIntoSample| match s.extract() { + (fetch)(Box::new(move |s: TryIntoSample| match s.try_into() { Ok(s) => { let mut state = zlock!(handler.state); log::trace!("Fetched sample received: push it to merge_queue"); state.merge_queue.push(s); } - Err(e) => log::debug!("Received error fetching data: {}", e), + Err(e) => log::debug!("Received error fetching data: {}", e.into()), })) } diff --git a/zenoh-ext/src/subscriber_ext.rs b/zenoh-ext/src/subscriber_ext.rs index 7d77fac05b..192a0a3121 100644 --- a/zenoh-ext/src/subscriber_ext.rs +++ b/zenoh-ext/src/subscriber_ext.rs @@ -13,7 +13,7 @@ // use flume::r#async::RecvStream; use futures::stream::{Forward, Map}; -use std::time::Duration; +use std::{convert::TryInto, time::Duration}; use zenoh::query::ReplyKeyExpr; use zenoh::sample::Locality; use zenoh::Result as ZResult; @@ -21,10 +21,9 @@ use zenoh::{ liveliness::LivelinessSubscriberBuilder, prelude::Sample, query::{QueryConsolidation, QueryTarget}, - subscriber::{Reliability, Subscriber, SubscriberBuilder}, + subscriber::{PushMode, Reliability, Subscriber, SubscriberBuilder}, }; -use crate::ExtractSample; use crate::{querying_subscriber::QueryingSubscriberBuilder, FetchingSubscriberBuilder}; /// Allows writing `subscriber.forward(receiver)` instead of `subscriber.stream().map(Ok).forward(publisher)` @@ -89,7 +88,8 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample; + TryIntoSample: TryInto, + >::Error: Into; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber) that will perform a query (`session.get()`) as it's /// initial fetch. 
@@ -124,7 +124,9 @@ pub trait SubscriberBuilderExt<'a, 'b, Handler> { fn querying(self) -> QueryingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler>; } -impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilder<'a, 'b, Handler> { +impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> + for SubscriberBuilder<'a, 'b, PushMode, Handler> +{ type KeySpace = crate::UserSpace; /// Create a [`FetchingSubscriber`](super::FetchingSubscriber). @@ -170,7 +172,8 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> for SubscriberBuilde fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { FetchingSubscriberBuilder { session: self.session, @@ -285,13 +288,14 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> fetch: Fetch, ) -> FetchingSubscriberBuilder<'a, 'b, Self::KeySpace, Handler, Fetch, TryIntoSample> where - TryIntoSample: ExtractSample, + TryIntoSample: TryInto, + >::Error: Into, { FetchingSubscriberBuilder { session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), origin: Locality::default(), fetch, handler: self.handler, @@ -336,11 +340,11 @@ impl<'a, 'b, Handler> SubscriberBuilderExt<'a, 'b, Handler> session: self.session, key_expr: self.key_expr, key_space: crate::LivelinessSpace, - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), origin: Locality::default(), query_selector: None, - query_target: QueryTarget::DEFAULT, - query_consolidation: QueryConsolidation::DEFAULT, + query_target: QueryTarget::default(), + query_consolidation: QueryConsolidation::default(), query_accept_replies: ReplyKeyExpr::MatchingQuery, query_timeout: Duration::from_secs(10), handler: self.handler, diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 80cf8ba1bc..144e5dbf72 100644 --- 
a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -31,6 +31,7 @@ maintenance = { status = "actively-developed" } [features] auth_pubkey = ["zenoh-transport/auth_pubkey"] auth_usrpwd = ["zenoh-transport/auth_usrpwd"] +complete_n = ["zenoh-codec/complete_n"] shared-memory = [ "zenoh-shm", "zenoh-protocol/shared-memory", @@ -79,18 +80,13 @@ log = { workspace = true } ordered-float = { workspace = true } paste = { workspace = true } petgraph = { workspace = true } -phf = { workspace = true } rand = { workspace = true, features = ["default"] } regex = { workspace = true } serde = { workspace = true, features = ["default"] } -serde_cbor = { workspace = true } serde_json = { workspace = true } -serde-pickle = { workspace = true } -serde_yaml = { workspace = true } socket2 = { workspace = true } stop-token = { workspace = true } uhlc = { workspace = true, features = ["default"] } -unwrap-infallible = { workspace = true } uuid = { workspace = true, features = ["default"] } vec_map = { workspace = true } zenoh-buffers = { workspace = true, features = ["std"] } @@ -110,6 +106,7 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } +zenoh-task = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/admin.rs b/zenoh/src/admin.rs index 3c76ca468a..7fd972c9a6 100644 --- a/zenoh/src/admin.rs +++ b/zenoh/src/admin.rs @@ -12,12 +12,11 @@ // ZettaScale Zenoh Team, // use crate::{ - encoding::Encoding, keyexpr, prelude::sync::{KeyExpr, Locality, SampleKind}, queryable::Query, sample::DataInfo, - Payload, Session, ZResult, + Sample, Session, ZResult, }; use std::{ collections::hash_map::DefaultHasher, @@ -25,7 +24,10 @@ use std::{ sync::Arc, }; use zenoh_core::SyncResolve; -use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; +use zenoh_protocol::{ + core::{Encoding, KnownEncoding, WireExpr}, + network::NetworkMessage, +}; use 
zenoh_transport::{ TransportEventHandler, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, }; @@ -68,12 +70,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { let key_expr = *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(peer.clone()) { - match Payload::try_from(value) { - Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); - } - Err(e) => log::debug!("Admin query error: {}", e), - } + let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); } } @@ -85,12 +82,7 @@ pub(crate) fn on_admin_query(session: &Session, query: Query) { *KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid / *KE_LINK / lid; if query.key_expr().intersects(&key_expr) { if let Ok(value) = serde_json::value::to_value(link) { - match Payload::try_from(value) { - Ok(zbuf) => { - let _ = query.reply(key_expr, zbuf).res_sync(); - } - Err(e) => log::debug!("Admin query error: {}", e), - } + let _ = query.reply(Ok(Sample::new(key_expr, value))).res_sync(); } } } @@ -156,7 +148,7 @@ impl TransportMulticastEventHandler for Handler { let expr = WireExpr::from(&(*KE_PREFIX / own_zid / *KE_TRANSPORT_UNICAST / zid)) .to_owned(); let info = DataInfo { - encoding: Some(Encoding::APPLICATION_JSON), + encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), ..Default::default() }; self.session.handle_data( @@ -202,7 +194,7 @@ impl TransportPeerEventHandler for PeerHandler { let mut s = DefaultHasher::new(); link.hash(&mut s); let info = DataInfo { - encoding: Some(Encoding::APPLICATION_JSON), + encoding: Some(Encoding::Exact(KnownEncoding::AppJson)), ..Default::default() }; self.session.handle_data( diff --git a/zenoh/src/encoding.rs b/zenoh/src/encoding.rs deleted file mode 100644 index d9fa725ed5..0000000000 --- a/zenoh/src/encoding.rs +++ /dev/null @@ -1,850 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the 
accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::payload::Payload; -use phf::phf_map; -use std::{borrow::Cow, convert::Infallible, fmt, str::FromStr}; -use zenoh_buffers::{ZBuf, ZSlice}; -use zenoh_protocol::core::EncodingId; -#[cfg(feature = "shared-memory")] -use ::{std::sync::Arc, zenoh_shm::SharedMemoryBuf}; - -/// Default encoding values used by Zenoh. -/// -/// An encoding has a similar role to Content-type in HTTP: it indicates, when present, how data should be interpreted by the application. -/// -/// Please note the Zenoh protocol does not impose any encoding value nor it operates on it. -/// It can be seen as some optional metadata that is carried over by Zenoh in such a way the application may perform different operations depending on the encoding value. -/// -/// A set of associated constants are provided to cover the most common encodings for user convenience. -/// This is parcticular useful in helping Zenoh to perform additional network optimizations. -/// -/// # Examples -/// -/// ### String operations -/// -/// Create an [`Encoding`] from a string and viceversa. -/// ``` -/// use zenoh::prelude::Encoding; -/// -/// let encoding: Encoding = "text/plain".into(); -/// let text: String = encoding.clone().into(); -/// assert_eq!("text/plain", &text); -/// ``` -/// -/// ### Constants and cow operations -/// -/// Since some encoding values are internally optimized by Zenoh, it's generally more efficient to use -/// the defined constants and [`Cow`][std::borrow::Cow] conversion to obtain its string representation. 
-/// ``` -/// use zenoh::prelude::Encoding; -/// use std::borrow::Cow; -/// -/// // This allocates -/// assert_eq!("text/plain", &String::from(Encoding::TEXT_PLAIN)); -/// // This does NOT allocate -/// assert_eq!("text/plain", &Cow::from(Encoding::TEXT_PLAIN)); -/// ``` -/// -/// ### Schema -/// -/// Additionally, a schema can be associated to the encoding. -/// The convetions is to use the `;` separator if an encoding is created from a string. -/// Alternatively, [`with_schema()`](Encoding::with_schema) can be used to add a schme to one of the associated constants. -/// ``` -/// use zenoh::prelude::Encoding; -/// -/// let encoding1 = Encoding::from("text/plain;utf-8"); -/// let encoding2 = Encoding::TEXT_PLAIN.with_schema("utf-8"); -/// assert_eq!(encoding1, encoding2); -/// assert_eq!("text/plain;utf-8", &encoding1.to_string()); -/// assert_eq!("text/plain;utf-8", &encoding2.to_string()); -/// ``` -#[repr(transparent)] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Encoding(zenoh_protocol::core::Encoding); - -impl Encoding { - const SCHEMA_SEP: char = ';'; - - // For compatibility purposes Zenoh reserves any prefix value from `0` to `1023` included. - - // - Primitives types supported in all Zenoh bindings - /// Just some bytes. - /// - /// Constant alias for string: `"zenoh/bytes"`. - pub const ZENOH_BYTES: Encoding = Self(zenoh_protocol::core::Encoding { - id: 0, - schema: None, - }); - /// A VLE-encoded signed little-endian integer. Either 8bit, 16bit, 32bit, or 64bit. Binary reprensentation uses two's complement. - /// - /// Constant alias for string: `"zenoh/int"`. - pub const ZENOH_INT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 1, - schema: None, - }); - /// A VLE-encoded little-endian unsigned integer. Either 8bit, 16bit, 32bit, or 64bit. - /// - /// Constant alias for string: `"zenoh/uint"`. - pub const ZENOH_UINT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 2, - schema: None, - }); - /// A VLE-encoded float. 
Either little-endian 32bit or 64bit. Binary representation uses *IEEE 754-2008* *binary32* or *binary64*, respectively. - /// - /// Constant alias for string: `"zenoh/float"`. - pub const ZENOH_FLOAT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 3, - schema: None, - }); - /// A boolean. `0` is `false`, `1` is `true`. Other values are invalid. - /// - /// Constant alias for string: `"zenoh/bool"`. - pub const ZENOH_BOOL: Encoding = Self(zenoh_protocol::core::Encoding { - id: 4, - schema: None, - }); - /// A UTF-8 string. - /// - /// Constant alias for string: `"zenoh/string"`. - pub const ZENOH_STRING: Encoding = Self(zenoh_protocol::core::Encoding { - id: 5, - schema: None, - }); - /// A zenoh error. - /// - /// Constant alias for string: `"zenoh/error"`. - pub const ZENOH_ERROR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 6, - schema: None, - }); - - // - Advanced types may be supported in some of the Zenoh bindings. - /// An application-specific stream of bytes. - /// - /// Constant alias for string: `"application/octet-stream"`. - pub const APPLICATION_OCTET_STREAM: Encoding = Self(zenoh_protocol::core::Encoding { - id: 7, - schema: None, - }); - /// A textual file. - /// - /// Constant alias for string: `"text/plain"`. - pub const TEXT_PLAIN: Encoding = Self(zenoh_protocol::core::Encoding { - id: 8, - schema: None, - }); - /// JSON data intended to be consumed by an application. - /// - /// Constant alias for string: `"application/json"`. - pub const APPLICATION_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 9, - schema: None, - }); - /// JSON data intended to be human readable. - /// - /// Constant alias for string: `"text/json"`. - pub const TEXT_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 10, - schema: None, - }); - /// A Common Data Representation (CDR)-encoded data. - /// - /// Constant alias for string: `"application/cdr"`. 
- pub const APPLICATION_CDR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 11, - schema: None, - }); - /// A Concise Binary Object Representation (CBOR)-encoded data. - /// - /// Constant alias for string: `"application/cbor"`. - pub const APPLICATION_CBOR: Encoding = Self(zenoh_protocol::core::Encoding { - id: 12, - schema: None, - }); - /// YAML data intended to be consumed by an application. - /// - /// Constant alias for string: `"application/yaml"`. - pub const APPLICATION_YAML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 13, - schema: None, - }); - /// YAML data intended to be human readable. - /// - /// Constant alias for string: `"text/yaml"`. - pub const TEXT_YAML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 14, - schema: None, - }); - /// JSON5 encoded data that are human readable. - /// - /// Constant alias for string: `"text/json5"`. - pub const TEXT_JSON5: Encoding = Self(zenoh_protocol::core::Encoding { - id: 15, - schema: None, - }); - /// A Python object serialized using [pickle](https://docs.python.org/3/library/pickle.html). - /// - /// Constant alias for string: `"application/python-serialized-object"`. - pub const APPLICATION_PYTHON_SERIALIZED_OBJECT: Encoding = - Self(zenoh_protocol::core::Encoding { - id: 16, - schema: None, - }); - /// An application-specific protobuf-encoded data. - /// - /// Constant alias for string: `"application/protobuf"`. - pub const APPLICATION_PROTOBUF: Encoding = Self(zenoh_protocol::core::Encoding { - id: 17, - schema: None, - }); - /// A Java serialized object. - /// - /// Constant alias for string: `"application/java-serialized-object"`. - pub const APPLICATION_JAVA_SERIALIZED_OBJECT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 18, - schema: None, - }); - /// An [openmetrics](https://github.com/OpenObservability/OpenMetrics) data, common used by [Prometheus](https://prometheus.io/). - /// - /// Constant alias for string: `"application/openmetrics-text"`. 
- pub const APPLICATION_OPENMETRICS_TEXT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 19, - schema: None, - }); - /// A Portable Network Graphics (PNG) image. - /// - /// Constant alias for string: `"image/png"`. - pub const IMAGE_PNG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 20, - schema: None, - }); - /// A Joint Photographic Experts Group (JPEG) image. - /// - /// Constant alias for string: `"image/jpeg"`. - pub const IMAGE_JPEG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 21, - schema: None, - }); - /// A Graphics Interchange Format (GIF) image. - /// - /// Constant alias for string: `"image/gif"`. - pub const IMAGE_GIF: Encoding = Self(zenoh_protocol::core::Encoding { - id: 22, - schema: None, - }); - /// A BitMap (BMP) image. - /// - /// Constant alias for string: `"image/bmp"`. - pub const IMAGE_BMP: Encoding = Self(zenoh_protocol::core::Encoding { - id: 23, - schema: None, - }); - /// A Web Protable (WebP) image. - /// - /// Constant alias for string: `"image/webp"`. - pub const IMAGE_WEBP: Encoding = Self(zenoh_protocol::core::Encoding { - id: 24, - schema: None, - }); - /// An XML file intended to be consumed by an application.. - /// - /// Constant alias for string: `"application/xml"`. - pub const APPLICATION_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 25, - schema: None, - }); - /// An encoded a list of tuples, each consisting of a name and a value. - /// - /// Constant alias for string: `"application/x-www-form-urlencoded"`. - pub const APPLICATION_X_WWW_FORM_URLENCODED: Encoding = Self(zenoh_protocol::core::Encoding { - id: 26, - schema: None, - }); - /// An HTML file. - /// - /// Constant alias for string: `"text/html"`. - pub const TEXT_HTML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 27, - schema: None, - }); - /// An XML file that is human readable. - /// - /// Constant alias for string: `"text/xml"`. 
- pub const TEXT_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 28, - schema: None, - }); - /// A CSS file. - /// - /// Constant alias for string: `"text/css"`. - pub const TEXT_CSS: Encoding = Self(zenoh_protocol::core::Encoding { - id: 29, - schema: None, - }); - /// A JavaScript file. - /// - /// Constant alias for string: `"text/javascript"`. - pub const TEXT_JAVASCRIPT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 30, - schema: None, - }); - /// A MarkDown file. - /// - /// Constant alias for string: `"text/markdown"`. - pub const TEXT_MARKDOWN: Encoding = Self(zenoh_protocol::core::Encoding { - id: 31, - schema: None, - }); - /// A CSV file. - /// - /// Constant alias for string: `"text/csv"`. - pub const TEXT_CSV: Encoding = Self(zenoh_protocol::core::Encoding { - id: 32, - schema: None, - }); - /// An application-specific SQL query. - /// - /// Constant alias for string: `"application/sql"`. - pub const APPLICATION_SQL: Encoding = Self(zenoh_protocol::core::Encoding { - id: 33, - schema: None, - }); - /// Constrained Application Protocol (CoAP) data intended for CoAP-to-HTTP and HTTP-to-CoAP proxies. - /// - /// Constant alias for string: `"application/coap-payload"`. - pub const APPLICATION_COAP_PAYLOAD: Encoding = Self(zenoh_protocol::core::Encoding { - id: 34, - schema: None, - }); - /// Defines a JSON document structure for expressing a sequence of operations to apply to a JSON document. - /// - /// Constant alias for string: `"application/json-patch+json"`. - pub const APPLICATION_JSON_PATCH_JSON: Encoding = Self(zenoh_protocol::core::Encoding { - id: 35, - schema: None, - }); - /// A JSON text sequence consists of any number of JSON texts, all encoded in UTF-8. - /// - /// Constant alias for string: `"application/json-seq"`. 
- pub const APPLICATION_JSON_SEQ: Encoding = Self(zenoh_protocol::core::Encoding { - id: 36, - schema: None, - }); - /// A JSONPath defines a string syntax for selecting and extracting JSON values from within a given JSON value. - /// - /// Constant alias for string: `"application/jsonpath"`. - pub const APPLICATION_JSONPATH: Encoding = Self(zenoh_protocol::core::Encoding { - id: 37, - schema: None, - }); - /// A JSON Web Token (JWT). - /// - /// Constant alias for string: `"application/jwt"`. - pub const APPLICATION_JWT: Encoding = Self(zenoh_protocol::core::Encoding { - id: 38, - schema: None, - }); - /// An application-specific MPEG-4 encoded data, either audio or video. - /// - /// Constant alias for string: `"application/mp4"`. - pub const APPLICATION_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 39, - schema: None, - }); - /// A SOAP 1.2 message serialized as XML 1.0. - /// - /// Constant alias for string: `"application/soap+xml"`. - pub const APPLICATION_SOAP_XML: Encoding = Self(zenoh_protocol::core::Encoding { - id: 40, - schema: None, - }); - /// A YANG-encoded data commonly used by the Network Configuration Protocol (NETCONF). - /// - /// Constant alias for string: `"application/yang"`. - pub const APPLICATION_YANG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 41, - schema: None, - }); - /// A MPEG-4 Advanced Audio Coding (AAC) media. - /// - /// Constant alias for string: `"audio/aac"`. - pub const AUDIO_AAC: Encoding = Self(zenoh_protocol::core::Encoding { - id: 42, - schema: None, - }); - /// A Free Lossless Audio Codec (FLAC) media. - /// - /// Constant alias for string: `"audio/flac"`. - pub const AUDIO_FLAC: Encoding = Self(zenoh_protocol::core::Encoding { - id: 43, - schema: None, - }); - /// An audio codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. - /// - /// Constant alias for string: `"audio/mp4"`. 
- pub const AUDIO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 44, - schema: None, - }); - /// An Ogg-encapsulated audio stream. - /// - /// Constant alias for string: `"audio/ogg"`. - pub const AUDIO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 45, - schema: None, - }); - /// A Vorbis-encoded audio stream. - /// - /// Constant alias for string: `"audio/vorbis"`. - pub const AUDIO_VORBIS: Encoding = Self(zenoh_protocol::core::Encoding { - id: 46, - schema: None, - }); - /// A h261-encoded video stream. - /// - /// Constant alias for string: `"video/h261"`. - pub const VIDEO_H261: Encoding = Self(zenoh_protocol::core::Encoding { - id: 47, - schema: None, - }); - /// A h263-encoded video stream. - /// - /// Constant alias for string: `"video/h263"`. - pub const VIDEO_H263: Encoding = Self(zenoh_protocol::core::Encoding { - id: 48, - schema: None, - }); - /// A h264-encoded video stream. - /// - /// Constant alias for string: `"video/h264"`. - pub const VIDEO_H264: Encoding = Self(zenoh_protocol::core::Encoding { - id: 49, - schema: None, - }); - /// A h265-encoded video stream. - /// - /// Constant alias for string: `"video/h265"`. - pub const VIDEO_H265: Encoding = Self(zenoh_protocol::core::Encoding { - id: 50, - schema: None, - }); - /// A h266-encoded video stream. - /// - /// Constant alias for string: `"video/h266"`. - pub const VIDEO_H266: Encoding = Self(zenoh_protocol::core::Encoding { - id: 51, - schema: None, - }); - /// A video codec defined in MPEG-1, MPEG-2, MPEG-4, or registered at the MP4 registration authority. - /// - /// Constant alias for string: `"video/mp4"`. - pub const VIDEO_MP4: Encoding = Self(zenoh_protocol::core::Encoding { - id: 52, - schema: None, - }); - /// An Ogg-encapsulated video stream. - /// - /// Constant alias for string: `"video/ogg"`. - pub const VIDEO_OGG: Encoding = Self(zenoh_protocol::core::Encoding { - id: 53, - schema: None, - }); - /// An uncompressed, studio-quality video stream. 
- /// - /// Constant alias for string: `"video/raw"`. - pub const VIDEO_RAW: Encoding = Self(zenoh_protocol::core::Encoding { - id: 54, - schema: None, - }); - /// A VP8-encoded video stream. - /// - /// Constant alias for string: `"video/vp8"`. - pub const VIDEO_VP8: Encoding = Self(zenoh_protocol::core::Encoding { - id: 55, - schema: None, - }); - /// A VP9-encoded video stream. - /// - /// Constant alias for string: `"video/vp9"`. - pub const VIDEO_VP9: Encoding = Self(zenoh_protocol::core::Encoding { - id: 56, - schema: None, - }); - - const ID_TO_STR: phf::Map = phf_map! { - 0u16 => "zenoh/bytes", - 1u16 => "zenoh/int", - 2u16 => "zenoh/uint", - 3u16 => "zenoh/float", - 4u16 => "zenoh/bool", - 5u16 => "zenoh/string", - 6u16 => "zenoh/error", - 7u16 => "application/octet-stream", - 8u16 => "text/plain", - 9u16 => "application/json", - 10u16 => "text/json", - 11u16 => "application/cdr", - 12u16 => "application/cbor", - 13u16 => "application/yaml", - 14u16 => "text/yaml", - 15u16 => "text/json5", - 16u16 => "application/python-serialized-object", - 17u16 => "application/protobuf", - 18u16 => "application/java-serialized-object", - 19u16 => "application/openmetrics-text", - 20u16 => "image/png", - 21u16 => "image/jpeg", - 22u16 => "image/gif", - 23u16 => "image/bmp", - 24u16 => "image/webp", - 25u16 => "application/xml", - 26u16 => "application/x-www-form-urlencoded", - 27u16 => "text/html", - 28u16 => "text/xml", - 29u16 => "text/css", - 30u16 => "text/javascript", - 31u16 => "text/markdown", - 32u16 => "text/csv", - 33u16 => "application/sql", - 34u16 => "application/coap-payload", - 35u16 => "application/json-patch+json", - 36u16 => "application/json-seq", - 37u16 => "application/jsonpath", - 38u16 => "application/jwt", - 39u16 => "application/mp4", - 40u16 => "application/soap+xml", - 41u16 => "application/yang", - 42u16 => "audio/aac", - 43u16 => "audio/flac", - 44u16 => "audio/mp4", - 45u16 => "audio/ogg", - 46u16 => "audio/vorbis", - 47u16 => "video/h261", 
- 48u16 => "video/h263", - 49u16 => "video/h264", - 50u16 => "video/h265", - 51u16 => "video/h266", - 52u16 => "video/mp4", - 53u16 => "video/ogg", - 54u16 => "video/raw", - 55u16 => "video/vp8", - 56u16 => "video/vp9", - }; - - const STR_TO_ID: phf::Map<&'static str, EncodingId> = phf_map! { - "zenoh/bytes" => 0u16, - "zenoh/int" => 1u16, - "zenoh/uint" => 2u16, - "zenoh/float" => 3u16, - "zenoh/bool" => 4u16, - "zenoh/string" => 5u16, - "zenoh/error" => 6u16, - "application/octet-stream" => 7u16, - "text/plain" => 8u16, - "application/json" => 9u16, - "text/json" => 10u16, - "application/cdr" => 11u16, - "application/cbor" => 12u16, - "application/yaml" => 13u16, - "text/yaml" => 14u16, - "text/json5" => 15u16, - "application/python-serialized-object" => 16u16, - "application/protobuf" => 17u16, - "application/java-serialized-object" => 18u16, - "application/openmetrics-text" => 19u16, - "image/png" => 20u16, - "image/jpeg" => 21u16, - "image/gif" => 22u16, - "image/bmp" => 23u16, - "image/webp" => 24u16, - "application/xml" => 25u16, - "application/x-www-form-urlencoded" => 26u16, - "text/html" => 27u16, - "text/xml" => 28u16, - "text/css" => 29u16, - "text/javascript" => 30u16, - "text/markdown" => 31u16, - "text/csv" => 32u16, - "application/sql" => 33u16, - "application/coap-payload" => 34u16, - "application/json-patch+json" => 35u16, - "application/json-seq" => 36u16, - "application/jsonpath" => 37u16, - "application/jwt" => 38u16, - "application/mp4" => 39u16, - "application/soap+xml" => 40u16, - "application/yang" => 41u16, - "audio/aac" => 42u16, - "audio/flac" => 43u16, - "audio/mp4" => 44u16, - "audio/ogg" => 45u16, - "audio/vorbis" => 46u16, - "video/h261" => 47u16, - "video/h263" => 48u16, - "video/h264" => 49u16, - "video/h265" => 50u16, - "video/h266" => 51u16, - "video/mp4" => 52u16, - "video/ogg" => 53u16, - "video/raw" => 54u16, - "video/vp8" => 55u16, - "video/vp9" => 56u16, - }; - - /// The default [`Encoding`] is 
[`ZENOH_BYTES`](Encoding::ZENOH_BYTES). - pub const fn default() -> Self { - Self::ZENOH_BYTES - } - - /// Set a schema to this encoding. Zenoh does not define what a schema is and its semantichs is left to the implementer. - /// E.g. a common schema for `text/plain` encoding is `utf-8`. - pub fn with_schema(mut self, s: S) -> Self - where - S: Into, - { - let s: String = s.into(); - self.0.schema = Some(s.into_boxed_str().into_boxed_bytes().into()); - self - } -} - -impl Default for Encoding { - fn default() -> Self { - Self::default() - } -} - -impl From<&str> for Encoding { - fn from(t: &str) -> Self { - let mut inner = zenoh_protocol::core::Encoding::empty(); - - // Check if empty - if t.is_empty() { - return Encoding(inner); - } - - // Everything before `;` may be mapped to a known id - let (id, schema) = t.split_once(Encoding::SCHEMA_SEP).unwrap_or((t, "")); - if let Some(id) = Encoding::STR_TO_ID.get(id).copied() { - inner.id = id; - }; - if !schema.is_empty() { - inner.schema = Some(ZSlice::from(schema.to_string().into_bytes())); - } - - Encoding(inner) - } -} - -impl From for Encoding { - fn from(value: String) -> Self { - Self::from(value.as_str()) - } -} - -impl FromStr for Encoding { - type Err = Infallible; - - fn from_str(s: &str) -> Result { - Ok(Self::from(s)) - } -} - -impl From<&Encoding> for Cow<'static, str> { - fn from(encoding: &Encoding) -> Self { - fn su8_to_str(schema: &[u8]) -> &str { - std::str::from_utf8(schema).unwrap_or("unknown(non-utf8)") - } - - match ( - Encoding::ID_TO_STR.get(&encoding.0.id).copied(), - encoding.0.schema.as_ref(), - ) { - // Perfect match - (Some(i), None) => Cow::Borrowed(i), - // ID and schema - (Some(i), Some(s)) => { - Cow::Owned(format!("{}{}{}", i, Encoding::SCHEMA_SEP, su8_to_str(s))) - } - // - (None, Some(s)) => Cow::Owned(format!( - "unknown({}){}{}", - encoding.0.id, - Encoding::SCHEMA_SEP, - su8_to_str(s) - )), - (None, None) => Cow::Owned(format!("unknown({})", encoding.0.id)), - } - } -} - -impl 
From for Cow<'static, str> { - fn from(encoding: Encoding) -> Self { - Self::from(&encoding) - } -} - -impl From for String { - fn from(encoding: Encoding) -> Self { - encoding.to_string() - } -} - -impl From for zenoh_protocol::core::Encoding { - fn from(value: Encoding) -> Self { - value.0 - } -} - -impl From for Encoding { - fn from(value: zenoh_protocol::core::Encoding) -> Self { - Self(value) - } -} - -impl fmt::Display for Encoding { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { - let s = Cow::from(self); - f.write_str(s.as_ref()) - } -} - -// - Encoding trait -pub trait EncodingMapping { - const ENCODING: Encoding; -} - -// Bytes -impl EncodingMapping for Payload { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -impl EncodingMapping for ZBuf { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -impl EncodingMapping for Vec { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -impl EncodingMapping for &[u8] { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -impl EncodingMapping for Cow<'_, [u8]> { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -// String -impl EncodingMapping for String { - const ENCODING: Encoding = Encoding::ZENOH_STRING; -} - -impl EncodingMapping for &str { - const ENCODING: Encoding = Encoding::ZENOH_STRING; -} - -impl EncodingMapping for Cow<'_, str> { - const ENCODING: Encoding = Encoding::ZENOH_STRING; -} - -// Zenoh unsigned integers -impl EncodingMapping for u8 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; -} - -impl EncodingMapping for u16 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; -} - -impl EncodingMapping for u32 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; -} - -impl EncodingMapping for u64 { - const ENCODING: Encoding = Encoding::ZENOH_UINT; -} - -impl EncodingMapping for usize { - const ENCODING: Encoding = Encoding::ZENOH_UINT; -} - -// Zenoh signed integers -impl EncodingMapping for i8 { - const ENCODING: Encoding = Encoding::ZENOH_INT; -} 
- -impl EncodingMapping for i16 { - const ENCODING: Encoding = Encoding::ZENOH_INT; -} - -impl EncodingMapping for i32 { - const ENCODING: Encoding = Encoding::ZENOH_INT; -} - -impl EncodingMapping for i64 { - const ENCODING: Encoding = Encoding::ZENOH_INT; -} - -impl EncodingMapping for isize { - const ENCODING: Encoding = Encoding::ZENOH_INT; -} - -// Zenoh floats -impl EncodingMapping for f32 { - const ENCODING: Encoding = Encoding::ZENOH_FLOAT; -} - -impl EncodingMapping for f64 { - const ENCODING: Encoding = Encoding::ZENOH_FLOAT; -} - -// Zenoh bool -impl EncodingMapping for bool { - const ENCODING: Encoding = Encoding::ZENOH_BOOL; -} - -// - Zenoh advanced types encoders/decoders -impl EncodingMapping for serde_json::Value { - const ENCODING: Encoding = Encoding::APPLICATION_JSON; -} - -impl EncodingMapping for serde_yaml::Value { - const ENCODING: Encoding = Encoding::APPLICATION_YAML; -} - -impl EncodingMapping for serde_cbor::Value { - const ENCODING: Encoding = Encoding::APPLICATION_CBOR; -} - -impl EncodingMapping for serde_pickle::Value { - const ENCODING: Encoding = Encoding::APPLICATION_PYTHON_SERIALIZED_OBJECT; -} - -// - Zenoh SHM -#[cfg(feature = "shared-memory")] -impl EncodingMapping for Arc { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -#[cfg(feature = "shared-memory")] -impl EncodingMapping for Box { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} - -#[cfg(feature = "shared-memory")] -impl EncodingMapping for SharedMemoryBuf { - const ENCODING: Encoding = Encoding::ZENOH_BYTES; -} diff --git a/zenoh/src/handlers.rs b/zenoh/src/handlers.rs index c5d2c6bb90..69828a5d7f 100644 --- a/zenoh/src/handlers.rs +++ b/zenoh/src/handlers.rs @@ -15,42 +15,36 @@ //! Callback handler trait. use crate::API_DATA_RECEPTION_CHANNEL_SIZE; -use std::sync::{Arc, Mutex, Weak}; -use zenoh_collections::RingBuffer as RingBufferInner; -use zenoh_result::ZResult; - /// An alias for `Arc`. 
pub type Dyn = std::sync::Arc; - /// An immutable callback function. pub type Callback<'a, T> = Dyn; -/// A type that can be converted into a [`Callback`]-handler pair. +/// A type that can be converted into a [`Callback`]-receiver pair. /// /// When Zenoh functions accept types that implement these, it intends to use the [`Callback`] as just that, -/// while granting you access to the handler through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. +/// while granting you access to the receiver through the returned value via [`std::ops::Deref`] and [`std::ops::DerefMut`]. /// /// Any closure that accepts `T` can be converted into a pair of itself and `()`. -pub trait IntoHandler<'a, T> { - type Handler; - - fn into_handler(self) -> (Callback<'a, T>, Self::Handler); +pub trait IntoCallbackReceiverPair<'a, T> { + type Receiver; + fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver); } - -impl<'a, T, F> IntoHandler<'a, T> for F +impl<'a, T, F> IntoCallbackReceiverPair<'a, T> for F where F: Fn(T) + Send + Sync + 'a, { - type Handler = (); - fn into_handler(self) -> (Callback<'a, T>, Self::Handler) { + type Receiver = (); + fn into_cb_receiver_pair(self) -> (Callback<'a, T>, Self::Receiver) { (Dyn::from(self), ()) } } +impl IntoCallbackReceiverPair<'static, T> + for (flume::Sender, flume::Receiver) +{ + type Receiver = flume::Receiver; -impl IntoHandler<'static, T> for (flume::Sender, flume::Receiver) { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -62,24 +56,18 @@ impl IntoHandler<'static, T> for (flume::Sender, flume::Re ) } } - -/// The default handler in Zenoh is a FIFO queue. 
pub struct DefaultHandler; - -impl IntoHandler<'static, T> for DefaultHandler { - type Handler = flume::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_handler() +impl IntoCallbackReceiverPair<'static, T> for DefaultHandler { + type Receiver = flume::Receiver; + fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { + flume::bounded(*API_DATA_RECEPTION_CHANNEL_SIZE).into_cb_receiver_pair() } } - -impl IntoHandler<'static, T> +impl IntoCallbackReceiverPair<'static, T> for (std::sync::mpsc::SyncSender, std::sync::mpsc::Receiver) { - type Handler = std::sync::mpsc::Receiver; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { + type Receiver = std::sync::mpsc::Receiver; + fn into_cb_receiver_pair(self) -> (Callback<'static, T>, Self::Receiver) { let (sender, receiver) = self; ( Dyn::new(move |t| { @@ -92,54 +80,6 @@ impl IntoHandler<'static, T> } } -/// Ring buffer with a limited queue size, which allows users to keep the last N data. -pub struct RingBuffer { - ring: Arc>>, -} - -impl RingBuffer { - /// Initialize the RingBuffer with the capacity size. - pub fn new(capacity: usize) -> Self { - RingBuffer { - ring: Arc::new(Mutex::new(RingBufferInner::new(capacity))), - } - } -} - -pub struct RingBufferHandler { - ring: Weak>>, -} - -impl RingBufferHandler { - pub fn recv(&self) -> ZResult> { - let Some(ring) = self.ring.upgrade() else { - bail!("The ringbuffer has been deleted."); - }; - let mut guard = ring.lock().map_err(|e| zerror!("{}", e))?; - Ok(guard.pull()) - } -} - -impl IntoHandler<'static, T> for RingBuffer { - type Handler = RingBufferHandler; - - fn into_handler(self) -> (Callback<'static, T>, Self::Handler) { - let receiver = RingBufferHandler { - ring: Arc::downgrade(&self.ring), - }; - ( - Dyn::new(move |t| match self.ring.lock() { - Ok(mut g) => { - // Eventually drop the oldest element. 
- g.push_force(t); - } - Err(e) => log::error!("{}", e), - }), - receiver, - ) - } -} - /// A function that can transform a [`FnMut`]`(T)` to /// a [`Fn`]`(T)` with the help of a [`Mutex`](std::sync::Mutex). pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { @@ -156,7 +96,7 @@ pub fn locked(fnmut: impl FnMut(T)) -> impl Fn(T) { /// - `callback` will never be called once `drop` has started. /// - `drop` will only be called **once**, and **after every** `callback` has ended. /// - The two previous guarantees imply that `call` and `drop` are never called concurrently. -pub struct CallbackDrop +pub struct CallbackPair where DropFn: FnMut() + Send + Sync + 'static, { @@ -164,7 +104,7 @@ where pub drop: DropFn, } -impl Drop for CallbackDrop +impl Drop for CallbackPair where DropFn: FnMut() + Send + Sync + 'static, { @@ -173,14 +113,14 @@ where } } -impl<'a, OnEvent, Event, DropFn> IntoHandler<'a, Event> for CallbackDrop +impl<'a, OnEvent, Event, DropFn> IntoCallbackReceiverPair<'a, Event> + for CallbackPair where OnEvent: Fn(Event) + Send + Sync + 'a, DropFn: FnMut() + Send + Sync + 'static, { - type Handler = (); - - fn into_handler(self) -> (Callback<'a, Event>, Self::Handler) { + type Receiver = (); + fn into_cb_receiver_pair(self) -> (Callback<'a, Event>, Self::Receiver) { (Dyn::from(move |evt| (self.callback)(evt)), ()) } } diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index 1e8da2c3c9..628f07611a 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -53,7 +53,7 @@ pub use zenoh_keyexpr::*; pub use zenoh_macros::{kedefine, keformat, kewrite}; use zenoh_protocol::{ core::{key_expr::canon::Canonizable, ExprId, WireExpr}, - network::{declare, DeclareBody, DeclareMode, Mapping, UndeclareKeyExpr}, + network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; @@ -185,7 +185,7 @@ impl<'a> KeyExpr<'a> { /// # Safety /// Key Expressions must follow some rules to be accepted by a Zenoh network. 
/// Messages addressed with invalid key expressions will be dropped. - pub unsafe fn from_str_unchecked(s: &'a str) -> Self { + pub unsafe fn from_str_uncheckend(s: &'a str) -> Self { keyexpr::from_str_unchecked(s).into() } @@ -664,10 +664,9 @@ impl SyncResolve for KeyExprUndeclaration<'_> { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(zenoh_protocol::network::Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, + ext_qos: declare::ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, + ext_nodeid: declare::ext::NodeIdType::default(), body: DeclareBody::UndeclareKeyExpr(UndeclareKeyExpr { id: expr_id }), }); diff --git a/zenoh/src/lib.rs b/zenoh/src/lib.rs index ea212485ec..d8820f7ad1 100644 --- a/zenoh/src/lib.rs +++ b/zenoh/src/lib.rs @@ -53,7 +53,7 @@ //! let session = zenoh::open(config::default()).res().await.unwrap(); //! let subscriber = session.declare_subscriber("key/expression").res().await.unwrap(); //! while let Ok(sample) = subscriber.recv_async().await { -//! println!("Received: {:?}", sample); +//! println!("Received: {}", sample); //! }; //! } //! ``` @@ -79,11 +79,9 @@ extern crate zenoh_core; #[macro_use] extern crate zenoh_result; -pub(crate) type Id = u32; - use git_version::git_version; use handlers::DefaultHandler; -#[cfg(feature = "unstable")] +#[zenoh_macros::unstable] use net::runtime::Runtime; use prelude::*; use scouting::ScoutBuilder; @@ -106,6 +104,7 @@ pub const FEATURES: &str = concat_enabled_features!( features = [ "auth_pubkey", "auth_usrpwd", + "complete_n", "shared-memory", "stats", "transport_multilink", @@ -134,12 +133,10 @@ pub use net::runtime; pub mod selector; #[deprecated = "This module is now a separate crate. 
Use the crate directly for shorter compile-times"] pub use zenoh_config as config; -pub(crate) mod encoding; pub mod handlers; pub mod info; #[cfg(feature = "unstable")] pub mod liveliness; -pub mod payload; pub mod plugins; pub mod prelude; pub mod publication; @@ -172,6 +169,23 @@ pub mod time { } } +/// A map of key/value (String,String) properties. +pub mod properties { + use super::prelude::Value; + pub use zenoh_collections::Properties; + + /// Convert a set of [`Properties`] into a [`Value`]. + /// For instance, Properties: `[("k1", "v1"), ("k2, v2")]` + /// is converted into Json: `{ "k1": "v1", "k2": "v2" }` + pub fn properties_to_json_value(props: &Properties) -> Value { + let json_map = props + .iter() + .map(|(k, v)| (k.clone(), serde_json::Value::String(v.clone()))) + .collect::>(); + serde_json::Value::Object(json_map).into() + } +} + /// Scouting primitives. pub mod scouting; diff --git a/zenoh/src/liveliness.rs b/zenoh/src/liveliness.rs index 23e1846741..9cf3b9c362 100644 --- a/zenoh/src/liveliness.rs +++ b/zenoh/src/liveliness.rs @@ -15,9 +15,8 @@ //! Liveliness primitives. //! //! 
see [`Liveliness`] -use zenoh_protocol::network::request; -use crate::{query::Reply, Id}; +use crate::query::Reply; #[zenoh_macros::unstable] use { @@ -136,9 +135,9 @@ impl<'a> Liveliness<'a> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session.liveliness().declare_subscriber("key/expression").res().await.unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// match sample.kind() { - /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr()), - /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr()), + /// match sample.kind { + /// SampleKind::Put => println!("New liveliness: {}", sample.key_expr), + /// SampleKind::Delete => println!("Lost liveliness: {}", sample.key_expr), /// } /// } /// # } @@ -175,7 +174,7 @@ impl<'a> Liveliness<'a> { /// let replies = session.liveliness().get("key/expression").res().await.unwrap(); /// while let Ok(reply) = replies.recv_async().await { /// if let Ok(sample) = reply.sample { - /// println!(">> Liveliness token {}", sample.key_expr()); + /// println!(">> Liveliness token {}", sample.key_expr); /// } /// } /// # } @@ -409,6 +408,7 @@ impl Drop for LivelinessToken<'_> { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() +/// .pull_mode() /// .res() /// .await /// .unwrap(); @@ -436,7 +436,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) + /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) /// .res() /// .await /// .unwrap(); @@ -496,7 +496,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a 
[`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// # Examples /// ```no_run @@ -512,7 +512,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); + /// println!("Received: {} {}", sample.key_expr, sample.value); /// } /// # } /// ``` @@ -520,7 +520,7 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> LivelinessSubscriberBuilder<'a, 'b, Handler> where - Handler: crate::handlers::IntoHandler<'static, Sample>, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, { let LivelinessSubscriberBuilder { session, @@ -538,30 +538,30 @@ impl<'a, 'b> LivelinessSubscriberBuilder<'a, 'b, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); session .declare_subscriber_inner( &key_expr, &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), Locality::default(), callback, - &SubscriberInfo::DEFAULT, + &SubscriberInfo::default(), ) 
.map(|sub_state| Subscriber { subscriber: SubscriberInner { @@ -577,8 +577,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for LivelinessSubscriberBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { type Future = Ready; @@ -607,8 +607,8 @@ where /// .unwrap(); /// while let Ok(token) = tokens.recv_async().await { /// match token.sample { -/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr().as_str()), -/// Err(err) => println!("Received (ERROR: '{:?}')", err.payload), +/// Ok(sample) => println!("Alive token ('{}')", sample.key_expr.as_str(),), +/// Err(err) => println!("Received (ERROR: '{}')", String::try_from(&err).unwrap()), /// } /// } /// # } @@ -693,7 +693,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// /// # Examples /// ``` @@ -717,7 +717,7 @@ impl<'a, 'b> LivelinessGetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> LivelinessGetBuilder<'a, 'b, Handler> where - Handler: IntoHandler<'static, Reply>, + Handler: IntoCallbackReceiverPair<'static, Reply>, { let LivelinessGetBuilder { session, @@ -745,32 +745,31 @@ impl<'a, 'b, Handler> LivelinessGetBuilder<'a, 'b, Handler> { impl Resolvable for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); + self.session .query( &self.key_expr?.into(), &Some(KeyExpr::from(*KE_PREFIX_LIVELINESS)), - QueryTarget::DEFAULT, - QueryConsolidation::DEFAULT, - request::ext::QoSType::REQUEST.into(), + QueryTarget::default(), + QueryConsolidation::default(), Locality::default(), self.timeout, None, #[cfg(feature = "unstable")] None, - SourceInfo::empty(), callback, ) .map(|_| receiver) @@ -779,8 +778,8 @@ where impl AsyncResolve for LivelinessGetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { type Future = Ready; diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 442c040624..5c473e8ad8 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -13,7 +13,7 @@ // use super::{EPrimitives, Primitives}; use crate::net::routing::{ - 
dispatcher::face::Face, + dispatcher::face::{Face, WeakFace}, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; @@ -25,7 +25,7 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; pub struct Mux { pub handler: TransportUnicast, - pub(crate) face: OnceLock, + pub(crate) face: OnceLock, pub(crate) interceptor: InterceptorsChain, } @@ -48,14 +48,14 @@ impl Primitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, face.clone()); let prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } @@ -72,14 +72,14 @@ impl Primitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, face.clone()); let prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } @@ -96,14 +96,14 @@ impl Primitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, 
face.clone()); let prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } @@ -120,14 +120,14 @@ impl Primitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, face.clone()); let prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } @@ -144,14 +144,14 @@ impl Primitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, face.clone()); let prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } @@ -199,14 +199,14 @@ impl EPrimitives for Mux { }; if self.interceptor.interceptors.is_empty() { let _ = self.handler.schedule(msg); - } else if let Some(face) = self.face.get() { + } else if let Some(face) = self.face.get().and_then(|f| f.upgrade()) { let ctx = RoutingContext::new_out(msg, face.clone()); let 
prefix = ctx .wire_expr() .and_then(|we| (!we.has_suffix()).then(|| ctx.prefix())) .flatten() .cloned(); - let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(face)); + let cache = prefix.as_ref().and_then(|p| p.get_egress_cache(&face)); if let Some(ctx) = self.interceptor.intercept(ctx, cache) { let _ = self.handler.schedule(ctx.msg); } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 3531dd2d88..765779ee40 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -20,13 +20,15 @@ use crate::KeyExpr; use std::any::Any; use std::collections::HashMap; use std::fmt; -use std::sync::Arc; +use std::sync::{Arc, Weak}; +use tokio_util::sync::CancellationToken; use zenoh_protocol::zenoh::RequestBody; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, }; use zenoh_sync::get_mut_unchecked; +use zenoh_task::TaskController; use zenoh_transport::multicast::TransportMulticast; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; @@ -41,10 +43,11 @@ pub struct FaceState { pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, - pub(crate) pending_queries: HashMap>, + pub(crate) pending_queries: HashMap, CancellationToken)>, pub(crate) mcast_group: Option, pub(crate) in_interceptors: Option>, pub(crate) hat: Box, + pub(crate) task_controller: TaskController, } impl FaceState { @@ -73,6 +76,7 @@ impl FaceState { mcast_group, in_interceptors, hat, + task_controller: TaskController::default(), }) } @@ -150,12 +154,36 @@ impl fmt::Display for FaceState { } } +#[derive(Clone)] +pub struct WeakFace { + pub(crate) tables: Weak, + pub(crate) state: Weak, +} + +impl WeakFace { + pub fn upgrade(&self) -> Option { + Some(Face { + tables: self.tables.upgrade()?, + state: self.state.upgrade()?, + }) + } +} + #[derive(Clone)] pub struct Face { 
pub(crate) tables: Arc, pub(crate) state: Arc, } +impl Face { + pub fn downgrade(&self) -> WeakFace { + WeakFace { + tables: Arc::downgrade(&self.tables), + state: Arc::downgrade(&self.state), + } + } +} + impl Primitives for Face { fn send_declare(&self, msg: zenoh_protocol::network::Declare) { let ctrl_lock = zlock!(self.tables.ctrl_lock); @@ -171,7 +199,6 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), - m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -182,7 +209,6 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), - m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -192,7 +218,6 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), - m.id, &m.wire_expr, &m.ext_info, msg.ext_nodeid.node_id, @@ -203,7 +228,6 @@ impl Primitives for Face { ctrl_lock.as_ref(), &self.tables, &mut self.state.clone(), - m.id, &m.ext_wire_expr.wire_expr, msg.ext_nodeid.node_id, ); @@ -211,7 +235,8 @@ impl Primitives for Face { zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareFinal(_m) => todo!(), + zenoh_protocol::network::DeclareBody::FinalInterest(_m) => todo!(), + zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(), } drop(ctrl_lock); } @@ -243,6 +268,12 @@ impl Primitives for Face { msg.ext_nodeid.node_id, ); } + RequestBody::Pull(_) => { + pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); + } + _ => { + log::error!("Unsupported request"); + } } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 89c6c40206..da6ae0c371 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -12,15 +12,17 @@ // 
ZettaScale Zenoh Team, // use super::face::FaceState; -use super::resource::{DataRoutes, Direction, Resource}; +use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::hat::HatTrait; +use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; +use std::sync::RwLock; use zenoh_core::zread; -use zenoh_protocol::core::key_expr::keyexpr; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::SubscriberId; +use zenoh_protocol::network::declare::Mode; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -32,24 +34,17 @@ pub(crate) fn declare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, - id: SubscriberId, expr: &WireExpr, sub_info: &SubscriberInfo, node_id: NodeId, ) { + log::debug!("Declare subscription {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() { Some(mut prefix) => { - log::debug!( - "{} Declare subscriber {} ({}{})", - face, - id, - prefix.expr(), - expr.suffix - ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -71,7 +66,7 @@ pub(crate) fn declare_subscription( (res, wtables) }; - hat_code.declare_subscription(&mut wtables, face, id, &mut res, sub_info, node_id); + hat_code.declare_subscription(&mut wtables, face, &mut res, sub_info, node_id); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -81,19 +76,17 @@ pub(crate) fn declare_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) 
.context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } - None => log::error!( - "{} Declare subscriber {} for unknown scope {}!", - face, - id, - expr.scope - ), + None => log::error!("Declare subscription for unknown scope {}!", expr.scope), } } @@ -101,57 +94,41 @@ pub(crate) fn undeclare_subscription( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, - id: SubscriberId, expr: &WireExpr, node_id: NodeId, ) { - let res = if expr.is_empty() { - None - } else { - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(res) => Some(res), - None => { - log::error!( - "{} Undeclare unknown subscriber {}{}!", - face, - prefix.expr(), - expr.suffix - ); - return; + log::debug!("Undeclare subscription {}", face); + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + + hat_code.undeclare_subscription(&mut wtables, face, &mut res, node_id); + + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes, matching_pulls) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } - }, - None => { - log::error!( - "{} Undeclare subscriber with unknown scope {}", - face, - expr.scope - ); - return; + Resource::clean(&mut res); + drop(wtables); } - } - }; - let mut wtables 
= zwrite!(tables.tables); - if let Some(mut res) = hat_code.undeclare_subscription(&mut wtables, face, id, res, node_id) { - log::debug!("{} Undeclare subscriber {} ({})", face, id, res.expr()); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } else { - log::error!("{} Undeclare unknown subscriber {}", face, id); + None => log::error!("Undeclare unknown subscription!"), + }, + None => log::error!("Undeclare subscription with unknown scope!"), } } @@ -215,6 +192,7 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { update_data_routes(tables, res); + update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { update_data_routes_from(tables, child); @@ -224,17 +202,22 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc( tables: &'a Tables, res: &'a Arc, -) -> Vec<(Arc, DataRoutes)> { +) -> Vec<(Arc, DataRoutes, Arc)> { let mut routes = vec![]; if res.context.is_some() { let mut expr = RoutingExpr::new(res, ""); - routes.push((res.clone(), compute_data_routes(tables, &mut expr))); + routes.push(( + res.clone(), + compute_data_routes(tables, &mut expr), + compute_matching_pulls(tables, &mut expr), + )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { let mut expr = RoutingExpr::new(&match_, ""); let match_routes = compute_data_routes(tables, &mut expr); - routes.push((match_, match_routes)); + let matching_pulls = compute_matching_pulls(tables, &mut expr); + 
routes.push((match_, match_routes, matching_pulls)); } } } @@ -244,10 +227,12 @@ pub(crate) fn compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { update_data_routes(tables, res); + update_matching_pulls(tables, res); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { update_data_routes(tables, &mut match_); + update_matching_pulls(tables, &mut match_); } } } @@ -262,6 +247,9 @@ pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc Arc { + let mut pull_caches = PullCaches::default(); + compute_matching_pulls_(tables, &mut pull_caches, expr); + Arc::new(pull_caches) +} + +pub(crate) fn update_matching_pulls(tables: &Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + if res_mut.context_mut().matching_pulls.is_none() { + res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); + } + compute_matching_pulls_( + tables, + get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), + &mut RoutingExpr::new(res, ""), + ); + } +} + +#[inline] +fn get_matching_pulls( + tables: &Tables, + res: &Option>, + expr: &mut RoutingExpr, +) -> Arc { + res.as_ref() + .and_then(|res| res.context.as_ref()) + .and_then(|ctx| ctx.matching_pulls.clone()) + .unwrap_or_else(|| compute_matching_pulls(tables, expr)) +} + +macro_rules! cache_data { + ( + $matching_pulls:expr, + $expr:expr, + $payload:expr + ) => { + for context in $matching_pulls.iter() { + get_mut_unchecked(&mut context.clone()) + .last_values + .insert($expr.full_expr().to_string(), $payload.clone()); + } + }; +} + #[cfg(feature = "stats")] macro_rules! inc_stats { ( @@ -353,19 +413,10 @@ macro_rules! 
inc_stats { match &$body { PushBody::Put(p) => { stats.[<$txrx _z_put_msgs>].[](1); - let mut n = p.payload.len(); - if let Some(a) = p.ext_attachment.as_ref() { - n += a.buffer.len(); - } - stats.[<$txrx _z_put_pl_bytes>].[](n); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); } - PushBody::Del(d) => { + PushBody::Del(_) => { stats.[<$txrx _z_del_msgs>].[](1); - let mut n = 0; - if let Some(a) = d.ext_attachment.as_ref() { - n += a.buffer.len(); - } - stats.[<$txrx _z_del_pl_bytes>].[](n); } } } @@ -385,8 +436,7 @@ pub fn full_reentrant_route_data( match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { Some(prefix) => { log::trace!( - "{} Route data for res {}{}", - face, + "Route data for res {}{}", prefix.expr(), expr.suffix.as_ref() ); @@ -406,10 +456,12 @@ pub fn full_reentrant_route_data( let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - if !route.is_empty() { + let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); + + if !(route.is_empty() && matching_pulls.is_empty()) { treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); - if route.len() == 1 { + if route.len() == 1 && matching_pulls.len() == 0 { let (outface, key_expr, context) = route.values().next().unwrap(); if tables .hat_code @@ -431,43 +483,26 @@ pub fn full_reentrant_route_data( payload, }) } - } else if tables.whatami == WhatAmI::Router { - let route = route - .values() - .filter(|(outface, _key_expr, _context)| { - tables - .hat_code - .egress_filter(&tables, face, outface, &mut expr) - }) - .cloned() - .collect::>(); + } else { + if !matching_pulls.is_empty() { + let lock = zlock!(tables.pull_caches_lock); + cache_data!(matching_pulls, expr, payload); + drop(lock); + } - drop(tables); - for (outface, key_expr, context) in route { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } + if tables.whatami == WhatAmI::Router { + let route = 
route + .values() + .filter(|(outface, _key_expr, _context)| { + tables + .hat_code + .egress_filter(&tables, face, outface, &mut expr) + }) + .cloned() + .collect::>(); - outface.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: context }, - payload: payload.clone(), - }) - } - } else { - drop(tables); - for (outface, key_expr, context) in route.values() { - if face.id != outface.id - && match (face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { + drop(tables); + for (outface, key_expr, context) in route { #[cfg(feature = "stats")] if !admin { inc_stats!(face, tx, user, payload) @@ -476,20 +511,110 @@ pub fn full_reentrant_route_data( } outface.primitives.send_push(Push { - wire_expr: key_expr.into(), + wire_expr: key_expr, ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_nodeid: ext::NodeIdType { node_id: context }, payload: payload.clone(), }) } + } else { + drop(tables); + for (outface, key_expr, context) in route.values() { + if face.id != outface.id + && match ( + face.mcast_group.as_ref(), + outface.mcast_group.as_ref(), + ) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } + + outface.primitives.send_push(Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + payload: payload.clone(), + }) + } + } } } } } } None => { - log::error!("{} Route data with unknown scope {}!", face, expr.scope); + log::error!("Route data with unknown scope {}!", expr.scope); } } } + +pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { + let tables = zread!(tables_ref); + match tables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, 
expr.suffix.as_ref()) { + Some(mut res) => { + let res = get_mut_unchecked(&mut res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => match &ctx.subs { + Some(_subinfo) => { + // let reliability = subinfo.reliability; + let lock = zlock!(tables.pull_caches_lock); + let route = get_mut_unchecked(ctx) + .last_values + .drain() + .map(|(name, sample)| { + ( + Resource::get_best_key(&tables.root_res, &name, face.id) + .to_owned(), + sample, + ) + }) + .collect::>(); + drop(lock); + drop(tables); + for (key_expr, payload) in route { + face.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos: ext::QoSType::push_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + payload, + }); + } + } + None => { + log::error!( + "Pull data for unknown subscription {} (no info)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + }, + None => { + log::error!( + "Pull data for unknown subscription {} (no context)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + } + } + None => { + log::error!( + "Pull data for unknown subscription {} (no resource)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + }, + None => { + log::error!("Pull data with unknown scope {}!", expr.scope); + } + }; +} diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 48df012b08..570377acd1 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -20,15 +20,19 @@ use crate::net::routing::RoutingContext; use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; +use std::time::Duration; +use tokio_util::sync::CancellationToken; use zenoh_config::WhatAmI; +use zenoh_protocol::core::key_expr::keyexpr; +use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; use zenoh_protocol::{ - core::{key_expr::keyexpr, Encoding, WireExpr}, + core::{Encoding, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfoType, 
QueryableId}, + declare::ext, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, - zenoh::{query::Consolidation, reply::ReplyBody, Put, Reply, RequestBody, ResponseBody}, + zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; use zenoh_util::Timed; @@ -42,24 +46,17 @@ pub(crate) fn declare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, - id: QueryableId, expr: &WireExpr, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, node_id: NodeId, ) { + log::debug!("Register queryable {}", face); let rtables = zread!(tables.tables); match rtables .get_mapping(face, &expr.scope, expr.mapping) .cloned() { Some(mut prefix) => { - log::debug!( - "{} Declare queryable {} ({}{})", - face, - id, - prefix.expr(), - expr.suffix - ); let res = Resource::get_resource(&prefix, &expr.suffix); let (mut res, mut wtables) = if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { @@ -81,7 +78,7 @@ pub(crate) fn declare_queryable( (res, wtables) }; - hat_code.declare_queryable(&mut wtables, face, id, &mut res, qabl_info, node_id); + hat_code.declare_queryable(&mut wtables, face, &mut res, qabl_info, node_id); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -98,12 +95,7 @@ pub(crate) fn declare_queryable( } drop(wtables); } - None => log::error!( - "{} Declare queryable {} for unknown scope {}!", - face, - id, - expr.scope - ), + None => log::error!("Declare queryable for unknown scope {}!", expr.scope), } } @@ -111,57 +103,37 @@ pub(crate) fn undeclare_queryable( hat_code: &(dyn HatTrait + Send + Sync), tables: &TablesLock, face: &mut Arc, - id: QueryableId, expr: &WireExpr, node_id: NodeId, ) { - let res = if expr.is_empty() { - None - } else { - let rtables = zread!(tables.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match 
Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(res) => Some(res), - None => { - log::error!( - "{} Undeclare unknown queryable {}{}!", - face, - prefix.expr(), - expr.suffix - ); - return; + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + + hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id); + + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); } - }, - None => { - log::error!( - "{} Undeclare queryable with unknown scope {}", - face, - expr.scope - ); - return; + Resource::clean(&mut res); + drop(wtables); } - } - }; - let mut wtables = zwrite!(tables.tables); - if let Some(mut res) = hat_code.undeclare_queryable(&mut wtables, face, id, res, node_id) { - log::debug!("{} Undeclare queryable {} ({})", face, id, res.expr()); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } else { - log::error!("{} Undeclare unknown queryable {}", face, id); + None => log::error!("Undeclare unknown queryable!"), + }, + None => log::error!("Undeclare queryable with unknown scope!"), } } @@ -266,7 +238,10 @@ fn 
insert_pending_query(outface: &mut Arc, query: Arc) -> Requ let outface_mut = get_mut_unchecked(outface); outface_mut.next_qid += 1; let qid = outface_mut.next_qid; - outface_mut.pending_queries.insert(qid, query); + outface_mut.pending_queries.insert( + qid, + (query, outface_mut.task_controller.get_cancellation_token()), + ); qid } @@ -287,11 +262,22 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); + #[cfg(feature = "complete_n")] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, *target) + }); + } + #[cfg(not(feature = "complete_n"))] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); + } } } route @@ -304,11 +290,46 @@ fn compute_final_route( .hat_code .egress_filter(tables, src_face, &qabl.direction.0, expr) { + #[cfg(feature = "complete_n")] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, *target) + }); + } + #[cfg(not(feature = "complete_n"))] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); + } + } + } + route + } + #[cfg(feature = "complete_n")] + TargetType::Complete(n) => { + let mut route = HashMap::new(); + let mut remaining = *n; + for qabl in qabls.iter() { + if qabl.complete > 0 + && tables + .hat_code + .egress_filter(tables, src_face, 
&qabl.direction.0, expr) + { + let nb = std::cmp::min(qabl.complete, remaining); route.entry(qabl.direction.0.id).or_insert_with(|| { let mut direction = qabl.direction.clone(); let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) + (direction, qid, TargetType::Complete(nb)) }); + remaining -= nb; + if remaining == 0 { + break; + } } } route @@ -319,11 +340,18 @@ fn compute_final_route( .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) { let mut route = HashMap::new(); - - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid)); - + #[cfg(feature = "complete_n")] + { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid, *target)); + } + #[cfg(not(feature = "complete_n"))] + { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid)); + } route } else { compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) @@ -339,6 +367,31 @@ struct QueryCleanup { qid: RequestId, } +impl QueryCleanup { + pub fn spawn_query_clean_up_task( + face: &Arc, + tables_ref: &Arc, + qid: u32, + timeout: Duration, + ) { + let mut cleanup = QueryCleanup { + tables: tables_ref.clone(), + face: Arc::downgrade(face), + qid, + }; + if let Some((_, cancellation_token)) = face.pending_queries.get(&qid) { + let c_cancellation_token = cancellation_token.clone(); + face.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, async move { + tokio::select! 
{ + _ = tokio::time::sleep(timeout) => { cleanup.run().await } + _ = c_cancellation_token.cancelled() => {} + } + }); + } + } +} + #[async_trait] impl Timed for QueryCleanup { async fn run(&mut self) { @@ -351,7 +404,7 @@ impl Timed for QueryCleanup { drop(tables_lock); log::warn!( "Didn't receive final reply {}:{} from {}: Timeout!", - query.src_face, + query.0.src_face, self.qid, face ); @@ -407,12 +460,20 @@ macro_rules! inc_req_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { + RequestBody::Put(p) => { + stats.[<$txrx _z_put_msgs>].[](1); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + } + RequestBody::Del(_) => { + stats.[<$txrx _z_del_msgs>].[](1); + } RequestBody::Query(q) => { stats.[<$txrx _z_query_msgs>].[](1); stats.[<$txrx _z_query_pl_bytes>].[]( q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } + RequestBody::Pull(_) => (), } } } @@ -431,30 +492,21 @@ macro_rules! inc_res_stats { if let Some(stats) = $face.stats.as_ref() { use zenoh_buffers::buffer::Buffer; match &$body { + ResponseBody::Put(p) => { + stats.[<$txrx _z_put_msgs>].[](1); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + } ResponseBody::Reply(r) => { stats.[<$txrx _z_reply_msgs>].[](1); - let mut n = 0; - match &r.payload { - ReplyBody::Put(p) => { - if let Some(a) = p.ext_attachment.as_ref() { - n += a.buffer.len(); - } - n += p.payload.len(); - } - ReplyBody::Del(d) => { - if let Some(a) = d.ext_attachment.as_ref() { - n += a.buffer.len(); - } - } - } - stats.[<$txrx _z_reply_pl_bytes>].[](n); + stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); } ResponseBody::Err(e) => { stats.[<$txrx _z_reply_msgs>].[](1); stats.[<$txrx _z_reply_pl_bytes>].[]( - e.payload.len() + e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), ); } + ResponseBody::Ack(_) => (), } } } @@ -517,19 +569,15 @@ pub fn route_query( for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { - 
consolidation: Consolidation::DEFAULT, // @TODO: handle Del case - ext_unknown: vec![], // @TODO: handle unknown extensions - payload: ReplyBody::Put(Put { - // @TODO: handle Del case - timestamp: None, // @TODO: handle timestamp - encoding: Encoding::empty(), // @TODO: handle encoding - ext_sinfo: None, // @TODO: handle source info - ext_attachment: None, // @TODO: expose it in the API - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], // @TODO: handle unknown extensions - payload, - }), + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + ext_consolidation: ConsolidationType::default(), + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_attachment: None, // @TODO: expose it in the API + ext_unknown: vec![], + payload, }); #[cfg(feature = "stats")] if !admin { @@ -545,11 +593,11 @@ pub fn route_query( rid: qid, wire_expr: wexpr, payload, - ext_qos: response::ext::QoSType::DECLARE, + ext_qos: response::ext::QoSType::declare_default(), ext_tstamp: None, ext_respid: Some(response::ext::ResponderIdType { zid, - eid: 0, // 0 is reserved for routing core + eid: 0, // @TODO use proper ResponderId (#703) }), }, expr.full_expr().to_string(), @@ -567,44 +615,72 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_qos: response::ext::QoSType::response_final_default(), ext_tstamp: None, }, expr.full_expr().to_string(), )); } else { - for ((outface, key_expr, context), qid) in route.values() { - let mut cleanup = QueryCleanup { - tables: tables_ref.clone(), - face: Arc::downgrade(outface), - qid: *qid, - }; - zenoh_runtime::ZRuntime::Net.spawn(async move { - tokio::time::sleep(timeout).await; - cleanup.run().await - }); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) + #[cfg(feature = "complete_n")] + { + for ((outface, key_expr, 
context), qid, t) in route.values() { + QueryCleanup::spawn_query_clean_up_task( + outface, tables_ref, *qid, timeout, + ); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, body) + } else { + inc_req_stats!(outface, tx, admin, body) + } + + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target: *t, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); } + } - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(RoutingContext::with_expr( - Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::REQUEST, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }, - expr.full_expr().to_string(), - )); + #[cfg(not(feature = "complete_n"))] + { + for ((outface, key_expr, context), qid) in route.values() { + QueryCleanup::spawn_query_clean_up_task( + outface, tables_ref, *qid, timeout, + ); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, body) + } else { + inc_req_stats!(outface, tx, admin, body) + } + + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target: target, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); + } } } } else { @@ -615,7 +691,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal 
{ rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_qos: response::ext::QoSType::response_final_default(), ext_tstamp: None, }, expr.full_expr().to_string(), @@ -624,9 +700,8 @@ pub fn route_query( } None => { log::error!( - "{} Route query with unknown scope {}! Send final reply.", - face, - expr.scope, + "Route query with unknown scope {}! Send final reply.", + expr.scope ); drop(rtables); face.primitives @@ -634,7 +709,7 @@ pub fn route_query( .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_qos: response::ext::QoSType::response_final_default(), ext_tstamp: None, }, "".to_string(), @@ -662,7 +737,7 @@ pub(crate) fn route_send_response( } match face.pending_queries.get(&qid) { - Some(query) => { + Some((query, _)) => { drop(queries_lock); #[cfg(feature = "stats")] @@ -681,7 +756,7 @@ pub(crate) fn route_send_response( rid: query.src_qid, wire_expr: key_expr.to_owned(), payload: body, - ext_qos: response::ext::QoSType::RESPONSE, + ext_qos: response::ext::QoSType::response_default(), ext_tstamp: None, ext_respid, }, @@ -708,7 +783,7 @@ pub(crate) fn route_send_response_final( drop(queries_lock); log::debug!( "Received final reply {}:{} from {}", - query.src_face, + query.0.src_face, qid, face ); @@ -731,7 +806,9 @@ pub(crate) fn finalize_pending_queries(tables_ref: &TablesLock, face: &mut Arc) { +pub(crate) fn finalize_pending_query(query: (Arc, CancellationToken)) { + let (query, cancellation_token) = query; + cancellation_token.cancel(); if let Some(query) = Arc::into_inner(query) { log::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); query @@ -741,7 +818,7 @@ pub(crate) fn finalize_pending_query(query: Arc) { .send_response_final(RoutingContext::with_expr( ResponseFinal { rid: query.src_qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_qos: response::ext::QoSType::response_final_default(), ext_tstamp: None, }, "".to_string(), diff 
--git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 941b37f916..1762ff2cb4 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -21,13 +21,16 @@ use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Weak}; use zenoh_config::WhatAmI; +#[cfg(feature = "complete_n")] +use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; +use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::{ core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ declare::{ - ext, queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, Declare, - DeclareBody, DeclareKeyExpr, DeclareMode, + ext, queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, Declare, + DeclareBody, DeclareKeyExpr, }, Mapping, }, @@ -38,6 +41,9 @@ pub(crate) type NodeId = u16; pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); pub(crate) type Route = HashMap; +#[cfg(feature = "complete_n")] +pub(crate) type QueryRoute = HashMap; +#[cfg(not(feature = "complete_n"))] pub(crate) type QueryRoute = HashMap; pub(crate) struct QueryTargetQabl { pub(crate) direction: Direction, @@ -45,13 +51,15 @@ pub(crate) struct QueryTargetQabl { pub(crate) distance: f64, } pub(crate) type QueryTargetQablSet = Vec; +pub(crate) type PullCaches = Vec>; pub(crate) struct SessionContext { pub(crate) face: Arc, pub(crate) local_expr_id: Option, pub(crate) remote_expr_id: Option, pub(crate) subs: Option, - pub(crate) qabl: Option, + pub(crate) qabl: Option, + pub(crate) last_values: HashMap, pub(crate) in_interceptor_cache: Option>, pub(crate) e_interceptor_cache: Option>, } @@ -113,6 +121,7 @@ impl QueryRoutes { pub(crate) struct ResourceContext { pub(crate) matches: Vec>, + pub(crate) matching_pulls: Option>, pub(crate) hat: Box, pub(crate) valid_data_routes: bool, pub(crate) data_routes: DataRoutes, @@ -124,6 +133,7 @@ impl ResourceContext { 
fn new(hat: Box) -> ResourceContext { ResourceContext { matches: Vec::new(), + matching_pulls: None, hat, valid_data_routes: false, data_routes: DataRoutes::default(), @@ -149,6 +159,14 @@ impl ResourceContext { pub(crate) fn disable_query_routes(&mut self) { self.valid_query_routes = false; } + + pub(crate) fn update_matching_pulls(&mut self, pulls: Arc) { + self.matching_pulls = Some(pulls); + } + + pub(crate) fn disable_matching_pulls(&mut self) { + self.matching_pulls = None; + } } pub struct Resource { @@ -275,6 +293,7 @@ impl Resource { let mutres = get_mut_unchecked(&mut resclone); if let Some(ref mut parent) = mutres.parent { if Arc::strong_count(res) <= 3 && res.childs.is_empty() { + // consider only childless resource held by only one external object (+ 1 strong count for resclone, + 1 strong count for res.parent to a total of 3 ) log::debug!("Unregister resource {}", res.expr()); if let Some(context) = mutres.context.as_mut() { for match_ in &mut context.matches { @@ -288,6 +307,7 @@ impl Resource { } } } + mutres.nonwild_prefix.take(); { get_mut_unchecked(parent).childs.remove(&res.suffix); } @@ -296,6 +316,17 @@ impl Resource { } } + pub fn close(self: &mut Arc) { + let r = get_mut_unchecked(self); + for c in r.childs.values_mut() { + Self::close(c); + } + r.parent.take(); + r.childs.clear(); + r.nonwild_prefix.take(); + r.session_ctxs.clear(); + } + #[cfg(test)] pub fn print_tree(from: &Arc) -> String { let mut result = from.expr(); @@ -427,6 +458,7 @@ impl Resource { remote_expr_id: None, subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) @@ -452,10 +484,9 @@ impl Resource { .insert(expr_id, nonwild_prefix.clone()); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: 
DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: nonwild_prefix.expr().into(), @@ -649,11 +680,7 @@ pub fn register_expr( let mut fullexpr = prefix.expr(); fullexpr.push_str(expr.suffix.as_ref()); if res.expr() != fullexpr { - log::error!( - "{} Resource {} remapped. Remapping unsupported!", - face, - expr_id - ); + log::error!("Resource {} remapped. Remapping unsupported!", expr_id); } } None => { @@ -690,6 +717,7 @@ pub fn register_expr( remote_expr_id: Some(expr_id), subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) @@ -703,11 +731,7 @@ pub fn register_expr( drop(wtables); } }, - None => log::error!( - "{} Declare resource with unknown scope {}!", - face, - expr.scope - ), + None => log::error!("Declare resource with unknown scope {}!", expr.scope), } } @@ -715,7 +739,7 @@ pub fn unregister_expr(tables: &TablesLock, face: &mut Arc, expr_id: let wtables = zwrite!(tables.tables); match get_mut_unchecked(face).remote_mappings.remove(&expr_id) { Some(mut res) => Resource::clean(&mut res), - None => log::error!("{} Undeclare unknown resource!", face), + None => log::error!("Undeclare unknown resource!"), } drop(wtables); } diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 491e6d4a62..10605b25b1 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -70,6 +70,7 @@ pub struct Tables { pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, pub(crate) interceptors: Vec, + pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // @TODO make this a Box } @@ -100,6 +101,7 @@ impl Tables { mcast_groups: vec![], mcast_faces: vec![], interceptors: interceptor_factories(config)?, + pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), }) @@ -170,6 +172,7 @@ pub fn close_face(tables: 
&TablesLock, face: &Weak) { match face.upgrade() { Some(mut face) => { log::debug!("Close {}", face); + face.task_controller.terminate_all(Duration::from_secs(10)); finalize_pending_queries(tables, &mut face); zlock!(tables.ctrl_lock).close_face(tables, &mut face); } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 8b7031152a..aa83c34f5d 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -20,7 +20,9 @@ use crate::{ net::routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, runtime::Runtime, }; @@ -38,13 +40,11 @@ use super::{ }; use std::{ any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, + collections::{HashMap, HashSet}, + sync::Arc, }; use zenoh_config::WhatAmI; -use zenoh_protocol::network::declare::{ - queryable::ext::QueryableInfoType, QueryableId, SubscriberId, -}; +use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -131,7 +131,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -159,7 +159,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -192,7 +192,11 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); + matches_data_routes.push(( + _match.clone(), + compute_data_routes(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), + )); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -200,10 +204,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -283,21 +290,19 @@ impl HatContext { } struct HatFace { - next_id: AtomicU32, // @TODO: manage rollover and uniqueness - local_subs: HashMap, SubscriberId>, - remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfoType)>, - remote_qabls: HashMap>, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, } impl HatFace { fn new() -> Self { Self { - next_id: AtomicU32::new(0), - local_subs: HashMap::new(), - remote_subs: HashMap::new(), + local_subs: HashSet::new(), + remote_subs: HashSet::new(), local_qabls: HashMap::new(), - remote_qabls: HashMap::new(), + remote_qabls: HashSet::new(), } } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 6c689d3336..8968ec8fc6 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -22,15 +22,13 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; 
-use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, Mode, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -45,20 +43,18 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains_key(res) + && !face_hat!(dst_face).local_subs.contains(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: *sub_info, }), @@ -87,19 +83,24 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => { - if ctx.subs.is_none() { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + 
get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { get_mut_unchecked(ctx).subs = Some(*sub_info); } - } + }, None => { res.session_ctxs.insert( face.id, @@ -109,6 +110,7 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -116,19 +118,20 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(id, res.clone()); + face_hat_mut!(face).remote_subs.insert(res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, &propa_sub_info, face); // This introduced a buffer overflow on windows // @TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -137,12 +140,11 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -168,20 +170,21 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", 
face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + face_hat_mut!(face).local_subs.remove(res); } } } @@ -191,54 +194,51 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - } + let mut client_subs = client_subs(res); + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if 
face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_subs.remove(res); } } } - fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, }; for src_face in tables .faces @@ -246,7 +246,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in face_hat!(src_face).remote_subs.values() { + for sub in &face_hat!(src_face).remote_subs { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -257,29 +257,27 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, id, res, sub_info); + declare_client_subscription(tables, face, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, - _res: Option>, + res: &mut Arc, _node_id: NodeId, - ) -> 
Option> { - forget_client_subscription(tables, face, id) + ) { + forget_client_subscription(tables, face, res); } fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in tables.faces.values() { - for sub in face_hat!(src_face).remote_subs.values() { + for sub in &face_hat!(src_face).remote_subs { subs.insert(sub.clone()); } } @@ -322,19 +320,20 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() - && match tables.whatami { + if let Some(subinfo) = &context.subs { + if match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 28e1d75460..e89cfb174d 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -22,34 +22,37 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::HashSet; -use std::sync::atomic::Ordering; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, 
network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { - this.complete = this.complete || info.complete; +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_qabl_info( - _tables: &Tables, - res: &Arc, - face: &Arc, -) -> QueryableInfoType { +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -66,7 +69,10 @@ fn local_qabl_info( accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } fn propagate_simple_queryable( @@ -77,28 +83,24 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); + let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) + && (current_info.is_none() || *current_info.unwrap() != info) && (src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == 
WhatAmI::Client) { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), (id, info)); + .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -112,13 +114,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -126,23 +128,23 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(id, res.clone()); + face_hat_mut!(face).remote_qabls.insert(res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_client_queryable(tables, face, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -162,20 +164,22 @@ fn client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for 
face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -185,38 +189,38 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face) - .remote_qabls - .values() - .any(|s| *s == *res) - { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); } + } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + let mut 
client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -224,14 +228,9 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -241,7 +240,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.values() { + for qabl in face_hat!(face).remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -256,29 +255,27 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, res, qabl_info); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, - _res: Option>, + res: &mut Arc, 
_node_id: NodeId, - ) -> Option> { - forget_client_queryable(tables, face, id) + ) { + forget_client_queryable(tables, face, res); } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in face_hat!(src_face).remote_qabls.values() { + for qabl in &face_hat!(src_face).remote_qabls { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 1b8ea8f7d4..35afaf30d7 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -36,7 +36,9 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -44,20 +46,17 @@ use crate::{ use std::{ any::Any, collections::{HashMap, HashSet}, - sync::{atomic::AtomicU32, Arc}, + sync::Arc, + time::Duration, }; -use tokio::task::JoinHandle; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - oam::id::OAM_LINKSTATE, - Oam, - }, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; +use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; mod network; @@ -114,7 +113,16 @@ struct HatTables { peer_subs: HashSet>, peer_qabls: HashSet>, peers_net: Option, - peers_trees_task: Option>, + peers_trees_task: Option, +} + +impl Drop for HatTables { + fn drop(&mut self) { + if self.peers_trees_task.is_some() { + let task = self.peers_trees_task.take().unwrap(); + task.terminate(Duration::from_secs(10)); + } + } } impl 
HatTables { @@ -128,24 +136,30 @@ impl HatTables { } fn schedule_compute_trees(&mut self, tables_ref: Arc) { + log::trace!("Schedule computations"); if self.peers_trees_task.is_none() { - let task = Some(zenoh_runtime::ZRuntime::Net.spawn(async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); - - log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs); - queries::queries_tree_change(&mut tables, &new_childs); - - hat_mut!(tables).peers_trees_task = None; - })); - self.peers_trees_task = task; + let task = TerminatableTask::spawn( + zenoh_runtime::ZRuntime::Net, + async move { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_childs); + queries::queries_tree_change(&mut tables, &new_childs); + + log::trace!("Computations completed"); + hat_mut!(tables).peers_trees_task = None; + }, + TerminatableTask::create_cancellation_token(), + ); + self.peers_trees_task = Some(task); } } } @@ -248,7 +262,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -276,7 +290,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -309,7 +323,11 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); + matches_data_routes.push(( + _match.clone(), + compute_data_routes(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), + )); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -317,10 +335,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -449,7 +470,7 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - peer_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -464,22 +485,20 @@ impl HatContext { struct HatFace { link_id: usize, - next_id: AtomicU32, // @TODO: manage rollover and uniqueness - local_subs: HashMap, SubscriberId>, - remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfoType)>, - remote_qabls: HashMap>, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - next_id: AtomicU32::new(0), - local_subs: HashMap::new(), - remote_subs: HashMap::new(), + local_subs: HashSet::new(), + remote_subs: HashSet::new(), local_qabls: HashMap::new(), - remote_qabls: HashMap::new(), + remote_qabls: HashSet::new(), } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 3c709a7963..4d3497c861 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ 
b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -15,6 +15,7 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; +use crate::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use petgraph::visit::{VisitMap, Visitable}; use std::convert::TryInto; @@ -115,7 +116,7 @@ pub(super) struct Network { pub(super) trees: Vec, pub(super) distances: Vec, pub(super) graph: petgraph::stable_graph::StableUnGraph, - pub(super) runtime: Runtime, + pub(super) runtime: WeakRuntime, } impl Network { @@ -155,7 +156,7 @@ impl Network { }], distances: vec![0.0], graph, - runtime, + runtime: Runtime::downgrade(&runtime), } } @@ -247,7 +248,7 @@ impl Network { whatami: self.graph[idx].whatami, locators: if details.locators { if idx == self.idx { - Some(self.runtime.get_locators()) + Some(self.runtime.upgrade().unwrap().get_locators()) } else { self.graph[idx].locators.clone() } @@ -269,7 +270,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::OAM, + ext_qos: oam::ext::QoSType::oam_default(), ext_tstamp: None, }) .into()) @@ -336,6 +337,7 @@ impl Network { pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + let strong_runtime = self.runtime.upgrade().unwrap(); let graph = &self.graph; let links = &mut self.links; @@ -487,13 +489,15 @@ impl Network { if !self.autoconnect.is_empty() { // Connect discovered peers if zenoh_runtime::ZRuntime::Net - .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) + .block_in_place( + strong_runtime.manager().get_transport_unicast(&zid), + ) .is_none() && self.autoconnect.matches(whatami) { if let Some(locators) = locators { - let runtime = self.runtime.clone(); - self.runtime.spawn(async move { + let runtime = 
strong_runtime.clone(); + strong_runtime.spawn(async move { // random backoff tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, @@ -607,15 +611,15 @@ impl Network { let node = &self.graph[*idx]; if let Some(whatami) = node.whatami { if zenoh_runtime::ZRuntime::Net - .block_in_place(self.runtime.manager().get_transport_unicast(&node.zid)) + .block_in_place(strong_runtime.manager().get_transport_unicast(&node.zid)) .is_none() && self.autoconnect.matches(whatami) { if let Some(locators) = &node.locators { - let runtime = self.runtime.clone(); + let runtime = strong_runtime.clone(); let zid = node.zid; let locators = locators.clone(); - self.runtime.spawn(async move { + strong_runtime.spawn(async move { // random backoff tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 81ea4bd8b1..0c05c39c7b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -25,15 +25,13 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, Mode, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -55,16 +53,17 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send subscription {} on {}", res.expr(), someface); + 
someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // Sourced subscriptions do not use ids + id: 0, // TODO wire_expr: key_expr, ext_info: *sub_info, }), @@ -88,20 +87,18 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - && !face_hat!(dst_face).local_subs.contains_key(res) + && !face_hat!(dst_face).local_subs.contains(res) && dst_face.whatami == WhatAmI::Client { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // TODO wire_expr: key_expr, ext_info: *sub_info, }), @@ -174,6 +171,7 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { + log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -201,19 +199,24 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); match 
res.session_ctxs.get_mut(&face.id) { - Some(ctx) => { - if ctx.subs.is_none() { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { get_mut_unchecked(ctx).subs = Some(*sub_info); } - } + }, None => { res.session_ctxs.insert( face.id, @@ -223,6 +226,7 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -230,19 +234,20 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(id, res.clone()); + face_hat_mut!(face).remote_subs.insert(res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_peer_subscription(tables, face, res, sub_info, zid); + register_peer_subscription(tables, face, res, &propa_sub_info, zid); } #[inline] @@ -284,16 +289,17 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send forget subscription {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // Sourced subscriptions do not use ids + id: 0, // TODO ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -309,20 +315,21 @@ fn send_forget_sourced_subscription_to_net_childs( fn 
propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + face_hat_mut!(face).local_subs.remove(res); } } } @@ -363,6 +370,11 @@ fn propagate_forget_sourced_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!( + "Unregister peer subscription {} (peer: {})", + res.expr(), + peer + ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -402,35 +414,37 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); - let mut client_subs = client_subs(res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - if client_subs.len() == 1 && !peer_subs { - let face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && 
res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - } + let mut client_subs = client_subs(res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + if client_subs.len() == 1 && !peer_subs { + let face = &mut client_subs[0]; + if face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_subs.remove(res); } } } @@ -438,34 +452,28 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + reliability: Reliability::Reliable, // @TODO + mode: 
Mode::Push, }; if face.whatami == WhatAmI::Client { for sub in &hat!(tables).peer_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // TODO wire_expr: key_expr, ext_info: sub_info, }), @@ -507,7 +515,8 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -577,7 +585,7 @@ impl HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, id, res, sub_info) + declare_client_subscription(tables, face, res, sub_info) } } @@ -585,23 +593,15 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option> { + ) { if face.whatami != WhatAmI::Client { - if let Some(mut res) = res { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, &mut res, &peer); - Some(res) - } else { - None - } - } else { - None + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, res, &peer); } } else { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, res); } } @@ -659,19 +659,20 @@ impl HatPubSubTrait for HatCode { ); for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() - && match tables.whatami { + if let Some(subinfo) = &context.subs { + if match 
tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 356793e3a3..b965a6f58b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -26,29 +26,36 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { - this.complete = this.complete || info.complete; +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; this.distance = std::cmp::min(this.distance, 
info.distance); this } -fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfoType { +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -61,14 +68,13 @@ fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfoT accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } -fn local_qabl_info( - tables: &Tables, - res: &Arc, - face: &Arc, -) -> QueryableInfoType { +fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { let info = if res.context.is_some() { res_hat!(res) .peer_qabls @@ -104,7 +110,10 @@ fn local_qabl_info( accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } #[inline] @@ -113,7 +122,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -124,16 +133,17 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send queryable {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // Sourced queryables do not use ids + id: 0, // @TODO use proper QueryableId 
(#703) wire_expr: key_expr, ext_info: *qabl_info, }), @@ -156,26 +166,22 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); + let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) + && (current_info.is_none() || *current_info.unwrap() != info) && dst_face.whatami == WhatAmI::Client { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), (id, info)); + .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -189,7 +195,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, source: &ZenohId, ) { @@ -227,13 +233,14 @@ fn register_peer_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { + log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); 
hat_mut!(tables).peer_qabls.insert(res.clone()); } @@ -252,7 +259,7 @@ fn declare_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, peer: ZenohId, ) { let face = Some(face); @@ -262,13 +269,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -276,23 +283,24 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(id, res.clone()); + face_hat_mut!(face).remote_qabls.insert(res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_client_queryable(tables, face, res, qabl_info); + let local_details = local_peer_qabl_info(tables, res); let zid = tables.zid; register_peer_queryable(tables, Some(face), res, &local_details, zid); @@ -337,16 +345,17 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send forget queryable {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: 
routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, + id: 0, // @TODO use proper QueryableId (#703) ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -362,20 +371,22 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -416,6 +427,7 @@ fn propagate_forget_sourced_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -455,42 +467,42 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face) - .remote_qabls - .values() - .any(|s| *s == *res) - { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); } + } - let mut client_qabls = 
client_qabls(res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } - if client_qabls.len() == 1 && !peer_qabls { - let face = &mut client_qabls[0]; - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + if client_qabls.len() == 1 && !peer_qabls { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -498,14 +510,9 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - 
undeclare_client_queryable(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -513,19 +520,15 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { for qabl in &hat!(tables).peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -594,7 +597,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -638,9 +641,8 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, node_id: NodeId, ) { if face.whatami != WhatAmI::Client { @@ -648,7 +650,7 @@ impl HatQueriesTrait for HatCode { declare_peer_queryable(tables, face, res, qabl_info, peer); } } else { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, res, qabl_info); } } @@ -656,23 +658,15 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option> { + ) { if 
face.whatami != WhatAmI::Client { - if let Some(mut res) = res { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, &mut res, &peer); - Some(res) - } else { - None - } - } else { - None + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, res, &peer); } } else { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, res); } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 70e94ac176..4fbf9c9e5d 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -31,10 +31,7 @@ use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ core::WireExpr, network::{ - declare::{ - queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo, QueryableId, - SubscriberId, - }, + declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, Oam, }, }; @@ -120,7 +117,6 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -129,10 +125,9 @@ pub(crate) trait HatPubSubTrait { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option>; + ); fn get_subscriptions(&self, tables: &Tables) -> Vec>; @@ -152,19 +147,17 @@ pub(crate) trait HatQueriesTrait { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, node_id: NodeId, ); fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option>; + ); fn get_queryables(&self, tables: &Tables) -> Vec>; diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs index 1096eba363..bbe7bd9024 100644 --- 
a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -14,6 +14,7 @@ use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; use crate::net::runtime::Runtime; +use crate::runtime::WeakRuntime; use petgraph::graph::NodeIndex; use std::convert::TryInto; use vec_map::VecMap; @@ -93,7 +94,7 @@ pub(super) struct Network { pub(super) idx: NodeIndex, pub(super) links: VecMap, pub(super) graph: petgraph::stable_graph::StableUnGraph, - pub(super) runtime: Runtime, + pub(super) runtime: WeakRuntime, } impl Network { @@ -124,7 +125,7 @@ impl Network { idx, links: VecMap::new(), graph, - runtime, + runtime: Runtime::downgrade(&runtime), } } @@ -191,7 +192,7 @@ impl Network { whatami: self.graph[idx].whatami, locators: if details.locators { if idx == self.idx { - Some(self.runtime.get_locators()) + Some(self.runtime.upgrade().unwrap().get_locators()) } else { self.graph[idx].locators.clone() } @@ -213,7 +214,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::OAM, + ext_qos: oam::ext::QoSType::oam_default(), ext_tstamp: None, }) .into()) @@ -266,6 +267,7 @@ impl Network { pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) { log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + let strong_runtime = self.runtime.upgrade().unwrap(); let graph = &self.graph; let links = &mut self.links; @@ -406,14 +408,14 @@ impl Network { if !self.autoconnect.is_empty() { // Connect discovered peers - if zenoh_runtime::ZRuntime::Net - .block_in_place(self.runtime.manager().get_transport_unicast(&zid)) + if zenoh_runtime::ZRuntime::Acceptor + .block_in_place(strong_runtime.manager().get_transport_unicast(&zid)) .is_none() && self.autoconnect.matches(whatami) { if let Some(locators) = locators { - let runtime = self.runtime.clone(); - self.runtime.spawn(async move { + let runtime = 
strong_runtime.clone(); + strong_runtime.spawn(async move { // random backoff tokio::time::sleep(std::time::Duration::from_millis( rand::random::() % 100, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1d87c2eb23..8dc4f15ada 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -23,7 +23,9 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -43,17 +45,14 @@ use super::{ }; use std::{ any::Any, - collections::HashMap, - sync::{atomic::AtomicU32, Arc}, + collections::{HashMap, HashSet}, + sync::Arc, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; -use zenoh_protocol::network::{ - declare::{QueryableId, SubscriberId}, - Oam, -}; +use zenoh_protocol::network::Oam; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfoType, oam::id::OAM_LINKSTATE}, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; @@ -178,7 +177,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -206,7 +205,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -239,7 +238,11 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); + matches_data_routes.push(( + _match.clone(), + compute_data_routes(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), + )); } for _match in qabls_matches.drain(..) { matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -247,10 +250,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -357,21 +363,19 @@ impl HatContext { } struct HatFace { - next_id: AtomicU32, // @TODO: manage rollover and uniqueness - local_subs: HashMap, SubscriberId>, - remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfoType)>, - remote_qabls: HashMap>, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, } impl HatFace { fn new() -> Self { Self { - next_id: AtomicU32::new(0), - local_subs: HashMap::new(), - remote_subs: HashMap::new(), + local_subs: HashSet::new(), + remote_subs: HashSet::new(), local_qabls: HashMap::new(), - remote_qabls: HashMap::new(), + remote_qabls: HashSet::new(), } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 5ac0b22846..8b670727dc 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -22,15 +22,13 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::{HashMap, 
HashSet}; -use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, Mode, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -45,20 +43,18 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains_key(res) + && !face_hat!(dst_face).local_subs.contains(res) && (src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: *sub_info, }), @@ -87,19 +83,24 @@ fn propagate_simple_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => { - if ctx.subs.is_none() { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { 
+ get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { get_mut_unchecked(ctx).subs = Some(*sub_info); } - } + }, None => { res.session_ctxs.insert( face.id, @@ -109,6 +110,7 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -116,19 +118,20 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(id, res.clone()); + face_hat_mut!(face).remote_subs.insert(res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; - propagate_simple_subscription(tables, res, sub_info, face); + propagate_simple_subscription(tables, res, &propa_sub_info, face); // This introduced a buffer overflow on windows // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] @@ -137,12 +140,11 @@ fn declare_client_subscription( .primitives .send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // @TODO use proper SubscriberId + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: res.expr().into(), ext_info: *sub_info, }), @@ -168,20 +170,21 @@ fn client_subs(res: &Arc) -> Vec> { fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", 
face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + face_hat_mut!(face).local_subs.remove(res); } } } @@ -191,34 +194,36 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); - let mut client_subs = client_subs(res); - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - if client_subs.len() == 1 { - let face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - } + let mut client_subs = client_subs(res); + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + if client_subs.len() == 1 { + let face = &mut client_subs[0]; + if 
face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_subs.remove(res); } } } @@ -226,19 +231,15 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, }; for src_face in tables .faces @@ -246,7 +247,7 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in face_hat!(src_face).remote_subs.values() { + for sub in &face_hat!(src_face).remote_subs { propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } } @@ -257,29 +258,27 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - declare_client_subscription(tables, face, id, res, sub_info); + declare_client_subscription(tables, face, res, sub_info); } fn undeclare_subscription( &self, tables: &mut Tables, face: &mut Arc, - id: 
SubscriberId, - _res: Option>, + res: &mut Arc, _node_id: NodeId, - ) -> Option> { - forget_client_subscription(tables, face, id) + ) { + forget_client_subscription(tables, face, res); } fn get_subscriptions(&self, tables: &Tables) -> Vec> { let mut subs = HashSet::new(); for src_face in tables.faces.values() { - for sub in face_hat!(src_face).remote_subs.values() { + for sub in &face_hat!(src_face).remote_subs { subs.insert(sub.clone()); } } @@ -322,19 +321,20 @@ impl HatPubSubTrait for HatCode { let mres = mres.upgrade().unwrap(); for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() - && match tables.whatami { + if let Some(subinfo) = &context.subs { + if match tables.whatami { WhatAmI::Router => context.face.whatami != WhatAmI::Router, _ => { source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index c2d62c7658..95d357fe11 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -22,34 +22,37 @@ use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; -use std::collections::HashSet; -use std::sync::atomic::Ordering; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use 
zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { - this.complete = this.complete || info.complete; +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_qabl_info( - _tables: &Tables, - res: &Arc, - face: &Arc, -) -> QueryableInfoType { +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { res.session_ctxs .values() .fold(None, |accu, ctx| { @@ -66,7 +69,10 @@ fn local_qabl_info( accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } fn propagate_simple_queryable( @@ -77,28 +83,24 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); + let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) + && (current_info.is_none() || *current_info.unwrap() != info) && 
(src_face.is_none() || src_face.as_ref().unwrap().whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client) { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), (id, info)); + .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -112,13 +114,13 @@ fn propagate_simple_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -126,23 +128,23 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(id, res.clone()); + face_hat_mut!(face).remote_qabls.insert(res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_client_queryable(tables, face, res, qabl_info); propagate_simple_queryable(tables, res, Some(face)); } @@ -162,20 +164,22 @@ fn 
client_qabls(res: &Arc) -> Vec> { fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -185,38 +189,38 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face) - .remote_qabls - .values() - .any(|s| *s == *res) - { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); } + } - let mut client_qabls = client_qabls(res); - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - if client_qabls.len() == 1 { - let face = &mut client_qabls[0]; - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: 
DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + let mut client_qabls = client_qabls(res); + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + if client_qabls.len() == 1 { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -224,14 +228,9 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -241,7 +240,7 @@ pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) .cloned() .collect::>>() { - for qabl in face_hat!(face).remote_qabls.values() { + for qabl in face_hat!(face).remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -256,29 +255,27 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, _node_id: NodeId, ) { - declare_client_queryable(tables, face, id, res, qabl_info); + declare_client_queryable(tables, face, 
res, qabl_info); } fn undeclare_queryable( &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, - _res: Option>, + res: &mut Arc, _node_id: NodeId, - ) -> Option> { - forget_client_queryable(tables, face, id) + ) { + forget_client_queryable(tables, face, res); } fn get_queryables(&self, tables: &Tables) -> Vec> { let mut qabls = HashSet::new(); for src_face in tables.faces.values() { - for qabl in face_hat!(src_face).remote_qabls.values() { + for qabl in &face_hat!(src_face).remote_qabls { qabls.insert(qabl.clone()); } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 52f067037e..030b8da4b4 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -40,7 +40,9 @@ use crate::{ routing::{ dispatcher::face::Face, hat::TREES_COMPUTATION_DELAY_MS, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -49,20 +51,17 @@ use std::{ any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, - sync::{atomic::AtomicU32, Arc}, + sync::Arc, + time::Duration, }; -use tokio::task::JoinHandle; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{ - declare::{queryable::ext::QueryableInfoType, QueryableId, SubscriberId}, - oam::id::OAM_LINKSTATE, - Oam, - }, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; +use zenoh_task::TerminatableTask; use zenoh_transport::unicast::TransportUnicast; mod network; @@ -123,11 +122,24 @@ struct HatTables { routers_net: Option, peers_net: Option, shared_nodes: Vec, - routers_trees_task: Option>, - peers_trees_task: Option>, + routers_trees_task: Option, + peers_trees_task: Option, 
router_peers_failover_brokering: bool, } +impl Drop for HatTables { + fn drop(&mut self) { + if self.peers_trees_task.is_some() { + let task = self.peers_trees_task.take().unwrap(); + task.terminate(Duration::from_secs(10)); + } + if self.routers_trees_task.is_some() { + let task = self.routers_trees_task.take().unwrap(); + task.terminate(Duration::from_secs(10)); + } + } +} + impl HatTables { fn new(router_peers_failover_brokering: bool) -> Self { Self { @@ -234,44 +246,51 @@ impl HatTables { .as_ref() .map(|net| { let links = net.get_links(peer1); + log::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); HatTables::failover_brokering_to(links, peer2) }) .unwrap_or(false) } fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + log::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) { - let task = Some(zenoh_runtime::ZRuntime::Net.spawn(async move { - tokio::time::sleep(std::time::Duration::from_millis( - *TREES_COMPUTATION_DELAY_MS, - )) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; + let task = TerminatableTask::spawn( + zenoh_runtime::ZRuntime::Net, + async move { + tokio::time::sleep(std::time::Duration::from_millis( + *TREES_COMPUTATION_DELAY_MS, + )) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; - log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - 
queries::queries_tree_change(&mut tables, &new_childs, net_type); + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; - })); + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, + }; + }, + TerminatableTask::create_cancellation_token(), + ); match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, + WhatAmI::Router => self.routers_trees_task = Some(task), + _ => self.peers_trees_task = Some(task), }; } } @@ -417,7 +436,7 @@ impl HatBaseTrait for HatCode { face.local_mappings.clear(); let mut subs_matches = vec![]; - for (_id, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -445,7 +464,7 @@ impl HatBaseTrait for HatCode { } let mut qabls_matches = vec![]; - for (_, mut res) in face + for mut res in face .hat .downcast_mut::() .unwrap() @@ -478,7 +497,11 @@ impl HatBaseTrait for HatCode { let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { let mut expr = RoutingExpr::new(&_match, ""); - matches_data_routes.push((_match.clone(), compute_data_routes(&rtables, &mut expr))); + matches_data_routes.push(( + _match.clone(), + compute_data_routes(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), + )); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); @@ -486,10 +509,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { @@ -748,8 +774,8 @@ impl HatBaseTrait for HatCode { struct HatContext { router_subs: HashSet, peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, + router_qabls: HashMap, + peer_qabls: HashMap, } impl HatContext { @@ -765,22 +791,20 @@ impl HatContext { struct HatFace { link_id: usize, - next_id: AtomicU32, // @TODO: manage rollover and uniqueness - local_subs: HashMap, SubscriberId>, - remote_subs: HashMap>, - local_qabls: HashMap, (QueryableId, QueryableInfoType)>, - remote_qabls: HashMap>, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, } impl HatFace { fn new() -> Self { Self { link_id: 0, - next_id: AtomicU32::new(0), - local_subs: HashMap::new(), - remote_subs: HashMap::new(), + local_subs: HashSet::new(), + remote_subs: HashSet::new(), local_qabls: HashMap::new(), - remote_qabls: HashMap::new(), + remote_qabls: HashSet::new(), } } } diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index e359930a35..7ff42f1dc3 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -274,7 +274,7 @@ impl Network { Ok(NetworkBody::OAM(Oam { id: OAM_LINKSTATE, body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::OAM, + ext_qos: oam::ext::QoSType::oam_default(), ext_tstamp: None, }) .into()) diff --git 
a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 7646f8a1c5..d840d85665 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -25,15 +25,13 @@ use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; -use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::SubscriberId; use zenoh_protocol::{ core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareMode, DeclareSubscriber, UndeclareSubscriber, + DeclareSubscriber, Mode, UndeclareSubscriber, }, }; use zenoh_sync::get_mut_unchecked; @@ -55,16 +53,17 @@ fn send_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send subscription {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // Sourced subscriptions do not use ids + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: *sub_info, }), @@ -90,7 +89,7 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || (dst_face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS))) - && !face_hat!(dst_face).local_subs.contains_key(res) + && !face_hat!(dst_face).local_subs.contains(res) && if full_peer_net { dst_face.whatami == WhatAmI::Client } else { @@ -100,17 +99,15 @@ fn propagate_simple_subscription_to( || 
hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: *sub_info, }), @@ -192,6 +189,11 @@ fn register_router_subscription( if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { + log::debug!( + "Register router subscription {} (router: {})", + res.expr(), + router + ); res_hat_mut!(res).router_subs.insert(router); hat_mut!(tables).router_subs.insert(res.clone()); } @@ -228,6 +230,7 @@ fn register_peer_subscription( if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { + log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); res_hat_mut!(res).peer_subs.insert(peer); hat_mut!(tables).peer_subs.insert(res.clone()); } @@ -245,7 +248,8 @@ fn declare_peer_subscription( peer: ZenohId, ) { register_peer_subscription(tables, face, res, sub_info, peer); - let propa_sub_info = *sub_info; + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; let zid = tables.zid; register_router_subscription(tables, face, res, &propa_sub_info, zid); } @@ -253,19 +257,24 @@ fn declare_peer_subscription( fn register_client_subscription( _tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { // Register subscription { let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", 
res.expr(), face); match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => { - if ctx.subs.is_none() { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { get_mut_unchecked(ctx).subs = Some(*sub_info); } - } + }, None => { res.session_ctxs.insert( face.id, @@ -275,6 +284,7 @@ fn register_client_subscription( remote_expr_id: None, subs: Some(*sub_info), qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }), @@ -282,19 +292,20 @@ fn register_client_subscription( } } } - face_hat_mut!(face).remote_subs.insert(id, res.clone()); + face_hat_mut!(face).remote_subs.insert(res.clone()); } fn declare_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, ) { - register_client_subscription(tables, face, id, res, sub_info); + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; let zid = tables.zid; - register_router_subscription(tables, face, res, sub_info, zid); + register_router_subscription(tables, face, res, &propa_sub_info, zid); } #[inline] @@ -345,16 +356,17 @@ fn send_forget_sourced_subscription_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send forget subscription {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context.unwrap_or(0), }, body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // Sourced subscriptions do not use ids + id: 0, // @TODO use proper SubscriberId (#703) ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -370,20 
+382,21 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + face_hat_mut!(face).local_subs.remove(res); } } } @@ -400,7 +413,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< .collect::>>() { if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains_key(res) + && face_hat!(face).local_subs.contains(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() @@ -409,21 +422,21 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - if let Some(id) = face_hat_mut!(&mut face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + 
ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(&mut face).local_subs.remove(res); } } } @@ -466,6 +479,11 @@ fn propagate_forget_sourced_subscription( } fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router subscription {} (router: {})", + res.expr(), + router + ); res_hat_mut!(res).router_subs.retain(|sub| sub != router); if res_hat!(res).router_subs.is_empty() { @@ -504,6 +522,11 @@ fn forget_router_subscription( } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!( + "Unregister peer subscription {} (peer: {})", + res.expr(), + peer + ); res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); if res_hat!(res).peer_subs.is_empty() { @@ -545,38 +568,40 @@ pub(super) fn undeclare_client_subscription( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face).remote_subs.values().any(|s| *s == *res) { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); - let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; - if !(face.whatami == WhatAmI::Client && 
res.expr().starts_with(PREFIX_LIVELINESS)) { - if let Some(id) = face_hat_mut!(face).local_subs.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } - } + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_subs.remove(res); } } } @@ -584,34 +609,28 @@ pub(super) fn undeclare_client_subscription( fn forget_client_subscription( tables: &mut Tables, face: &mut Arc, - id: SubscriberId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_subs.remove(&id) { - undeclare_client_subscription(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = 
SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, }; if face.whatami == WhatAmI::Client { for sub in &hat!(tables).router_subs { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: sub_info, }), @@ -630,17 +649,15 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { && hat!(tables).failover_brokering(s.face.zid, face.zid))) })) { - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face).local_subs.insert(sub.clone(), id); + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: sub_info, }), @@ -718,6 +735,7 @@ pub(super) fn pubsub_tree_change( if *sub == tree_id { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, }; send_sourced_subscription_to_net_childs( 
tables, @@ -742,7 +760,7 @@ pub(super) fn pubsub_tree_change( pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in face_hat!(src_face).remote_subs.values() { + for res in &face_hat!(src_face).remote_subs { let client_subs = res .session_ctxs .values() @@ -754,7 +772,7 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { + if face_hat!(dst_face).local_subs.contains(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -772,16 +790,16 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }) }; if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber( UndeclareSubscriber { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, }, ), }, @@ -792,20 +810,19 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(dst_face).local_subs.insert(res.clone(), id); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = 
Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO compute proper reliability to propagate from reliability of known subscribers + mode: Mode::Push, }; dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: key_expr, ext_info: sub_info, }), @@ -859,7 +876,6 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, @@ -876,10 +892,10 @@ impl HatPubSubTrait for HatCode { declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, face, id, res, sub_info) + declare_client_subscription(tables, face, res, sub_info) } } - _ => declare_client_subscription(tables, face, id, res, sub_info), + _ => declare_client_subscription(tables, face, res, sub_info), } } @@ -887,40 +903,25 @@ impl HatPubSubTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: SubscriberId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option> { + ) { match face.whatami { WhatAmI::Router => { - if let Some(mut res) = res { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_subscription(tables, face, &mut res, &router); - Some(res) - } else { - None - } - } else { - None + if let Some(router) = get_router(tables, face, node_id) { + forget_router_subscription(tables, face, res, &router) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(mut res) = res { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_subscription(tables, face, &mut res, &peer); - Some(res) - } 
else { - None - } - } else { - None + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, res, &peer) } } else { - forget_client_subscription(tables, face, id) + forget_client_subscription(tables, face, res) } } - _ => forget_client_subscription(tables, face, id), + _ => forget_client_subscription(tables, face, res), } } @@ -1001,11 +1002,14 @@ impl HatPubSubTrait for HatCode { if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { - if context.subs.is_some() && context.face.whatami != WhatAmI::Router { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); + if let Some(subinfo) = &context.subs { + if context.face.whatami != WhatAmI::Router && subinfo.mode == Mode::Push { + route.entry(*sid).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } } } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index e647cf2dc7..12338eb339 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -26,29 +26,36 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::atomic::Ordering; use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; -use zenoh_protocol::network::declare::QueryableId; use zenoh_protocol::{ core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfoType, Declare, DeclareBody, - DeclareMode, DeclareQueryable, UndeclareQueryable, + common::ext::WireExprType, ext, 
queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, }, }; use zenoh_sync::get_mut_unchecked; +#[cfg(feature = "complete_n")] #[inline] -fn merge_qabl_infos(mut this: QueryableInfoType, info: &QueryableInfoType) -> QueryableInfoType { - this.complete = this.complete || info.complete; +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; this.distance = std::cmp::min(this.distance, info.distance); this } -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { let info = if hat!(tables).full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|_| { res_hat!(res) @@ -80,10 +87,13 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoType { +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { let info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -113,14 +123,13 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfoTy accu } }) - .unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } -fn local_qabl_info( - tables: &Tables, - res: &Arc, - face: &Arc, -) -> QueryableInfoType { +fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { let mut info = if res.context.is_some() { res_hat!(res) .router_qabls @@ -172,7 +181,10 @@ fn local_qabl_info( accu } }) - 
.unwrap_or(QueryableInfoType::DEFAULT) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) } #[inline] @@ -181,7 +193,7 @@ fn send_sourced_queryable_to_net_childs( net: &Network, childs: &[NodeIndex], res: &Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, routing_context: NodeId, ) { @@ -192,16 +204,17 @@ fn send_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { let key_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send queryable {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // Sourced queryables do not use ids + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: *qabl_info, }), @@ -225,9 +238,9 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current = face_hat!(dst_face).local_qabls.get(res); + let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current.is_none() || current.unwrap().1 != info) + && (current_info.is_none() || *current_info.unwrap() != info) && if full_peers_net { dst_face.whatami == WhatAmI::Client } else { @@ -239,21 +252,17 @@ fn propagate_simple_queryable( .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) } { - let id = current - .map(|c| c.0) - .unwrap_or(face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst)); face_hat_mut!(&mut dst_face) .local_qabls - .insert(res.clone(), (id, info)); + .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); 
dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -267,7 +276,7 @@ fn propagate_simple_queryable( fn propagate_sourced_queryable( tables: &Tables, res: &Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, source: &ZenohId, net_type: WhatAmI, @@ -306,13 +315,18 @@ fn register_router_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, router: ZenohId, ) { let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { + log::debug!( + "Register router queryable {} (router: {})", + res.expr(), + router, + ); res_hat_mut!(res).router_qabls.insert(router, *qabl_info); hat_mut!(tables).router_qabls.insert(res.clone()); } @@ -344,7 +358,7 @@ fn declare_router_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, router: ZenohId, ) { register_router_queryable(tables, Some(face), res, qabl_info, router); @@ -354,13 +368,14 @@ fn register_peer_queryable( tables: &mut Tables, face: Option<&mut Arc>, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, peer: ZenohId, ) { let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { + log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); hat_mut!(tables).peer_qabls.insert(res.clone()); } 
@@ -374,7 +389,7 @@ fn declare_peer_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, peer: ZenohId, ) { let mut face = Some(face); @@ -387,13 +402,13 @@ fn declare_peer_queryable( fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { // Register queryable { let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { Arc::new(SessionContext { face: face.clone(), @@ -401,23 +416,23 @@ fn register_client_queryable( remote_expr_id: None, subs: None, qabl: None, + last_values: HashMap::new(), in_interceptor_cache: None, e_interceptor_cache: None, }) })) .qabl = Some(*qabl_info); } - face_hat_mut!(face).remote_qabls.insert(id, res.clone()); + face_hat_mut!(face).remote_qabls.insert(res.clone()); } fn declare_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, ) { - register_client_queryable(tables, face, id, res, qabl_info); + register_client_queryable(tables, face, res, qabl_info); let local_details = local_router_qabl_info(tables, res); let zid = tables.zid; register_router_queryable(tables, Some(face), res, &local_details, zid); @@ -471,16 +486,17 @@ fn send_forget_sourced_queryable_to_net_childs( if src_face.is_none() || someface.id != src_face.unwrap().id { let wire_expr = Resource::decl_key(res, &mut someface); + log::debug!("Send forget queryable {} on {}", res.expr(), someface); + someface.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { node_id: routing_context, }, body: 
DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // Sourced queryables do not use ids + id: 0, // @TODO use proper QueryableId (#703) ext_wire_expr: WireExprType { wire_expr }, }), }, @@ -496,20 +512,22 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, }), }, res.expr(), )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -535,21 +553,21 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { - if let Some((id, _)) = face_hat_mut!(&mut face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // 
@TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(&mut face).local_qabls.remove(res); } } } @@ -592,6 +610,11 @@ fn propagate_forget_sourced_queryable( } fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router queryable {} (router: {})", + res.expr(), + router, + ); res_hat_mut!(res).router_qabls.remove(router); if res_hat!(res).router_qabls.is_empty() { @@ -630,6 +653,7 @@ fn forget_router_queryable( } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); if res_hat!(res).peer_qabls.is_empty() { @@ -675,44 +699,44 @@ pub(super) fn undeclare_client_queryable( face: &mut Arc, res: &mut Arc, ) { - if !face_hat_mut!(face) - .remote_qabls - .values() - .any(|s| *s == *res) - { - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); } + } - let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); + let mut client_qabls = client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } + if 
client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; - if let Some((id, _)) = face_hat_mut!(face).local_qabls.remove(res) { - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), - }), - }, - res.expr(), - )); - } + if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -720,14 +744,9 @@ pub(super) fn undeclare_client_queryable( fn forget_client_queryable( tables: &mut Tables, face: &mut Arc, - id: QueryableId, -) -> Option> { - if let Some(mut res) = face_hat_mut!(face).remote_qabls.remove(&id) { - undeclare_client_queryable(tables, face, &mut res); - Some(res) - } else { - None - } + res: &mut Arc, +) { + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -735,19 +754,15 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { 
for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -768,19 +783,15 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { })) { let info = local_qabl_info(tables, qabl, face); - let id = face_hat!(face).next_id.fetch_add(1, Ordering::SeqCst); - face_hat_mut!(face) - .local_qabls - .insert(qabl.clone(), (id, info)); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -842,7 +853,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { - for res in face_hat!(src_face).remote_qabls.values() { 
+ for res in &face_hat!(src_face).remote_qabls { let client_qabls = res .session_ctxs .values() @@ -854,7 +865,7 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if let Some(id) = face_hat!(dst_face).local_subs.get(res).cloned() { + if face_hat!(dst_face).local_qabls.contains_key(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = hat!(tables) @@ -872,16 +883,16 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }) }; if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareQueryable( UndeclareQueryable { - id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { wire_expr }, }, ), }, @@ -893,19 +904,17 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; let info = local_qabl_info(tables, res, dst_face); - let id = face_hat!(dst_face).next_id.fetch_add(1, Ordering::SeqCst); face_hat_mut!(dst_face) .local_qabls - .insert(res.clone(), (id, info)); + .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(RoutingContext::with_expr( Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id, + id: 0, // @TODO use proper QueryableId (#703) wire_expr: key_expr, ext_info: info, }), @@ -971,7 +980,7 @@ fn insert_target_for_qabls( tables: &Tables, net: &Network, source: NodeId, - qabls: &HashMap, + qabls: &HashMap, complete: bool, ) { if net.trees.len() > source as usize { @@ -1015,9 +1024,8 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, res: &mut Arc, - qabl_info: &QueryableInfoType, + qabl_info: &QueryableInfo, node_id: NodeId, ) { match face.whatami { @@ -1032,10 +1040,10 @@ impl HatQueriesTrait for HatCode { declare_peer_queryable(tables, face, res, qabl_info, peer) } } else { - declare_client_queryable(tables, face, id, res, qabl_info) + declare_client_queryable(tables, face, res, qabl_info) } } - _ => declare_client_queryable(tables, face, id, res, qabl_info), + _ => declare_client_queryable(tables, face, res, qabl_info), } } @@ -1043,40 +1051,25 @@ impl HatQueriesTrait for HatCode { &self, tables: &mut Tables, face: &mut Arc, - id: QueryableId, - res: Option>, + res: &mut Arc, node_id: NodeId, - ) -> Option> { + ) { match face.whatami { WhatAmI::Router => { - if let Some(mut res) = res { - if let Some(router) = get_router(tables, face, node_id) { - forget_router_queryable(tables, face, &mut res, &router); - Some(res) - } else { - None - } - } else { - None + if let Some(router) = get_router(tables, face, node_id) { + forget_router_queryable(tables, face, res, &router) } } WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { - if let Some(mut res) = res { - if let Some(peer) = get_peer(tables, face, node_id) { - forget_peer_queryable(tables, face, &mut res, &peer); - Some(res) - } else { - None - } - } else { - None + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, res, &peer) } } else { - forget_client_queryable(tables, face, id) + forget_client_queryable(tables, face, res) } } - _ => 
forget_client_queryable(tables, face, id), + _ => forget_client_queryable(tables, face, res), } } diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 77f51c16b3..afc49003f8 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -116,8 +116,9 @@ impl RoutingContext { DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), DeclareBody::DeclareToken(m) => Some(&m.wire_expr), DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), - DeclareBody::DeclareInterest(m) => m.wire_expr.as_ref(), - DeclareBody::DeclareFinal(_) => None, + DeclareBody::DeclareInterest(m) => Some(&m.wire_expr), + DeclareBody::FinalInterest(_) => None, + DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), }, NetworkBody::OAM(_) => None, } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index d67a2baa9d..c80d3bdc09 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -155,7 +155,7 @@ impl Router { state: newface, }; - let _ = mux.face.set(face.clone()); + let _ = mux.face.set(Face::downgrade(&face)); ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 62c38b16ee..16e44f072c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -12,15 +12,12 @@ // ZettaScale Zenoh Team, use super::routing::dispatcher::face::Face; use super::Runtime; -use crate::encoding::Encoding; use crate::key_expr::KeyExpr; use crate::net::primitives::Primitives; -use crate::payload::Payload; use crate::plugins::sealed::{self as plugins}; -use crate::prelude::sync::SyncResolve; +use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; use crate::queryable::QueryInner; -use crate::sample::builder::ValueBuilderTrait; use crate::value::Value; use log::{error, trace}; use 
serde_json::json; @@ -32,16 +29,13 @@ use std::sync::Mutex; use zenoh_buffers::buffer::SplitBuffer; use zenoh_config::{ConfigValidator, ValidatedMap, WhatAmI}; use zenoh_plugin_trait::{PluginControl, PluginStatus}; -use zenoh_protocol::network::declare::QueryableId; +use zenoh_protocol::core::key_expr::keyexpr; use zenoh_protocol::{ - core::{ - key_expr::{keyexpr, OwnedKeyExpr}, - ExprId, WireExpr, ZenohId, EMPTY_EXPR_ID, - }, + core::{key_expr::OwnedKeyExpr, ExprId, KnownEncoding, WireExpr, ZenohId, EMPTY_EXPR_ID}, network::{ - declare::{queryable::ext::QueryableInfoType, subscriber::ext::SubscriberInfo}, - ext, Declare, DeclareBody, DeclareMode, DeclareQueryable, DeclareSubscriber, Push, Request, - Response, ResponseFinal, + declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + ext, Declare, DeclareBody, DeclareQueryable, DeclareSubscriber, Push, Request, Response, + ResponseFinal, }, zenoh::{PushBody, RequestBody}, }; @@ -60,7 +54,6 @@ type Handler = Arc; pub struct AdminSpace { zid: ZenohId, - queryable_id: QueryableId, primitives: Mutex>>, mappings: Mutex>, handlers: HashMap, @@ -191,7 +184,6 @@ impl AdminSpace { }); let admin = Arc::new(AdminSpace { zid: runtime.zid(), - queryable_id: runtime.next_id(), primitives: Mutex::new(None), mappings: Mutex::new(HashMap::new()), handlers, @@ -277,27 +269,27 @@ impl AdminSpace { zlock!(admin.primitives).replace(primitives.clone()); primitives.send_declare(Declare { - mode: DeclareMode::Push, - - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: runtime.next_id(), + id: 0, // @TODO use proper QueryableId (#703) wire_expr: [&root_key, "/**"].concat().into(), - ext_info: QueryableInfoType::DEFAULT, + ext_info: QueryableInfo { + complete: 0, + distance: 0, + }, }), }); primitives.send_declare(Declare { - mode: 
DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: runtime.next_id(), + id: 0, // @TODO use proper SubscriberId (#703) wire_expr: [&root_key, "/config/**"].concat().into(), - ext_info: SubscriberInfo::DEFAULT, + ext_info: SubscriberInfo::default(), }), }); } @@ -388,60 +380,57 @@ impl Primitives for AdminSpace { fn send_request(&self, msg: Request) { trace!("recv Request {:?}", msg); - match msg.payload { - RequestBody::Query(query) => { - let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); - { - let conf = self.context.runtime.state.config.lock(); - if !conf.adminspace.permissions().read { - log::error!( + if let RequestBody::Query(query) = msg.payload { + let primitives = zlock!(self.primitives).as_ref().unwrap().clone(); + { + let conf = self.context.runtime.state.config.lock(); + if !conf.adminspace.permissions().read { + log::error!( "Received GET on '{}' but adminspace.permissions.read=false in configuration", msg.wire_expr ); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; - } + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + return; } + } - let key_expr = match self.key_expr_to_string(&msg.wire_expr) { - Ok(key_expr) => key_expr.into_owned(), - Err(e) => { - log::error!("Unknown KeyExpr: {}", e); - primitives.send_response_final(ResponseFinal { - rid: msg.id, - ext_qos: ext::QoSType::RESPONSE_FINAL, - ext_tstamp: None, - }); - return; - } - }; - - let zid = self.zid; - let parameters = query.parameters.to_owned(); - let query = Query { - inner: Arc::new(QueryInner { - key_expr: key_expr.clone(), - parameters, - value: query - .ext_body - .map(|b| 
Value::from(b.payload).encoding(b.encoding)), - qid: msg.id, - zid, - primitives, - #[cfg(feature = "unstable")] - attachment: query.ext_attachment.map(Into::into), - }), - eid: self.queryable_id, - }; - - for (key, handler) in &self.handlers { - if key_expr.intersects(key) { - handler(&self.context, query.clone()); - } + let key_expr = match self.key_expr_to_string(&msg.wire_expr) { + Ok(key_expr) => key_expr.into_owned(), + Err(e) => { + log::error!("Unknown KeyExpr: {}", e); + primitives.send_response_final(ResponseFinal { + rid: msg.id, + ext_qos: ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + return; + } + }; + + let zid = self.zid; + let parameters = query.parameters.to_owned(); + let query = Query { + inner: Arc::new(QueryInner { + key_expr: key_expr.clone(), + parameters, + value: query + .ext_body + .map(|b| Value::from(b.payload).encoding(b.encoding)), + qid: msg.id, + zid, + primitives, + #[cfg(feature = "unstable")] + attachment: query.ext_attachment.map(Into::into), + }), + }; + + for (key, handler) in &self.handlers { + if key_expr.intersects(key) { + handler(&self.context, query.clone()); } } } @@ -572,17 +561,13 @@ fn router_data(context: &AdminContext, query: Query) { } log::trace!("AdminSpace router_data: {:?}", json); - let payload = match Payload::try_from(json) { - Ok(p) => p, - Err(e) => { - log::error!("Error serializing AdminSpace reply: {:?}", e); - return; - } - }; if let Err(e) = query - .reply(reply_key, payload) - .encoding(Encoding::APPLICATION_JSON) - .res_sync() + .reply(Ok(Sample::new( + reply_key, + Value::from(json.to_string().as_bytes().to_vec()) + .encoding(KnownEncoding::AppJson.into()), + ))) + .res() { log::error!("Error sending AdminSpace reply: {:?}", e); } @@ -611,7 +596,13 @@ zenoh_build{{version="{}"}} 1 .openmetrics_text(), ); - if let Err(e) = query.reply(reply_key, metrics).res() { + if let Err(e) = query + .reply(Ok(Sample::new( + reply_key, + 
Value::from(metrics.as_bytes().to_vec()).encoding(KnownEncoding::TextPlain.into()), + ))) + .res() + { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -624,7 +615,17 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Router)) + .reply(Ok(Sample::new( + reply_key, + Value::from( + tables + .hat_code + .info(&tables, WhatAmI::Router) + .as_bytes() + .to_vec(), + ) + .encoding(KnownEncoding::TextPlain.into()), + ))) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -639,7 +640,17 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.state.router.tables.tables); if let Err(e) = query - .reply(reply_key, tables.hat_code.info(&tables, WhatAmI::Peer)) + .reply(Ok(Sample::new( + reply_key, + Value::from( + tables + .hat_code + .info(&tables, WhatAmI::Peer) + .as_bytes() + .to_vec(), + ) + .encoding(KnownEncoding::TextPlain.into()), + ))) .res() { log::error!("Error sending AdminSpace reply: {:?}", e); @@ -656,7 +667,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -673,7 +684,7 @@ fn queryables_data(context: &AdminContext, query: Query) { )) .unwrap(); if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(key, Payload::empty()).res() { + if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -691,13 +702,8 @@ fn plugins_data(context: &AdminContext, query: Query) { log::debug!("plugin status: {:?}", status); let key = root_key.join(status.name()).unwrap(); let status = 
serde_json::to_value(status).unwrap(); - match Payload::try_from(status) { - Ok(zbuf) => { - if let Err(e) = query.reply(key, zbuf).res_sync() { - log::error!("Error sending AdminSpace reply: {:?}", e); - } - } - Err(e) => log::debug!("Admin query error: {}", e), + if let Err(e) = query.reply(Ok(Sample::new(key, Value::from(status)))).res() { + log::error!("Error sending AdminSpace reply: {:?}", e); } } } @@ -714,7 +720,13 @@ fn plugins_status(context: &AdminContext, query: Query) { with_extended_string(plugin_key, &["/__path__"], |plugin_path_key| { if let Ok(key_expr) = KeyExpr::try_from(plugin_path_key.clone()) { if query.key_expr().intersects(&key_expr) { - if let Err(e) = query.reply(key_expr, plugin.path()).res() { + if let Err(e) = query + .reply(Ok(Sample::new( + key_expr, + Value::from(plugin.path()).encoding(KnownEncoding::AppJson.into()), + ))) + .res() + { log::error!("Error sending AdminSpace reply: {:?}", e); } } @@ -736,13 +748,13 @@ fn plugins_status(context: &AdminContext, query: Query) { Ok(Ok(responses)) => { for response in responses { if let Ok(key_expr) = KeyExpr::try_from(response.key) { - match Payload::try_from(response.value) { - Ok(zbuf) => { - if let Err(e) = query.reply(key_expr, zbuf).res_sync() { - log::error!("Error sending AdminSpace reply: {:?}", e); - } - }, - Err(e) => log::debug!("Admin query error: {}", e), + if let Err(e) = query.reply(Ok(Sample::new( + key_expr, + Value::from(response.value).encoding(KnownEncoding::AppJson.into()), + ))) + .res() + { + log::error!("Error sending AdminSpace reply: {:?}", e); } } else { log::error!("Error: plugin {} replied with an invalid key", plugin_key); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 1d81811c76..9314186b2e 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -29,8 +29,8 @@ pub use adminspace::AdminSpace; use futures::stream::StreamExt; use futures::Future; use std::any::Any; -use std::sync::atomic::{AtomicU32, 
Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; +use std::time::Duration; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use uhlc::{HLCBuilder, HLC}; @@ -40,6 +40,7 @@ use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; +use zenoh_task::TaskController; use zenoh_transport::{ multicast::TransportMulticast, unicast::TransportUnicast, TransportEventHandler, TransportManager, TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, @@ -48,7 +49,6 @@ use zenoh_transport::{ struct RuntimeState { zid: ZenohId, whatami: WhatAmI, - next_id: AtomicU32, metadata: serde_json::Value, router: Arc, config: Notifier, @@ -56,7 +56,17 @@ struct RuntimeState { transport_handlers: std::sync::RwLock>>, locators: std::sync::RwLock>, hlc: Option>, - token: CancellationToken, + task_controller: TaskController, +} + +pub struct WeakRuntime { + state: Weak, +} + +impl WeakRuntime { + pub fn upgrade(&self) -> Option { + self.state.upgrade().map(|state| Runtime { state }) + } } #[derive(Clone)] @@ -99,7 +109,7 @@ impl Runtime { let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)?); let handler = Arc::new(RuntimeTransportEventHandler { - runtime: std::sync::RwLock::new(None), + runtime: std::sync::RwLock::new(WeakRuntime { state: Weak::new() }), }); let transport_manager = TransportManager::builder() @@ -115,7 +125,6 @@ impl Runtime { state: Arc::new(RuntimeState { zid, whatami, - next_id: AtomicU32::new(1), // 0 is reserved for routing core metadata, router, config: config.clone(), @@ -123,22 +132,33 @@ impl Runtime { transport_handlers: std::sync::RwLock::new(vec![]), locators: std::sync::RwLock::new(vec![]), hlc, - token: CancellationToken::new(), + task_controller: TaskController::default(), }), }; - *handler.runtime.write().unwrap() = Some(runtime.clone()); + *handler.runtime.write().unwrap() = 
Runtime::downgrade(&runtime); get_mut_unchecked(&mut runtime.state.router.clone()).init_link_state(runtime.clone()); let receiver = config.subscribe(); + let token = runtime.get_cancellation_token(); runtime.spawn({ let runtime2 = runtime.clone(); async move { let mut stream = receiver.into_stream(); - while let Some(event) = stream.next().await { - if &*event == "connect/endpoints" { - if let Err(e) = runtime2.update_peers().await { - log::error!("Error updating peers: {}", e); + loop { + tokio::select! { + res = stream.next() => { + match res { + Some(event) => { + if &*event == "connect/endpoints" { + if let Err(e) = runtime2.update_peers().await { + log::error!("Error updating peers: {}", e); + } + } + }, + None => { break; } + } } + _ = token.cancelled() => { break; } } } } @@ -156,16 +176,27 @@ impl Runtime { zwrite!(self.state.transport_handlers).push(handler); } - #[inline] - pub fn next_id(&self) -> u32 { - self.state.next_id.fetch_add(1, Ordering::SeqCst) - } - pub async fn close(&self) -> ZResult<()> { log::trace!("Runtime::close())"); // TODO: Check this whether is able to terminate all spawned task by Runtime::spawn - self.state.token.cancel(); + self.state + .task_controller + .terminate_all(Duration::from_secs(10)); self.manager().close().await; + // clean up to break cyclic reference of self.state to itself + self.state.transport_handlers.write().unwrap().clear(); + // TODO: the call below is needed to prevent intermittent leak + // due to not freed resource Arc, that apparently happens because + // the task responsible for resource clean up was aborted earlier than expected. + // This should be resolved by identfying correspodning task, and placing + // cancellation token manually inside it. + self.router() + .tables + .tables + .write() + .unwrap() + .root_res + .close(); Ok(()) } @@ -177,18 +208,28 @@ impl Runtime { self.state.locators.read().unwrap().clone() } + /// Spawns a task within runtime. 
+ /// Upon close runtime will block until this task completes pub(crate) fn spawn(&self, future: F) -> JoinHandle<()> where F: Future + Send + 'static, T: Send + 'static, { - let token = self.state.token.clone(); - zenoh_runtime::ZRuntime::Net.spawn(async move { - tokio::select! { - _ = token.cancelled() => {} - _ = future => {} - } - }) + self.state + .task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, future) + } + + /// Spawns a task within runtime. + /// Upon runtime close the task will be automatically aborted. + pub(crate) fn spawn_abortable(&self, future: F) -> JoinHandle<()> + where + F: Future + Send + 'static, + T: Send + 'static, + { + self.state + .task_controller + .spawn_abortable_with_rt(zenoh_runtime::ZRuntime::Net, future) } pub(crate) fn router(&self) -> Arc { @@ -210,10 +251,20 @@ impl Runtime { pub fn whatami(&self) -> WhatAmI { self.state.whatami } + + pub fn downgrade(this: &Runtime) -> WeakRuntime { + WeakRuntime { + state: Arc::downgrade(&this.state), + } + } + + pub fn get_cancellation_token(&self) -> CancellationToken { + self.state.task_controller.get_cancellation_token() + } } struct RuntimeTransportEventHandler { - runtime: std::sync::RwLock>, + runtime: std::sync::RwLock, } impl TransportEventHandler for RuntimeTransportEventHandler { @@ -222,7 +273,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { peer: TransportPeer, transport: TransportUnicast, ) -> ZResult> { - match zread!(self.runtime).as_ref() { + match zread!(self.runtime).upgrade().as_ref() { Some(runtime) => { let slave_handlers: Vec> = zread!(runtime.state.transport_handlers) @@ -250,7 +301,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { &self, transport: TransportMulticast, ) -> ZResult> { - match zread!(self.runtime).as_ref() { + match zread!(self.runtime).upgrade().as_ref() { Some(runtime) => { let slave_handlers: Vec> = zread!(runtime.state.transport_handlers) diff --git a/zenoh/src/net/runtime/orchestrator.rs 
b/zenoh/src/net/runtime/orchestrator.rs index 3feee6fb1b..3f1026268a 100644 --- a/zenoh/src/net/runtime/orchestrator.rs +++ b/zenoh/src/net/runtime/orchestrator.rs @@ -194,7 +194,7 @@ impl Runtime { let this = self.clone(); match (listen, autoconnect.is_empty()) { (true, false) => { - self.spawn(async move { + self.spawn_abortable(async move { tokio::select! { _ = this.responder(&mcast_socket, &sockets) => {}, _ = this.connect_all(&sockets, autoconnect, &addr) => {}, @@ -202,14 +202,14 @@ impl Runtime { }); } (true, true) => { - self.spawn(async move { + self.spawn_abortable(async move { this.responder(&mcast_socket, &sockets).await; }); } (false, false) => { - self.spawn( - async move { this.connect_all(&sockets, autoconnect, &addr).await }, - ); + self.spawn_abortable(async move { + this.connect_all(&sockets, autoconnect, &addr).await + }); } _ => {} } @@ -658,43 +658,44 @@ impl Runtime { async fn peer_connector_retry(&self, peer: EndPoint) { let retry_config = self.get_connect_retry_config(&peer); let mut period = retry_config.period(); + let cancellation_token = self.get_cancellation_token(); loop { log::trace!("Trying to connect to configured peer {}", peer); let endpoint = peer.clone(); - match tokio::time::timeout( - retry_config.timeout(), - self.manager().open_transport_unicast(endpoint), - ) - .await - { - Ok(Ok(transport)) => { - log::debug!("Successfully connected to configured peer {}", peer); - if let Ok(Some(orch_transport)) = transport.get_callback() { - if let Some(orch_transport) = orch_transport - .as_any() - .downcast_ref::() - { - *zwrite!(orch_transport.endpoint) = Some(peer); + tokio::select! 
{ + res = tokio::time::timeout(retry_config.timeout(), self.manager().open_transport_unicast(endpoint)) => { + match res { + Ok(Ok(transport)) => { + log::debug!("Successfully connected to configured peer {}", peer); + if let Ok(Some(orch_transport)) = transport.get_callback() { + if let Some(orch_transport) = orch_transport + .as_any() + .downcast_ref::() + { + *zwrite!(orch_transport.endpoint) = Some(peer); + } + } + break; + } + Ok(Err(e)) => { + log::debug!( + "Unable to connect to configured peer {}! {}. Retry in {:?}.", + peer, + e, + period.duration() + ); + } + Err(e) => { + log::debug!( + "Unable to connect to configured peer {}! {}. Retry in {:?}.", + peer, + e, + period.duration() + ); } } - break; - } - Ok(Err(e)) => { - log::debug!( - "Unable to connect to configured peer {}! {}. Retry in {:?}.", - peer, - e, - period.duration() - ); - } - Err(e) => { - log::debug!( - "Unable to connect to configured peer {}! {}. Retry in {:?}.", - peer, - e, - period.duration() - ); } + _ = cancellation_token.cancelled() => { break; } } tokio::time::sleep(period.next_duration()).await; } @@ -1018,11 +1019,15 @@ impl Runtime { match session.runtime.whatami() { WhatAmI::Client => { let runtime = session.runtime.clone(); + let cancellation_token = runtime.get_cancellation_token(); session.runtime.spawn(async move { let retry_config = runtime.get_global_connect_retry_config(); let mut period = retry_config.period(); while runtime.start_client().await.is_err() { - tokio::time::sleep(period.next_duration()).await; + tokio::select! 
{ + _ = tokio::time::sleep(period.next_duration()) => {} + _ = cancellation_token.cancelled() => { break; } + } } }); } diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 55ff9f0a4d..1b02a5964f 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -26,7 +26,8 @@ use zenoh_protocol::core::{ key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, }; use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr, DeclareMode}; +use zenoh_protocol::network::declare::Mode; +use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; use zenoh_protocol::zenoh::{PushBody, Put}; #[test] @@ -58,13 +59,13 @@ fn base_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, + mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face.upgrade().unwrap(), - 0, &WireExpr::from(1).with_suffix("four/five"), &sub_info, NodeId::default(), @@ -165,77 +166,8 @@ fn match_test() { } } -#[test] -fn multisub_test() { - let config = Config::default(); - let router = Router::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - &config, - ) - .unwrap(); - let tables = router.tables.clone(); - - let primitives = Arc::new(DummyPrimitives {}); - let face0 = Arc::downgrade(&router.new_primitives(primitives).state); - assert!(face0.upgrade().is_some()); - - // -------------- - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - }; - declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - let optres = Resource::get_resource(zread!(tables.tables)._get_root(), "sub") - .map(|res| Arc::downgrade(&res)); - assert!(optres.is_some()); - let res = optres.unwrap(); - assert!(res.upgrade().is_some()); - - 
declare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &"sub".into(), - &sub_info, - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 0, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_some()); - - undeclare_subscription( - zlock!(tables.ctrl_lock).as_ref(), - &tables, - &mut face0.upgrade().unwrap(), - 1, - &WireExpr::empty(), - NodeId::default(), - ); - assert!(res.upgrade().is_none()); - - tables::close_face(&tables, &face0); -} - -#[test] -fn clean_test() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn clean_test() { let config = Config::default(); let router = Router::new( ZenohId::try_from([1]).unwrap(), @@ -302,13 +234,13 @@ fn clean_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, + mode: Mode::Push, }; declare_subscription( zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 0, &"todrop1/todrop11".into(), &sub_info, NodeId::default(), @@ -323,7 +255,6 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 1, &WireExpr::from(1).with_suffix("/todrop12"), &sub_info, NodeId::default(), @@ -339,8 +270,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 1, - &WireExpr::empty(), + &WireExpr::from(1).with_suffix("/todrop12"), NodeId::default(), ); @@ -354,8 +284,7 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 0, - &WireExpr::empty(), + &"todrop1/todrop11".into(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -373,7 +302,6 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 2, &"todrop3".into(), &sub_info, NodeId::default(), @@ -388,8 +316,7 @@ fn clean_test() { 
zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 2, - &WireExpr::empty(), + &"todrop3".into(), NodeId::default(), ); assert!(res1.upgrade().is_some()); @@ -404,7 +331,6 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 3, &"todrop5".into(), &sub_info, NodeId::default(), @@ -413,7 +339,6 @@ fn clean_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 4, &"todrop6".into(), &sub_info, NodeId::default(), @@ -566,6 +491,7 @@ fn client_test() { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, + mode: Mode::Push, }; let primitives0 = Arc::new(ClientPrimitives::new()); @@ -579,10 +505,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 11, wire_expr: "test/client".into(), @@ -593,7 +518,6 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face0.upgrade().unwrap(), - 0, &WireExpr::from(11).with_suffix("/**"), &sub_info, NodeId::default(), @@ -607,10 +531,9 @@ fn client_test() { Primitives::send_declare( primitives0.as_ref(), Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 12, wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), @@ -629,10 +552,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: 
ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 21, wire_expr: "test/client".into(), @@ -643,7 +565,6 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face1.upgrade().unwrap(), - 0, &WireExpr::from(21).with_suffix("/**"), &sub_info, NodeId::default(), @@ -657,10 +578,9 @@ fn client_test() { Primitives::send_declare( primitives1.as_ref(), Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 22, wire_expr: WireExpr::from(21).with_suffix("/z2_pub1"), @@ -679,10 +599,9 @@ fn client_test() { Primitives::send_declare( primitives2.as_ref(), Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: 31, wire_expr: "test/client".into(), @@ -693,7 +612,6 @@ fn client_test() { zlock!(tables.ctrl_lock).as_ref(), &tables, &mut face2.upgrade().unwrap(), - 0, &WireExpr::from(31).with_suffix("/**"), &sub_info, NodeId::default(), @@ -707,10 +625,10 @@ fn client_test() { &tables, &face0.upgrade().unwrap(), &"test/client/z1_wr1".into(), - ext::QoSType::DEFAULT, + ext::QoSType::default(), PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -740,10 +658,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &WireExpr::from(11).with_suffix("/z1_wr2"), - ext::QoSType::DEFAULT, + ext::QoSType::default(), PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -773,10 
+691,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &"test/client/**".into(), - ext::QoSType::DEFAULT, + ext::QoSType::default(), PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -806,10 +724,10 @@ fn client_test() { &router.tables, &face0.upgrade().unwrap(), &12.into(), - ext::QoSType::DEFAULT, + ext::QoSType::default(), PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, @@ -839,10 +757,10 @@ fn client_test() { &router.tables, &face1.upgrade().unwrap(), &22.into(), - ext::QoSType::DEFAULT, + ext::QoSType::default(), PushBody::Put(Put { timestamp: None, - encoding: Encoding::empty(), + encoding: Encoding::default(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, diff --git a/zenoh/src/payload.rs b/zenoh/src/payload.rs deleted file mode 100644 index eac4f58e7c..0000000000 --- a/zenoh/src/payload.rs +++ /dev/null @@ -1,1406 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -//! Payload primitives. 
-use crate::buffers::ZBuf; -use std::str::Utf8Error; -use std::{ - borrow::Cow, convert::Infallible, fmt::Debug, marker::PhantomData, ops::Deref, - string::FromUtf8Error, sync::Arc, -}; -use unwrap_infallible::UnwrapInfallible; -use zenoh_buffers::ZBufWriter; -use zenoh_buffers::{ - buffer::{Buffer, SplitBuffer}, - reader::HasReader, - writer::HasWriter, - ZBufReader, ZSlice, -}; -use zenoh_codec::{RCodec, WCodec, Zenoh080}; -use zenoh_result::{ZError, ZResult}; -#[cfg(feature = "shared-memory")] -use zenoh_shm::SharedMemoryBuf; - -/// Trait to encode a type `T` into a [`Value`]. -pub trait Serialize { - type Output; - - /// The implementer should take care of serializing the type `T` and set the proper [`Encoding`]. - fn serialize(self, t: T) -> Self::Output; -} - -pub trait Deserialize<'a, T> { - type Error; - - /// The implementer should take care of deserializing the type `T` based on the [`Encoding`] information. - fn deserialize(self, t: &'a Payload) -> Result; -} - -/// A payload contains the serialized bytes of user data. -#[repr(transparent)] -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct Payload(ZBuf); - -impl Payload { - /// Create an empty payload. - pub const fn empty() -> Self { - Self(ZBuf::empty()) - } - - /// Create a [`Payload`] from any type `T` that implements [`Into`]. - pub fn new(t: T) -> Self - where - T: Into, - { - Self(t.into()) - } - - /// Returns wether the payload is empty or not. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Returns the length of the payload. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn reader(&self) -> PayloadReader<'_> { - PayloadReader(self.0.reader()) - } - - /// Build a [`Payload`] from a generic reader implementing [`std::io::Read`]. This operation copies data from the reader. 
- pub fn from_reader(mut reader: R) -> Result - where - R: std::io::Read, - { - let mut buf: Vec = vec![]; - reader.read_to_end(&mut buf)?; - Ok(Payload::new(buf)) - } - - /// Get a [`PayloadReader`] implementing [`std::io::Read`] trait. - pub fn iter(&self) -> PayloadIterator<'_, T> - where - T: for<'b> TryFrom<&'b Payload>, - for<'b> ZSerde: Deserialize<'b, T>, - for<'b> >::Error: Debug, - { - PayloadIterator { - reader: self.0.reader(), - _t: PhantomData::, - } - } - - /// Get a [`PayloadWriter`] implementing [`std::io::Write`] trait. - pub fn writer(&mut self) -> PayloadWriter<'_> { - PayloadWriter(self.0.writer()) - } - - /// Encode an object of type `T` as a [`Value`] using the [`ZSerde`]. - /// - /// ```rust - /// use zenoh::payload::Payload; - /// - /// let start = String::from("abc"); - /// let payload = Payload::serialize(start.clone()); - /// let end: String = payload.deserialize().unwrap(); - /// assert_eq!(start, end); - /// ``` - pub fn serialize(t: T) -> Self - where - ZSerde: Serialize, - { - ZSerde.serialize(t) - } - - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn deserialize<'a, T>(&'a self) -> ZResult - where - ZSerde: Deserialize<'a, T>, - >::Error: Debug, - { - ZSerde - .deserialize(self) - .map_err(|e| zerror!("{:?}", e).into()) - } - - /// Decode an object of type `T` from a [`Value`] using the [`ZSerde`]. - pub fn into<'a, T>(&'a self) -> T - where - ZSerde: Deserialize<'a, T, Error = Infallible>, - >::Error: Debug, - { - ZSerde.deserialize(self).unwrap_infallible() - } -} - -/// A reader that implements [`std::io::Read`] trait to read from a [`Payload`]. 
-#[repr(transparent)] -#[derive(Debug)] -pub struct PayloadReader<'a>(ZBufReader<'a>); - -impl std::io::Read for PayloadReader<'_> { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - std::io::Read::read(&mut self.0, buf) - } -} - -impl std::io::Seek for PayloadReader<'_> { - fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { - std::io::Seek::seek(&mut self.0, pos) - } -} - -/// A writer that implements [`std::io::Write`] trait to write into a [`Payload`]. -#[repr(transparent)] -#[derive(Debug)] -pub struct PayloadWriter<'a>(ZBufWriter<'a>); - -impl std::io::Write for PayloadWriter<'_> { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - std::io::Write::write(&mut self.0, buf) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - -/// An iterator that implements [`std::iter::Iterator`] trait to iterate on values `T` in a [`Payload`]. -/// Note that [`Payload`] contains a serialized version of `T` and iterating over a [`Payload`] performs lazy deserialization. -#[repr(transparent)] -#[derive(Debug)] -pub struct PayloadIterator<'a, T> -where - ZSerde: Deserialize<'a, T>, -{ - reader: ZBufReader<'a>, - _t: PhantomData, -} - -impl Iterator for PayloadIterator<'_, T> -where - for<'a> ZSerde: Deserialize<'a, T>, - for<'a> >::Error: Debug, -{ - type Item = T; - - fn next(&mut self) -> Option { - let codec = Zenoh080::new(); - - let kbuf: ZBuf = codec.read(&mut self.reader).ok()?; - let kpld = Payload::new(kbuf); - - let t = ZSerde.deserialize(&kpld).ok()?; - Some(t) - } -} - -impl FromIterator for Payload -where - ZSerde: Serialize, -{ - fn from_iter>(iter: T) -> Self { - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut writer = buffer.writer(); - for t in iter { - let tpld = ZSerde.serialize(t); - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. 
In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &tpld.0).unwrap_unchecked(); - } - } - - Payload::new(buffer) - } -} - -/// The default serializer for Zenoh payload. It supports primitives types, such as: vec, int, uint, float, string, bool. -/// It also supports common Rust serde values. -#[derive(Clone, Copy, Debug)] -pub struct ZSerde; - -#[derive(Debug, Clone, Copy)] -pub struct ZDeserializeError; - -// ZBuf -impl Serialize for ZSerde { - type Output = Payload; - - fn serialize(self, t: ZBuf) -> Self::Output { - Payload::new(t) - } -} - -impl From for Payload { - fn from(t: ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZBuf> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &ZBuf) -> Self::Output { - Payload::new(t.clone()) - } -} - -impl From<&ZBuf> for Payload { - fn from(t: &ZBuf) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, ZBuf> for ZSerde { - type Error = Infallible; - - fn deserialize(self, v: &Payload) -> Result { - Ok(v.0.clone()) - } -} - -impl From for ZBuf { - fn from(value: Payload) -> Self { - value.0 - } -} - -impl From<&Payload> for ZBuf { - fn from(value: &Payload) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -// ZSlice -impl Serialize for ZSerde { - type Output = Payload; - - fn serialize(self, t: ZSlice) -> Self::Output { - Payload::new(t) - } -} - -impl From for Payload { - fn from(t: ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&ZSlice> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &ZSlice) -> Self::Output { - Payload::new(t.clone()) - } -} - -impl From<&ZSlice> for Payload { - fn from(t: &ZSlice) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, ZSlice> for ZSerde { - type Error = Infallible; - - fn deserialize(self, v: &Payload) -> Result { - Ok(v.0.to_zslice()) - } -} - -impl From for ZSlice { - fn from(value: Payload) -> Self { - 
ZBuf::from(value).to_zslice() - } -} - -impl From<&Payload> for ZSlice { - fn from(value: &Payload) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -// [u8; N] -impl Serialize<[u8; N]> for ZSerde { - type Output = Payload; - - fn serialize(self, t: [u8; N]) -> Self::Output { - Payload::new(t) - } -} - -impl From<[u8; N]> for Payload { - fn from(t: [u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&[u8; N]> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &[u8; N]) -> Self::Output { - Payload::new(*t) - } -} - -impl From<&[u8; N]> for Payload { - fn from(t: &[u8; N]) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, [u8; N]> for ZSerde { - type Error = ZDeserializeError; - - fn deserialize(self, v: &Payload) -> Result<[u8; N], Self::Error> { - use std::io::Read; - - if v.0.len() != N { - return Err(ZDeserializeError); - } - let mut dst = [0u8; N]; - let mut reader = v.reader(); - reader.read_exact(&mut dst).map_err(|_| ZDeserializeError)?; - Ok(dst) - } -} - -impl TryFrom for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for [u8; N] { - type Error = ZDeserializeError; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// Vec -impl Serialize> for ZSerde { - type Output = Payload; - - fn serialize(self, t: Vec) -> Self::Output { - Payload::new(t) - } -} - -impl From> for Payload { - fn from(t: Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&Vec> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &Vec) -> Self::Output { - Payload::new(t.clone()) - } -} - -impl From<&Vec> for Payload { - fn from(t: &Vec) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, Vec> for ZSerde { - type Error = Infallible; - - fn deserialize(self, v: &Payload) -> Result, Self::Error> { - Ok(v.0.contiguous().to_vec()) - } -} - -impl From for Vec { - fn 
from(value: Payload) -> Self { - ZSerde.deserialize(&value).unwrap_infallible() - } -} - -impl From<&Payload> for Vec { - fn from(value: &Payload) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -// &[u8] -impl Serialize<&[u8]> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &[u8]) -> Self::Output { - Payload::new(t.to_vec()) - } -} - -impl From<&[u8]> for Payload { - fn from(t: &[u8]) -> Self { - ZSerde.serialize(t) - } -} - -// Cow<[u8]> -impl<'a> Serialize> for ZSerde { - type Output = Payload; - - fn serialize(self, t: Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) - } -} - -impl From> for Payload { - fn from(t: Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, [u8]>> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &Cow<'a, [u8]>) -> Self::Output { - Payload::new(t.to_vec()) - } -} - -impl From<&Cow<'_, [u8]>> for Payload { - fn from(t: &Cow<'_, [u8]>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, [u8]>> for ZSerde { - type Error = Infallible; - - fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - Ok(v.0.contiguous()) - } -} - -impl<'a> From<&'a Payload> for Cow<'a, [u8]> { - fn from(value: &'a Payload) -> Self { - ZSerde.deserialize(value).unwrap_infallible() - } -} - -// String -impl Serialize for ZSerde { - type Output = Payload; - - fn serialize(self, s: String) -> Self::Output { - Payload::new(s.into_bytes()) - } -} - -impl From for Payload { - fn from(t: String) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&String> for ZSerde { - type Output = Payload; - - fn serialize(self, s: &String) -> Self::Output { - Payload::new(s.clone().into_bytes()) - } -} - -impl From<&String> for Payload { - fn from(t: &String) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, String> for ZSerde { - type Error = FromUtf8Error; - - fn deserialize(self, v: &Payload) -> Result { - let v: Vec = 
ZSerde.deserialize(v).unwrap_infallible(); - String::from_utf8(v) - } -} - -impl TryFrom for String { - type Error = FromUtf8Error; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for String { - type Error = FromUtf8Error; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// &str -impl Serialize<&str> for ZSerde { - type Output = Payload; - - fn serialize(self, s: &str) -> Self::Output { - Self.serialize(s.to_string()) - } -} - -impl From<&str> for Payload { - fn from(t: &str) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize> for ZSerde { - type Output = Payload; - - fn serialize(self, s: Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) - } -} - -impl From> for Payload { - fn from(t: Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Serialize<&Cow<'a, str>> for ZSerde { - type Output = Payload; - - fn serialize(self, s: &Cow<'a, str>) -> Self::Output { - Self.serialize(s.to_string()) - } -} - -impl From<&Cow<'_, str>> for Payload { - fn from(t: &Cow<'_, str>) -> Self { - ZSerde.serialize(t) - } -} - -impl<'a> Deserialize<'a, Cow<'a, str>> for ZSerde { - type Error = Utf8Error; - - fn deserialize(self, v: &'a Payload) -> Result, Self::Error> { - let v: Cow<[u8]> = Self.deserialize(v).unwrap_infallible(); - let _ = core::str::from_utf8(v.as_ref())?; - // SAFETY: &str is &[u8] with the guarantee that every char is UTF-8 - // As implemented internally https://doc.rust-lang.org/std/str/fn.from_utf8_unchecked.html. - Ok(unsafe { core::mem::transmute(v) }) - } -} - -impl<'a> TryFrom<&'a Payload> for Cow<'a, str> { - type Error = Utf8Error; - - fn try_from(value: &'a Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// - Integers impl -macro_rules! 
impl_int { - ($t:ty) => { - impl Serialize<$t> for ZSerde { - type Output = Payload; - - fn serialize(self, t: $t) -> Self::Output { - let bs = t.to_le_bytes(); - let end = if t == 0 as $t { - 0 - } else { - 1 + bs.iter().rposition(|b| *b != 0).unwrap_or(bs.len() - 1) - }; - // SAFETY: - // - 0 is a valid start index because bs is guaranteed to always have a length greater or equal than 1 - // - end is a valid end index because is bounded between 0 and bs.len() - Payload::new(unsafe { ZSlice::new_unchecked(Arc::new(bs), 0, end) }) - } - } - - impl From<$t> for Payload { - fn from(t: $t) -> Self { - ZSerde.serialize(t) - } - } - - impl Serialize<&$t> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &$t) -> Self::Output { - Self.serialize(*t) - } - } - - impl From<&$t> for Payload { - fn from(t: &$t) -> Self { - ZSerde.serialize(t) - } - } - - impl<'a> Deserialize<'a, $t> for ZSerde { - type Error = ZDeserializeError; - - fn deserialize(self, v: &Payload) -> Result<$t, Self::Error> { - use std::io::Read; - - let mut r = v.reader(); - let mut bs = (0 as $t).to_le_bytes(); - if v.len() > bs.len() { - return Err(ZDeserializeError); - } - r.read_exact(&mut bs[..v.len()]) - .map_err(|_| ZDeserializeError)?; - let t = <$t>::from_le_bytes(bs); - Ok(t) - } - } - - impl TryFrom for $t { - type Error = ZDeserializeError; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } - } - - impl TryFrom<&Payload> for $t { - type Error = ZDeserializeError; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } - } - }; -} - -// Zenoh unsigned integers -impl_int!(u8); -impl_int!(u16); -impl_int!(u32); -impl_int!(u64); -impl_int!(usize); - -// Zenoh signed integers -impl_int!(i8); -impl_int!(i16); -impl_int!(i32); -impl_int!(i64); -impl_int!(isize); - -// Zenoh floats -impl_int!(f32); -impl_int!(f64); - -// Zenoh bool -impl Serialize for ZSerde { - type Output = Payload; - - fn serialize(self, t: bool) -> Self::Output { - // 
SAFETY: casting a bool into an integer is well-defined behaviour. - // 0 is false, 1 is true: https://doc.rust-lang.org/std/primitive.bool.html - Payload::new(ZBuf::from((t as u8).to_le_bytes())) - } -} - -impl From for Payload { - fn from(t: bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Serialize<&bool> for ZSerde { - type Output = Payload; - - fn serialize(self, t: &bool) -> Self::Output { - ZSerde.serialize(*t) - } -} - -impl From<&bool> for Payload { - fn from(t: &bool) -> Self { - ZSerde.serialize(t) - } -} - -impl Deserialize<'_, bool> for ZSerde { - type Error = ZDeserializeError; - - fn deserialize(self, v: &Payload) -> Result { - let p = v.deserialize::().map_err(|_| ZDeserializeError)?; - match p { - 0 => Ok(false), - 1 => Ok(true), - _ => Err(ZDeserializeError), - } - } -} - -impl TryFrom for bool { - type Error = ZDeserializeError; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for bool { - type Error = ZDeserializeError; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// - Zenoh advanced types encoders/decoders -// JSON -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_json::Value) -> Self::Output { - ZSerde.serialize(&t) - } -} - -impl TryFrom for Payload { - type Error = serde_json::Error; - - fn try_from(value: serde_json::Value) -> Result { - ZSerde.serialize(&value) - } -} - -impl Serialize<&serde_json::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_json::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_json::to_writer(payload.writer(), t)?; - Ok(payload) - } -} - -impl TryFrom<&serde_json::Value> for Payload { - type Error = serde_json::Error; - - fn try_from(value: &serde_json::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Deserialize<'_, serde_json::Value> for ZSerde { - type Error = serde_json::Error; - - fn deserialize(self, v: &Payload) -> 
Result { - serde_json::from_reader(v.reader()) - } -} - -impl TryFrom for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for serde_json::Value { - type Error = serde_json::Error; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// Yaml -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_yaml::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for Payload { - type Error = serde_yaml::Error; - - fn try_from(value: serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_yaml::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_yaml::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_yaml::to_writer(payload.0.writer(), t)?; - Ok(payload) - } -} - -impl TryFrom<&serde_yaml::Value> for Payload { - type Error = serde_yaml::Error; - - fn try_from(value: &serde_yaml::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Deserialize<'_, serde_yaml::Value> for ZSerde { - type Error = serde_yaml::Error; - - fn deserialize(self, v: &Payload) -> Result { - serde_yaml::from_reader(v.reader()) - } -} - -impl TryFrom for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for serde_yaml::Value { - type Error = serde_yaml::Error; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// CBOR -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_cbor::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for Payload { - type Error = serde_cbor::Error; - - fn try_from(value: serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_cbor::Value> for ZSerde { - type Output = Result; - - fn 
serialize(self, t: &serde_cbor::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_cbor::to_writer(payload.0.writer(), t)?; - Ok(payload) - } -} - -impl TryFrom<&serde_cbor::Value> for Payload { - type Error = serde_cbor::Error; - - fn try_from(value: &serde_cbor::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Deserialize<'_, serde_cbor::Value> for ZSerde { - type Error = serde_cbor::Error; - - fn deserialize(self, v: &Payload) -> Result { - serde_cbor::from_reader(v.reader()) - } -} - -impl TryFrom for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for serde_cbor::Value { - type Error = serde_cbor::Error; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// Pickle -impl Serialize for ZSerde { - type Output = Result; - - fn serialize(self, t: serde_pickle::Value) -> Self::Output { - Self.serialize(&t) - } -} - -impl TryFrom for Payload { - type Error = serde_pickle::Error; - - fn try_from(value: serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Serialize<&serde_pickle::Value> for ZSerde { - type Output = Result; - - fn serialize(self, t: &serde_pickle::Value) -> Self::Output { - let mut payload = Payload::empty(); - serde_pickle::value_to_writer( - &mut payload.0.writer(), - t, - serde_pickle::SerOptions::default(), - )?; - Ok(payload) - } -} - -impl TryFrom<&serde_pickle::Value> for Payload { - type Error = serde_pickle::Error; - - fn try_from(value: &serde_pickle::Value) -> Result { - ZSerde.serialize(value) - } -} - -impl Deserialize<'_, serde_pickle::Value> for ZSerde { - type Error = serde_pickle::Error; - - fn deserialize(self, v: &Payload) -> Result { - serde_pickle::value_from_reader(v.reader(), serde_pickle::DeOptions::default()) - } -} - -impl TryFrom for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: Payload) -> Result { 
- ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for serde_pickle::Value { - type Error = serde_pickle::Error; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// Shared memory conversion -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { - type Output = Payload; - - fn serialize(self, t: Arc) -> Self::Output { - Payload::new(t) - } -} -#[cfg(feature = "shared-memory")] -impl From> for Payload { - fn from(t: Arc) -> Self { - ZSerde.serialize(t) - } -} - -#[cfg(feature = "shared-memory")] -impl Serialize> for ZSerde { - type Output = Payload; - - fn serialize(self, t: Box) -> Self::Output { - let smb: Arc = t.into(); - Self.serialize(smb) - } -} - -#[cfg(feature = "shared-memory")] -impl From> for Payload { - fn from(t: Box) -> Self { - ZSerde.serialize(t) - } -} - -#[cfg(feature = "shared-memory")] -impl Serialize for ZSerde { - type Output = Payload; - - fn serialize(self, t: SharedMemoryBuf) -> Self::Output { - Payload::new(t) - } -} - -#[cfg(feature = "shared-memory")] -impl From for Payload { - fn from(t: SharedMemoryBuf) -> Self { - ZSerde.serialize(t) - } -} - -#[cfg(feature = "shared-memory")] -impl Deserialize<'_, SharedMemoryBuf> for ZSerde { - type Error = ZDeserializeError; - - fn deserialize(self, v: &Payload) -> Result { - // A SharedMemoryBuf is expected to have only one slice - let mut zslices = v.0.zslices(); - if let Some(zs) = zslices.next() { - if let Some(shmb) = zs.downcast_ref::() { - return Ok(shmb.clone()); - } - } - Err(ZDeserializeError) - } -} - -#[cfg(feature = "shared-memory")] -impl TryFrom for SharedMemoryBuf { - type Error = ZDeserializeError; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -// Tuple -impl Serialize<(A, B)> for ZSerde -where - A: Into, - B: Into, -{ - type Output = Payload; - - fn serialize(self, t: (A, B)) -> Self::Output { - let (a, b) = t; - - let codec = Zenoh080::new(); - let mut buffer: ZBuf = ZBuf::empty(); - let mut 
writer = buffer.writer(); - let apld: Payload = a.into(); - let bpld: Payload = b.into(); - - // SAFETY: we are serializing slices on a ZBuf, so serialization will never - // fail unless we run out of memory. In that case, Rust memory allocator - // will panic before the serializer has any chance to fail. - unsafe { - codec.write(&mut writer, &apld.0).unwrap_unchecked(); - codec.write(&mut writer, &bpld.0).unwrap_unchecked(); - } - - Payload::new(buffer) - } -} - -impl From<(A, B)> for Payload -where - A: Into, - B: Into, -{ - fn from(value: (A, B)) -> Self { - ZSerde.serialize(value) - } -} - -impl Deserialize<'_, (A, B)> for ZSerde -where - for<'a> A: TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, -{ - type Error = ZError; - - fn deserialize(self, payload: &Payload) -> Result<(A, B), Self::Error> { - let codec = Zenoh080::new(); - let mut reader = payload.0.reader(); - - let abuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let apld = Payload::new(abuf); - - let bbuf: ZBuf = codec.read(&mut reader).map_err(|e| zerror!("{:?}", e))?; - let bpld = Payload::new(bbuf); - - let a = A::try_from(&apld).map_err(|e| zerror!("{:?}", e))?; - let b = B::try_from(&bpld).map_err(|e| zerror!("{:?}", e))?; - Ok((a, b)) - } -} - -impl TryFrom for (A, B) -where - A: for<'a> TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, -{ - type Error = ZError; - - fn try_from(value: Payload) -> Result { - ZSerde.deserialize(&value) - } -} - -impl TryFrom<&Payload> for (A, B) -where - for<'a> A: TryFrom<&'a Payload>, - for<'a> >::Error: Debug, - for<'b> B: TryFrom<&'b Payload>, - for<'b> >::Error: Debug, -{ - type Error = ZError; - - fn try_from(value: &Payload) -> Result { - ZSerde.deserialize(value) - } -} - -// For convenience to always convert a Value in the examples -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StringOrBase64 { - 
String(String), - Base64(String), -} - -impl StringOrBase64 { - pub fn into_string(self) -> String { - match self { - StringOrBase64::String(s) | StringOrBase64::Base64(s) => s, - } - } -} - -impl Deref for StringOrBase64 { - type Target = String; - - fn deref(&self) -> &Self::Target { - match self { - Self::String(s) | Self::Base64(s) => s, - } - } -} - -impl std::fmt::Display for StringOrBase64 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(self) - } -} - -impl From<&Payload> for StringOrBase64 { - fn from(v: &Payload) -> Self { - use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; - match v.deserialize::() { - Ok(s) => StringOrBase64::String(s), - Err(_) => StringOrBase64::Base64(b64_std_engine.encode(v.into::>())), - } - } -} - -mod tests { - #[test] - fn serializer() { - use super::Payload; - use rand::Rng; - use std::borrow::Cow; - use zenoh_buffers::{ZBuf, ZSlice}; - - const NUM: usize = 1_000; - - macro_rules! serialize_deserialize { - ($t:ty, $in:expr) => { - let i = $in; - let t = i.clone(); - println!("Serialize:\t{:?}", t); - let v = Payload::serialize(t); - println!("Deserialize:\t{:?}", v); - let o: $t = v.deserialize().unwrap(); - assert_eq!(i, o); - println!(""); - }; - } - - let mut rng = rand::thread_rng(); - - // unsigned integer - serialize_deserialize!(u8, u8::MIN); - serialize_deserialize!(u16, u16::MIN); - serialize_deserialize!(u32, u32::MIN); - serialize_deserialize!(u64, u64::MIN); - serialize_deserialize!(usize, usize::MIN); - - serialize_deserialize!(u8, u8::MAX); - serialize_deserialize!(u16, u16::MAX); - serialize_deserialize!(u32, u32::MAX); - serialize_deserialize!(u64, u64::MAX); - serialize_deserialize!(usize, usize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(u8, rng.gen::()); - serialize_deserialize!(u16, rng.gen::()); - serialize_deserialize!(u32, rng.gen::()); - serialize_deserialize!(u64, rng.gen::()); - serialize_deserialize!(usize, rng.gen::()); - } - - 
// signed integer - serialize_deserialize!(i8, i8::MIN); - serialize_deserialize!(i16, i16::MIN); - serialize_deserialize!(i32, i32::MIN); - serialize_deserialize!(i64, i64::MIN); - serialize_deserialize!(isize, isize::MIN); - - serialize_deserialize!(i8, i8::MAX); - serialize_deserialize!(i16, i16::MAX); - serialize_deserialize!(i32, i32::MAX); - serialize_deserialize!(i64, i64::MAX); - serialize_deserialize!(isize, isize::MAX); - - for _ in 0..NUM { - serialize_deserialize!(i8, rng.gen::()); - serialize_deserialize!(i16, rng.gen::()); - serialize_deserialize!(i32, rng.gen::()); - serialize_deserialize!(i64, rng.gen::()); - serialize_deserialize!(isize, rng.gen::()); - } - - // float - serialize_deserialize!(f32, f32::MIN); - serialize_deserialize!(f64, f64::MIN); - - serialize_deserialize!(f32, f32::MAX); - serialize_deserialize!(f64, f64::MAX); - - for _ in 0..NUM { - serialize_deserialize!(f32, rng.gen::()); - serialize_deserialize!(f64, rng.gen::()); - } - - // String - serialize_deserialize!(String, ""); - serialize_deserialize!(String, String::from("abcdef")); - - // Cow - serialize_deserialize!(Cow, Cow::from("")); - serialize_deserialize!(Cow, Cow::from(String::from("abcdef"))); - - // Vec - serialize_deserialize!(Vec, vec![0u8; 0]); - serialize_deserialize!(Vec, vec![0u8; 64]); - - // Cow<[u8]> - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 0])); - serialize_deserialize!(Cow<[u8]>, Cow::from(vec![0u8; 64])); - - // ZBuf - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 0])); - serialize_deserialize!(ZBuf, ZBuf::from(vec![0u8; 64])); - - // Tuple - serialize_deserialize!((usize, usize), (0, 1)); - serialize_deserialize!((usize, String), (0, String::from("a"))); - serialize_deserialize!((String, String), (String::from("a"), String::from("b"))); - - // Iterator - let v: [usize; 5] = [0, 1, 2, 3, 4]; - println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.iter()); - println!("Deserialize:\t{:?}\n", p); - for (i, t) in 
p.iter::().enumerate() { - assert_eq!(i, t); - } - - let mut v = vec![[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]; - println!("Serialize:\t{:?}", v); - let p = Payload::from_iter(v.drain(..)); - println!("Deserialize:\t{:?}\n", p); - let mut iter = p.iter::<[u8; 4]>(); - assert_eq!(iter.next().unwrap(), [0, 1, 2, 3]); - assert_eq!(iter.next().unwrap(), [4, 5, 6, 7]); - assert_eq!(iter.next().unwrap(), [8, 9, 10, 11]); - assert_eq!(iter.next().unwrap(), [12, 13, 14, 15]); - assert!(iter.next().is_none()); - - use std::collections::HashMap; - let mut hm: HashMap = HashMap::new(); - hm.insert(0, 0); - hm.insert(1, 1); - println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, usize)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZSlice::from(vec![0u8; 8])); - hm.insert(1, ZSlice::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZSlice)>()); - assert_eq!(hm, o); - - let mut hm: HashMap = HashMap::new(); - hm.insert(0, ZBuf::from(vec![0u8; 8])); - hm.insert(1, ZBuf::from(vec![1u8; 16])); - println!("Serialize:\t{:?}", hm); - let p = 
Payload::from_iter(hm.clone().drain()); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, ZBuf)>()); - assert_eq!(hm, o); - - let mut hm: HashMap> = HashMap::new(); - hm.insert(0, vec![0u8; 8]); - hm.insert(1, vec![1u8; 16]); - println!("Serialize:\t{:?}", hm); - let p = Payload::from_iter(hm.clone().iter().map(|(k, v)| (k, Cow::from(v)))); - println!("Deserialize:\t{:?}\n", p); - let o = HashMap::from_iter(p.iter::<(usize, Vec)>()); - assert_eq!(hm, o); - } -} diff --git a/zenoh/src/prelude.rs b/zenoh/src/prelude.rs index e2327c0dcc..36a841d1ef 100644 --- a/zenoh/src/prelude.rs +++ b/zenoh/src/prelude.rs @@ -32,39 +32,39 @@ pub(crate) mod common { }; pub use zenoh_core::Resolve; - pub use zenoh_protocol::core::{EndPoint, Locator, ZenohId}; - #[zenoh_macros::unstable] - pub use zenoh_protocol::core::{EntityGlobalId, EntityId}; + pub(crate) type Id = usize; pub use crate::config::{self, Config, ValidatedMap}; - pub use crate::handlers::IntoHandler; + pub use crate::handlers::IntoCallbackReceiverPair; pub use crate::selector::{Parameter, Parameters, Selector}; pub use crate::session::{Session, SessionDeclarations}; - pub use crate::query::{ConsolidationMode, QueryConsolidation, QueryTarget}; + pub use crate::query::{QueryConsolidation, QueryTarget}; - pub use crate::encoding::Encoding; - /// The encoding of a zenoh `Value`. - pub use crate::payload::{Deserialize, Payload, Serialize}; pub use crate::value::Value; + /// The encoding of a zenoh `Value`. 
+ pub use zenoh_protocol::core::{Encoding, KnownEncoding}; + pub use crate::query::ConsolidationMode; #[zenoh_macros::unstable] pub use crate::sample::Locality; #[cfg(not(feature = "unstable"))] pub(crate) use crate::sample::Locality; - #[zenoh_macros::unstable] - pub use crate::sample::SourceInfo; - pub use crate::sample::{Sample, SampleKind}; + pub use crate::sample::Sample; + + pub use zenoh_protocol::core::SampleKind; pub use crate::publication::Priority; #[zenoh_macros::unstable] pub use crate::publication::PublisherDeclarations; pub use zenoh_protocol::core::{CongestionControl, Reliability, WhatAmI}; - pub use crate::sample::builder::{QoSBuilderTrait, TimestampBuilderTrait, ValueBuilderTrait}; - - #[zenoh_macros::unstable] - pub use crate::sample::builder::SampleBuilderTrait; + /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. + pub use zenoh_protocol::core::EndPoint; + /// A [`Locator`] contains a choice of protocol, an address and port, as well as optional additional properties to work with. + pub use zenoh_protocol::core::Locator; + /// The global unique id of a zenoh peer. + pub use zenoh_protocol::core::ZenohId; } /// Prelude to import when using Zenoh's sync API. diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 4f31c73a24..ef33115a6b 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,18 +13,19 @@ // //! Publishing primitives. 
+#[zenoh_macros::unstable] +use crate::handlers::Callback; +#[zenoh_macros::unstable] +use crate::handlers::DefaultHandler; use crate::net::primitives::Primitives; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::{DataInfo, QoS, Sample, SampleFields, SampleKind}; +use crate::sample::DataInfo; +use crate::sample::QoS; +use crate::Encoding; use crate::SessionRef; use crate::Undeclarable; -#[cfg(feature = "unstable")] -use crate::{ - handlers::{Callback, DefaultHandler, IntoHandler}, - Id, -}; use std::future::Ready; use zenoh_core::{zread, AsyncResolve, Resolvable, Resolve, SyncResolve}; use zenoh_protocol::network::push::ext; @@ -38,16 +39,26 @@ use zenoh_result::ZResult; /// The kind of congestion control. pub use zenoh_protocol::core::CongestionControl; -#[derive(Debug, Clone)] -pub struct PublicationBuilderPut { - pub(crate) payload: Payload, - pub(crate) encoding: Encoding, -} -#[derive(Debug, Clone)] -pub struct PublicationBuilderDelete; +/// A builder for initializing a [`delete`](crate::Session::delete) operation. +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// use zenoh::publication::CongestionControl; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// session +/// .delete("key/expression") +/// .res() +/// .await +/// .unwrap(); +/// # } +/// ``` +pub type DeleteBuilder<'a, 'b> = PutBuilder<'a, 'b>; -/// A builder for initializing [`Session::put`](crate::Session::put), [`Session::delete`](crate::Session::delete), -/// [`Publisher::put`](crate::Publisher::put), and [`Publisher::delete`](crate::Publisher::delete) operations. +/// A builder for initializing a [`put`](crate::Session::put) operation. 
/// /// # Examples /// ``` @@ -55,12 +66,11 @@ pub struct PublicationBuilderDelete; /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::{ValueBuilderTrait, QoSBuilderTrait}; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session -/// .put("key/expression", "payload") -/// .encoding(Encoding::TEXT_PLAIN) +/// .put("key/expression", "value") +/// .encoding(KnownEncoding::TextPlain) /// .congestion_control(CongestionControl::Block) /// .res() /// .await @@ -69,52 +79,38 @@ pub struct PublicationBuilderDelete; /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug, Clone)] -pub struct PublicationBuilder { - pub(crate) publisher: P, - pub(crate) kind: T, - pub(crate) timestamp: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, +pub struct PutBuilder<'a, 'b> { + pub(crate) publisher: PublisherBuilder<'a, 'b>, + pub(crate) value: Value, + pub(crate) kind: SampleKind, #[cfg(feature = "unstable")] pub(crate) attachment: Option, } -pub type SessionPutBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderPut>; - -pub type SessionDeleteBuilder<'a, 'b> = - PublicationBuilder, PublicationBuilderDelete>; - -pub type PublisherPutBuilder<'a> = PublicationBuilder<&'a Publisher<'a>, PublicationBuilderPut>; - -pub type PublisherDeleteBuilder<'a> = - PublicationBuilder<&'a Publisher<'a>, PublicationBuilderDelete>; - -impl QoSBuilderTrait for PublicationBuilder, T> { +impl PutBuilder<'_, '_> { + /// Change the encoding of the written data. 
#[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - publisher: self.publisher.congestion_control(congestion_control), - ..self - } + pub fn encoding(mut self, encoding: IntoEncoding) -> Self + where + IntoEncoding: Into, + { + self.value.encoding = encoding.into(); + self } + /// Change the `congestion_control` to apply when routing the data. #[inline] - fn priority(self, priority: Priority) -> Self { - Self { - publisher: self.publisher.priority(priority), - ..self - } + pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { + self.publisher = self.publisher.congestion_control(congestion_control); + self } + + /// Change the priority of the written data. #[inline] - fn express(self, is_express: bool) -> Self { - Self { - publisher: self.publisher.express(is_express), - ..self - } + pub fn priority(mut self, priority: Priority) -> Self { + self.publisher = self.publisher.priority(priority); + self } -} -impl PublicationBuilder, T> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] @@ -123,116 +119,53 @@ impl PublicationBuilder, T> { self.publisher = self.publisher.allowed_destination(destination); self } -} - -impl

ValueBuilderTrait for PublicationBuilder { - fn encoding>(self, encoding: T) -> Self { - Self { - kind: PublicationBuilderPut { - encoding: encoding.into(), - ..self.kind - }, - ..self - } - } - - fn payload(self, payload: IntoPayload) -> Self - where - IntoPayload: Into, - { - Self { - kind: PublicationBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - kind: PublicationBuilderPut { payload, encoding }, - ..self - } - } -} -#[zenoh_macros::unstable] -impl SampleBuilderTrait for PublicationBuilder { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: TA) -> Self { - Self { - attachment: attachment.into(), - ..self - } + pub fn kind(mut self, kind: SampleKind) -> Self { + self.kind = kind; + self } -} -impl TimestampBuilderTrait for PublicationBuilder { - fn timestamp>>(self, timestamp: TS) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self } } -impl Resolvable for PublicationBuilder { +impl Resolvable for PutBuilder<'_, '_> { type To = ZResult<()>; } -impl SyncResolve for PublicationBuilder, PublicationBuilderPut> { +impl SyncResolve for PutBuilder<'_, '_> { #[inline] fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; - resolve_put( - &publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) - } -} + let PublisherBuilder { + session, + key_expr, + congestion_control, + priority, + destination, + } = self.publisher; + + let publisher = Publisher { + session, + key_expr: 
key_expr?, + congestion_control, + priority, + destination, + }; -impl SyncResolve for PublicationBuilder, PublicationBuilderDelete> { - #[inline] - fn res_sync(self) -> ::To { - let publisher = self.publisher.create_one_shot_publisher()?; resolve_put( &publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, + self.value, + self.kind, #[cfg(feature = "unstable")] self.attachment, ) } } -impl AsyncResolve for PublicationBuilder, PublicationBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder, PublicationBuilderDelete> { +impl AsyncResolve for PutBuilder<'_, '_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -310,62 +243,38 @@ impl std::fmt::Debug for PublisherRef<'_> { #[derive(Debug, Clone)] pub struct Publisher<'a> { pub(crate) session: SessionRef<'a>, - #[cfg(feature = "unstable")] - pub(crate) eid: EntityId, pub(crate) key_expr: KeyExpr<'a>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, - pub(crate) is_express: bool, pub(crate) destination: Locality, } impl<'a> Publisher<'a> { - /// Returns the [`EntityGlobalId`] of this Publisher. - /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let publisher = session.declare_publisher("key/expression") - /// .res() - /// .await - /// .unwrap(); - /// let publisher_id = publisher.id(); - /// # } - /// ``` - #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { - zid: self.session.zid(), - eid: self.eid, - } - } - pub fn key_expr(&self) -> &KeyExpr<'a> { &self.key_expr } /// Change the `congestion_control` to apply when routing the data. 
#[inline] - pub fn set_congestion_control(&mut self, congestion_control: CongestionControl) { + pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { self.congestion_control = congestion_control; + self } /// Change the priority of the written data. #[inline] - pub fn set_priority(&mut self, priority: Priority) { + pub fn priority(mut self, priority: Priority) -> Self { self.priority = priority; + self } /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn set_allowed_destination(&mut self, destination: Locality) { + pub fn allowed_destination(mut self, destination: Locality) -> Self { self.destination = destination; + self } /// Consumes the given `Publisher`, returning a thread-safe reference-counting @@ -404,6 +313,16 @@ impl<'a> Publisher<'a> { std::sync::Arc::new(self) } + fn _write(&self, kind: SampleKind, value: Value) -> Publication { + Publication { + publisher: self, + value, + kind, + #[cfg(feature = "unstable")] + attachment: None, + } + } + /// Put data. /// /// # Examples @@ -418,22 +337,11 @@ impl<'a> Publisher<'a> { /// # } /// ``` #[inline] - pub fn put(&self, payload: IntoPayload) -> PublisherPutBuilder<'_> + pub fn put(&self, value: IntoValue) -> Publication where - IntoPayload: Into, + IntoValue: Into, { - PublicationBuilder { - publisher: self, - kind: PublicationBuilderPut { - payload: payload.into(), - encoding: Encoding::ZENOH_BYTES, - }, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - } + self._write(SampleKind::Put, value.into()) } /// Delete data. 
@@ -449,16 +357,8 @@ impl<'a> Publisher<'a> { /// publisher.delete().res().await.unwrap(); /// # } /// ``` - pub fn delete(&self) -> PublisherDeleteBuilder<'_> { - PublicationBuilder { - publisher: self, - kind: PublicationBuilderDelete, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - } + pub fn delete(&self) -> Publication { + self._write(SampleKind::Delete, Value::empty()) } /// Return the [`MatchingStatus`] of the publisher. @@ -539,6 +439,44 @@ impl<'a> Publisher<'a> { } } +/// Internal function for sending data with specified [`kind`](SampleKind) +pub trait HasWriteWithSampleKind { + type WriteOutput<'a> + where + Self: 'a; + fn write>( + &self, + kind: SampleKind, + value: IntoValue, + ) -> Self::WriteOutput<'_>; +} + +impl<'a> HasWriteWithSampleKind for Publisher<'a> { + type WriteOutput<'b> = Publication<'b> + where + 'a: 'b; + /// Send data with [`kind`](SampleKind) (Put or Delete). + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::r#async::*; + /// use zenoh::publication::HasWriteWithSampleKind; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap().into_arc(); + /// let publisher = session.declare_publisher("key/expression").res().await.unwrap(); + /// publisher.write(SampleKind::Put, "value").res().await.unwrap(); + /// # } + /// ``` + fn write(&self, kind: SampleKind, value: IntoValue) -> Self::WriteOutput<'_> + where + IntoValue: Into, + { + self._write(kind, value.into()) + } +} + /// Functions to create zenoh entities with `'static` lifetime. 
/// /// This trait contains functions to create zenoh entities like @@ -687,47 +625,42 @@ impl Drop for Publisher<'_> { } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - fn res_sync(self) -> ::To { - resolve_put( - self.publisher, - self.kind.payload, - SampleKind::Put, - self.kind.encoding, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, - #[cfg(feature = "unstable")] - self.attachment, - ) +/// A [`Resolvable`] returned by [`Publisher::put()`](Publisher::put), +/// [`Publisher::delete()`](Publisher::delete) and [`Publisher::write()`](Publisher::write). +#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] +pub struct Publication<'a> { + publisher: &'a Publisher<'a>, + value: Value, + kind: SampleKind, + #[cfg(feature = "unstable")] + pub(crate) attachment: Option, +} + +impl<'a> Publication<'a> { + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self } } -impl SyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { +impl Resolvable for Publication<'_> { + type To = ZResult<()>; +} + +impl SyncResolve for Publication<'_> { fn res_sync(self) -> ::To { resolve_put( self.publisher, - Payload::empty(), - SampleKind::Delete, - Encoding::ZENOH_BYTES, - self.timestamp, - #[cfg(feature = "unstable")] - self.source_info, + self.value, + self.kind, #[cfg(feature = "unstable")] self.attachment, ) } } -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for PublicationBuilder<&Publisher<'_>, PublicationBuilderDelete> { +impl AsyncResolve for Publication<'_> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -735,7 +668,10 @@ impl AsyncResolve for 
PublicationBuilder<&Publisher<'_>, PublicationBuilderDelet } } -impl<'a> Sink for Publisher<'a> { +impl<'a, IntoValue> Sink for Publisher<'a> +where + IntoValue: Into, +{ type Error = Error; #[inline] @@ -744,26 +680,8 @@ impl<'a> Sink for Publisher<'a> { } #[inline] - fn start_send(self: Pin<&mut Self>, item: Sample) -> Result<(), Self::Error> { - let SampleFields { - payload, - kind, - encoding, - #[cfg(feature = "unstable")] - attachment, - .. - } = item.into(); - resolve_put( - &self, - payload, - kind, - encoding, - None, - #[cfg(feature = "unstable")] - SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment, - ) + fn start_send(self: Pin<&mut Self>, item: IntoValue) -> Result<(), Self::Error> { + self.put(item.into()).res_sync() } #[inline] @@ -785,7 +703,6 @@ impl<'a> Sink for Publisher<'a> { /// # async fn main() { /// use zenoh::prelude::r#async::*; /// use zenoh::publication::CongestionControl; -/// use zenoh::sample::builder::QoSBuilderTrait; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let publisher = session @@ -803,7 +720,6 @@ pub struct PublisherBuilder<'a, 'b: 'a> { pub(crate) key_expr: ZResult>, pub(crate) congestion_control: CongestionControl, pub(crate) priority: Priority, - pub(crate) is_express: bool, pub(crate) destination: Locality, } @@ -817,38 +733,26 @@ impl<'a, 'b> Clone for PublisherBuilder<'a, 'b> { }, congestion_control: self.congestion_control, priority: self.priority, - is_express: self.is_express, destination: self.destination, } } } -impl QoSBuilderTrait for PublisherBuilder<'_, '_> { +impl<'a, 'b> PublisherBuilder<'a, 'b> { /// Change the `congestion_control` to apply when routing the data. 
#[inline] - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - Self { - congestion_control, - ..self - } + pub fn congestion_control(mut self, congestion_control: CongestionControl) -> Self { + self.congestion_control = congestion_control; + self } /// Change the priority of the written data. #[inline] - fn priority(self, priority: Priority) -> Self { - Self { priority, ..self } - } - - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. - #[inline] - fn express(self, is_express: bool) -> Self { - Self { is_express, ..self } + pub fn priority(mut self, priority: Priority) -> Self { + self.priority = priority; + self } -} -impl<'a, 'b> PublisherBuilder<'a, 'b> { /// Restrict the matching subscribers that will receive the published data /// to the ones that have the given [`Locality`](crate::prelude::Locality). 
#[zenoh_macros::unstable] @@ -857,20 +761,6 @@ impl<'a, 'b> PublisherBuilder<'a, 'b> { self.destination = destination; self } - - // internal function for perfroming the publication - fn create_one_shot_publisher(self) -> ZResult> { - Ok(Publisher { - session: self.session, - #[cfg(feature = "unstable")] - eid: 0, // This is a one shot Publisher - key_expr: self.key_expr?, - congestion_control: self.congestion_control, - priority: self.priority, - is_express: self.is_express, - destination: self.destination, - }) - } } impl<'a, 'b> Resolvable for PublisherBuilder<'a, 'b> { @@ -913,16 +803,11 @@ impl<'a, 'b> SyncResolve for PublisherBuilder<'a, 'b> { self.session .declare_publication_intent(key_expr.clone()) .res_sync()?; - #[cfg(feature = "unstable")] - let eid = self.session.runtime.next_id(); let publisher = Publisher { session: self.session, - #[cfg(feature = "unstable")] - eid, key_expr, congestion_control: self.congestion_control, priority: self.priority, - is_express: self.is_express, destination: self.destination, }; log::trace!("publish({:?})", publisher.key_expr); @@ -940,11 +825,8 @@ impl<'a, 'b> AsyncResolve for PublisherBuilder<'a, 'b> { fn resolve_put( publisher: &Publisher<'_>, - payload: Payload, + value: Value, kind: SampleKind, - encoding: Encoding, - timestamp: Option, - #[cfg(feature = "unstable")] source_info: SourceInfo, #[cfg(feature = "unstable")] attachment: Option, ) -> ZResult<()> { log::trace!("write({:?}, [...])", &publisher.key_expr); @@ -953,21 +835,18 @@ fn resolve_put( .as_ref() .unwrap() .clone(); - let timestamp = if timestamp.is_none() { - publisher.session.runtime.new_timestamp() - } else { - timestamp - }; + let timestamp = publisher.session.runtime.new_timestamp(); + if publisher.destination != Locality::SessionLocal { primitives.send_push(Push { wire_expr: publisher.key_expr.to_wire(&publisher.session).to_owned(), ext_qos: ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - publisher.is_express, + 
false, ), ext_tstamp: None, - ext_nodeid: ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), payload: match kind { SampleKind::Put => { #[allow(unused_mut)] @@ -980,16 +859,13 @@ fn resolve_put( } PushBody::Put(Put { timestamp, - encoding: encoding.clone().into(), - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] + encoding: value.encoding.clone(), ext_sinfo: None, #[cfg(feature = "shared-memory")] ext_shm: None, ext_attachment, ext_unknown: vec![], - payload: payload.clone().into(), + payload: value.payload.clone(), }) } SampleKind::Delete => { @@ -1003,9 +879,6 @@ fn resolve_put( } PushBody::Del(Del { timestamp, - #[cfg(feature = "unstable")] - ext_sinfo: source_info.into(), - #[cfg(not(feature = "unstable"))] ext_sinfo: None, ext_attachment, ext_unknown: vec![], @@ -1017,14 +890,14 @@ fn resolve_put( if publisher.destination != Locality::Remote { let data_info = DataInfo { kind, - encoding: Some(encoding), + encoding: Some(value.encoding), timestamp, source_id: None, source_sn: None, qos: QoS::from(ext::QoSType::new( publisher.priority.into(), publisher.congestion_control, - publisher.is_express, + false, )), }; @@ -1032,7 +905,7 @@ fn resolve_put( true, &publisher.key_expr.to_wire(&publisher.session), Some(data_info), - payload.into(), + value.payload, #[cfg(feature = "unstable")] attachment, ); @@ -1055,8 +928,6 @@ pub enum Priority { } impl Priority { - /// Default - pub const DEFAULT: Self = Self::Data; /// The lowest Priority pub const MIN: Self = Self::Background; /// The highest Priority @@ -1251,7 +1122,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { self.callback(crate::handlers::locked(callback)) } - /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the MatchingStatuses for this listener with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// /// # Examples /// ```no_run @@ -1280,7 +1151,7 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] pub fn with(self, handler: Handler) -> MatchingListenerBuilder<'a, Handler> where - Handler: crate::prelude::IntoHandler<'static, MatchingStatus>, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, MatchingStatus>, { let MatchingListenerBuilder { publisher, @@ -1293,21 +1164,21 @@ impl<'a> MatchingListenerBuilder<'a, DefaultHandler> { #[zenoh_macros::unstable] impl<'a, Handler> Resolvable for MatchingListenerBuilder<'a, Handler> where - Handler: IntoHandler<'static, MatchingStatus> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } #[zenoh_macros::unstable] impl<'a, Handler> SyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoHandler<'static, MatchingStatus> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, { #[zenoh_macros::unstable] fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); self.publisher .session .declare_matches_listener_inner(&self.publisher, callback) @@ -1325,8 +1196,8 @@ where #[zenoh_macros::unstable] impl<'a, Handler> AsyncResolve for MatchingListenerBuilder<'a, Handler> where - Handler: IntoHandler<'static, MatchingStatus> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, MatchingStatus> + Send, + Handler::Receiver: Send, { type Future = Ready; @@ -1521,7 +1392,9 @@ mod tests { #[test] fn sample_kind_integrity_in_publication() { + use crate::publication::HasWriteWithSampleKind; use crate::{open, prelude::sync::*}; + use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/publication"; const VALUE: &str = "zenoh"; @@ 
-1530,17 +1403,11 @@ mod tests { let session = open(Config::default()).res().unwrap(); let sub = session.declare_subscriber(KEY_EXPR).res().unwrap(); let pub_ = session.declare_publisher(KEY_EXPR).res().unwrap(); - - match kind { - SampleKind::Put => pub_.put(VALUE).res().unwrap(), - SampleKind::Delete => pub_.delete().res().unwrap(), - } + pub_.write(kind, VALUE).res().unwrap(); let sample = sub.recv().unwrap(); assert_eq!(sample.kind, kind); - if let SampleKind::Put = kind { - assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); - } + assert_eq!(sample.value.to_string(), VALUE); } sample_kind_integrity_in_publication_with(SampleKind::Put); @@ -1550,6 +1417,7 @@ mod tests { #[test] fn sample_kind_integrity_in_put_builder() { use crate::{open, prelude::sync::*}; + use zenoh_protocol::core::SampleKind; const KEY_EXPR: &str = "test/sample_kind_integrity/put_builder"; const VALUE: &str = "zenoh"; @@ -1566,7 +1434,7 @@ mod tests { assert_eq!(sample.kind, kind); if let SampleKind::Put = kind { - assert_eq!(sample.payload.deserialize::().unwrap(), VALUE); + assert_eq!(sample.value.to_string(), VALUE); } } diff --git a/zenoh/src/query.rs b/zenoh/src/query.rs index 3a380bd1c9..f75df8c50e 100644 --- a/zenoh/src/query.rs +++ b/zenoh/src/query.rs @@ -13,11 +13,11 @@ // //! Query primitives. + use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::*; #[zenoh_macros::unstable] use crate::sample::Attachment; -use crate::sample::QoSBuilder; use crate::Session; use std::collections::HashMap; use std::future::Ready; @@ -26,10 +26,10 @@ use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_result::ZResult; /// The [`Queryable`](crate::queryable::Queryable)s that should be target of a [`get`](Session::get). -pub type QueryTarget = zenoh_protocol::network::request::ext::TargetType; +pub use zenoh_protocol::core::QueryTarget; /// The kind of consolidation. 
-pub type ConsolidationMode = zenoh_protocol::zenoh::query::Consolidation; +pub use zenoh_protocol::core::ConsolidationMode; /// The operation: either manual or automatic. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -41,26 +41,29 @@ pub enum Mode { /// The replies consolidation strategy to apply on replies to a [`get`](Session::get). #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct QueryConsolidation { - pub(crate) mode: ConsolidationMode, + pub(crate) mode: Mode, } impl QueryConsolidation { - pub const DEFAULT: Self = Self::AUTO; /// Automatic query consolidation strategy selection. - pub const AUTO: Self = Self { - mode: ConsolidationMode::Auto, - }; + pub const AUTO: Self = Self { mode: Mode::Auto }; pub(crate) const fn from_mode(mode: ConsolidationMode) -> Self { - Self { mode } + Self { + mode: Mode::Manual(mode), + } } /// Returns the requested [`ConsolidationMode`]. - pub fn mode(&self) -> ConsolidationMode { + pub fn mode(&self) -> Mode { self.mode } } - +impl From> for QueryConsolidation { + fn from(mode: Mode) -> Self { + Self { mode } + } +} impl From for QueryConsolidation { fn from(mode: ConsolidationMode) -> Self { Self::from_mode(mode) @@ -69,7 +72,7 @@ impl From for QueryConsolidation { impl Default for QueryConsolidation { fn default() -> Self { - Self::DEFAULT + QueryConsolidation::AUTO } } @@ -122,70 +125,12 @@ pub struct GetBuilder<'a, 'b, Handler> { pub(crate) scope: ZResult>>, pub(crate) target: QueryTarget, pub(crate) consolidation: QueryConsolidation, - pub(crate) qos: QoSBuilder, pub(crate) destination: Locality, pub(crate) timeout: Duration, pub(crate) handler: Handler, pub(crate) value: Option, #[cfg(feature = "unstable")] pub(crate) attachment: Option, - #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, -} - -#[zenoh_macros::unstable] -impl SampleBuilderTrait for GetBuilder<'_, '_, Handler> { - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - 
..self - } - } - - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: T) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } -} - -impl QoSBuilderTrait for GetBuilder<'_, '_, DefaultHandler> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let qos = self.qos.congestion_control(congestion_control); - Self { qos, ..self } - } - - fn priority(self, priority: Priority) -> Self { - let qos = self.qos.priority(priority); - Self { qos, ..self } - } - - fn express(self, is_express: bool) -> Self { - let qos = self.qos.express(is_express); - Self { qos, ..self } - } -} - -impl ValueBuilderTrait for GetBuilder<'_, '_, Handler> { - fn encoding>(self, encoding: T) -> Self { - let value = Some(self.value.unwrap_or_default().encoding(encoding)); - Self { value, ..self } - } - - fn payload>(self, payload: T) -> Self { - let value = Some(self.value.unwrap_or_default().payload(payload)); - Self { value, ..self } - } - fn value>(self, value: T) -> Self { - let value: Value = value.into(); - Self { - value: if value.is_empty() { None } else { Some(value) }, - ..self - } - } } impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { @@ -217,14 +162,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, - qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, - #[cfg(feature = "unstable")] - source_info, handler: _, } = self; GetBuilder { @@ -233,14 +175,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, - qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, - #[cfg(feature = "unstable")] - source_info, handler: callback, } } @@ -277,7 +216,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the replies for this query with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// /// # Examples /// ``` @@ -300,7 +239,7 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> GetBuilder<'a, 'b, Handler> where - Handler: IntoHandler<'static, Reply>, + Handler: IntoCallbackReceiverPair<'static, Reply>, { let GetBuilder { session, @@ -308,14 +247,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, - qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, - #[cfg(feature = "unstable")] - source_info, handler: _, } = self; GetBuilder { @@ -324,14 +260,11 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { scope, target, consolidation, - qos, destination, timeout, value, #[cfg(feature = "unstable")] attachment, - #[cfg(feature = "unstable")] - source_info, handler, } } @@ -339,34 +272,48 @@ impl<'a, 'b> GetBuilder<'a, 'b, DefaultHandler> { impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { /// Change the target of the query. #[inline] - pub fn target(self, target: QueryTarget) -> Self { - Self { target, ..self } + pub fn target(mut self, target: QueryTarget) -> Self { + self.target = target; + self } /// Change the consolidation mode of the query. #[inline] - pub fn consolidation>(self, consolidation: QC) -> Self { - Self { - consolidation: consolidation.into(), - ..self - } + pub fn consolidation>(mut self, consolidation: QC) -> Self { + self.consolidation = consolidation.into(); + self } /// Restrict the matching queryables that will receive the query /// to the ones that have the given [`Locality`](crate::prelude::Locality). #[zenoh_macros::unstable] #[inline] - pub fn allowed_destination(self, destination: Locality) -> Self { - Self { - destination, - ..self - } + pub fn allowed_destination(mut self, destination: Locality) -> Self { + self.destination = destination; + self } /// Set query timeout. 
#[inline] - pub fn timeout(self, timeout: Duration) -> Self { - Self { timeout, ..self } + pub fn timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Set query value. + #[inline] + pub fn with_value(mut self, value: IntoValue) -> Self + where + IntoValue: Into, + { + self.value = Some(value.into()); + self + } + + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self } /// By default, `get` guarantees that it will only receive replies whose key expressions intersect @@ -376,11 +323,29 @@ impl<'a, 'b, Handler> GetBuilder<'a, 'b, Handler> { /// expressions that don't intersect with the query's. #[zenoh_macros::unstable] pub fn accept_replies(self, accept: ReplyKeyExpr) -> Self { + let Self { + session, + selector, + scope, + target, + consolidation, + destination, + timeout, + value, + attachment, + handler, + } = self; Self { - selector: self - .selector - .and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), - ..self + session, + selector: selector.and_then(|s| s.accept_any_keyexpr(accept == ReplyKeyExpr::Any)), + scope, + target, + consolidation, + destination, + timeout, + value, + attachment, + handler, } } } @@ -405,19 +370,19 @@ impl Default for ReplyKeyExpr { impl Resolvable for GetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { - type To = ZResult; + type To = ZResult; } impl SyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); self.session .query( @@ -425,14 +390,11 @@ 
where &self.scope?, self.target, self.consolidation, - self.qos.into(), self.destination, self.timeout, self.value, #[cfg(feature = "unstable")] self.attachment, - #[cfg(feature = "unstable")] - self.source_info, callback, ) .map(|_| receiver) @@ -441,8 +403,8 @@ where impl AsyncResolve for GetBuilder<'_, '_, Handler> where - Handler: IntoHandler<'static, Reply> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Reply> + Send, + Handler::Receiver: Send, { type Future = Ready; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index 0ad3a36c07..751e454610 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -14,30 +14,27 @@ //! Queryable primitives. -use crate::encoding::Encoding; use crate::handlers::{locked, DefaultHandler}; use crate::net::primitives::Primitives; use crate::prelude::*; -use crate::sample::builder::SampleBuilder; -use crate::sample::QoSBuilder; -#[cfg(feature = "unstable")] -use crate::sample::SourceInfo; -use crate::Id; +#[zenoh_macros::unstable] +use crate::query::ReplyKeyExpr; +#[zenoh_macros::unstable] +use crate::sample::Attachment; +use crate::sample::DataInfo; use crate::SessionRef; use crate::Undeclarable; -#[cfg(feature = "unstable")] -use crate::{query::ReplyKeyExpr, sample::Attachment}; + use std::fmt; use std::future::Ready; use std::ops::Deref; use std::sync::Arc; -use uhlc::Timestamp; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -use zenoh_protocol::{ - core::{EntityId, WireExpr}, - network::{response, Mapping, RequestId, Response, ResponseFinal}, - zenoh::{self, reply::ReplyBody, Del, Put, ResponseBody}, -}; +use zenoh_protocol::core::WireExpr; +use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFinal}; +use zenoh_protocol::zenoh::ext::ValueType; +use zenoh_protocol::zenoh::reply::ext::ConsolidationType; +use zenoh_protocol::zenoh::{self, ResponseBody}; use zenoh_result::ZResult; pub(crate) struct QueryInner { @@ -59,7 +56,7 @@ impl Drop for 
QueryInner { fn drop(&mut self) { self.primitives.send_response_final(ResponseFinal { rid: self.qid, - ext_qos: response::ext::QoSType::RESPONSE_FINAL, + ext_qos: response::ext::QoSType::response_final_default(), ext_tstamp: None, }); } @@ -69,7 +66,6 @@ impl Drop for QueryInner { #[derive(Clone)] pub struct Query { pub(crate) inner: Arc, - pub(crate) eid: EntityId, } impl Query { @@ -100,108 +96,21 @@ impl Query { self.inner.value.as_ref() } - /// This Query's payload. - #[inline(always)] - pub fn payload(&self) -> Option<&Payload> { - self.inner.value.as_ref().map(|v| &v.payload) - } - - /// This Query's encoding. - #[inline(always)] - pub fn encoding(&self) -> Option<&Encoding> { - self.inner.value.as_ref().map(|v| &v.encoding) - } - #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&Attachment> { self.inner.attachment.as_ref() } - /// Sends a reply in the form of [`Sample`] to this Query. - /// - /// By default, queries only accept replies whose key expression intersects with the query's. - /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), - /// replying on a disjoint key expression will result in an error when resolving the reply. - /// This api is for internal use only. - #[inline(always)] - #[cfg(feature = "unstable")] - #[doc(hidden)] - pub fn reply_sample(&self, sample: Sample) -> ReplySample<'_> { - ReplySample { - query: self, - sample, - } - } - /// Sends a reply to this Query. /// /// By default, queries only accept replies whose key expression intersects with the query's. /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), /// replying on a disjoint key expression will result in an error when resolving the reply. 
#[inline(always)] - pub fn reply<'b, TryIntoKeyExpr, IntoPayload>( - &self, - key_expr: TryIntoKeyExpr, - payload: IntoPayload, - ) -> ReplyPutBuilder<'_, 'b> - where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - IntoPayload: Into, - { + pub fn reply(&self, result: Result) -> ReplyBuilder<'_> { ReplyBuilder { query: self, - key_expr: key_expr.try_into().map_err(Into::into), - qos: response::ext::QoSType::RESPONSE.into(), - kind: ReplyBuilderPut { - payload: payload.into(), - encoding: Encoding::default(), - }, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - } - } - - /// Sends a error reply to this Query. - /// - #[inline(always)] - pub fn reply_err(&self, value: IntoValue) -> ReplyErrBuilder<'_> - where - IntoValue: Into, - { - ReplyErrBuilder { - query: self, - value: value.into(), - } - } - - /// Sends a delete reply to this Query. - /// - /// By default, queries only accept replies whose key expression intersects with the query's. - /// Unless the query has enabled disjoint replies (you can check this through [`Query::accepts_replies`]), - /// replying on a disjoint key expression will result in an error when resolving the reply. 
- #[inline(always)] - pub fn reply_del<'b, TryIntoKeyExpr>( - &self, - key_expr: TryIntoKeyExpr, - ) -> ReplyDeleteBuilder<'_, 'b> - where - TryIntoKeyExpr: TryInto>, - >>::Error: Into, - { - ReplyBuilder { - query: self, - key_expr: key_expr.try_into().map_err(Into::into), - qos: response::ext::QoSType::RESPONSE.into(), - kind: ReplyBuilderDelete, - timestamp: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, + result, } } @@ -244,300 +153,142 @@ impl fmt::Display for Query { } } -pub struct ReplySample<'a> { - query: &'a Query, - sample: Sample, -} - -impl Resolvable for ReplySample<'_> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplySample<'_> { - fn res_sync(self) -> ::To { - self.query._reply_sample(self.sample) - } -} - -impl AsyncResolve for ReplySample<'_> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -#[derive(Debug)] -pub struct ReplyBuilderPut { - payload: super::Payload, - encoding: super::Encoding, -} -#[derive(Debug)] -pub struct ReplyBuilderDelete; - -/// A builder returned by [`Query::reply()`](Query::reply) and [`Query::reply_del()`](Query::reply_del) +/// A builder returned by [`Query::reply()`](Query::reply). 
#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct ReplyBuilder<'a, 'b, T> { +pub struct ReplyBuilder<'a> { query: &'a Query, - key_expr: ZResult>, - kind: T, - timestamp: Option, - qos: QoSBuilder, - - #[cfg(feature = "unstable")] - source_info: SourceInfo, - - #[cfg(feature = "unstable")] - attachment: Option, -} - -pub type ReplyPutBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderPut>; - -pub type ReplyDeleteBuilder<'a, 'b> = ReplyBuilder<'a, 'b, ReplyBuilderDelete>; - -impl TimestampBuilderTrait for ReplyBuilder<'_, '_, T> { - fn timestamp>>(self, timestamp: U) -> Self { - Self { - timestamp: timestamp.into(), - ..self - } - } -} - -#[cfg(feature = "unstable")] -impl SampleBuilderTrait for ReplyBuilder<'_, '_, T> { - #[cfg(feature = "unstable")] - fn attachment>>(self, attachment: U) -> Self { - Self { - attachment: attachment.into(), - ..self - } - } - - #[cfg(feature = "unstable")] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - source_info, - ..self - } - } + result: Result, } -impl QoSBuilderTrait for ReplyBuilder<'_, '_, T> { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let qos = self.qos.congestion_control(congestion_control); - Self { qos, ..self } - } - - fn priority(self, priority: Priority) -> Self { - let qos = self.qos.priority(priority); - Self { qos, ..self } - } - - fn express(self, is_express: bool) -> Self { - let qos = self.qos.express(is_express); - Self { qos, ..self } - } -} - -impl ValueBuilderTrait for ReplyBuilder<'_, '_, ReplyBuilderPut> { - fn encoding>(self, encoding: T) -> Self { - Self { - kind: ReplyBuilderPut { - encoding: encoding.into(), - ..self.kind - }, - ..self - } - } - - fn payload>(self, payload: T) -> Self { - Self { - kind: ReplyBuilderPut { - payload: payload.into(), - ..self.kind - }, - ..self - } - } - fn value>(self, value: T) -> Self { - let Value { 
payload, encoding } = value.into(); - Self { - kind: ReplyBuilderPut { payload, encoding }, - ..self +impl<'a> ReplyBuilder<'a> { + #[allow(clippy::result_large_err)] + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Result { + match &mut self.result { + Ok(sample) => { + sample.attachment = Some(attachment); + Ok(self) + } + Err(_) => Err((self, attachment)), } } } -impl Resolvable for ReplyBuilder<'_, '_, T> { +impl<'a> Resolvable for ReplyBuilder<'a> { type To = ZResult<()>; } -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { +impl SyncResolve for ReplyBuilder<'_> { fn res_sync(self) -> ::To { - let key_expr = self.key_expr?.into_owned(); - let sample = SampleBuilder::put(key_expr, self.kind.payload) - .encoding(self.kind.encoding) - .timestamp(self.timestamp) - .qos(self.qos.into()); - #[cfg(feature = "unstable")] - let sample = sample.source_info(self.source_info); - #[cfg(feature = "unstable")] - let sample = sample.attachment(self.attachment); - self.query._reply_sample(sample.into()) - } -} - -impl SyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - fn res_sync(self) -> ::To { - let key_expr = self.key_expr?.into_owned(); - let sample = SampleBuilder::delete(key_expr) - .timestamp(self.timestamp) - .qos(self.qos.into()); - #[cfg(feature = "unstable")] - let sample = sample.source_info(self.source_info); - #[cfg(feature = "unstable")] - let sample = sample.attachment(self.attachment); - self.query._reply_sample(sample.into()) - } -} - -impl Query { - fn _reply_sample(&self, sample: Sample) -> ZResult<()> { - if !self._accepts_any_replies().unwrap_or(false) - && !self.key_expr().intersects(&sample.key_expr) - { - bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.key_expr()) - } - #[cfg(not(feature = "unstable"))] - let ext_sinfo = None; - #[cfg(feature = "unstable")] - let ext_sinfo = 
sample.source_info.into(); - self.inner.primitives.send_response(Response { - rid: self.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(sample.key_expr.into()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Reply(zenoh::Reply { - consolidation: zenoh::Consolidation::DEFAULT, - ext_unknown: vec![], - payload: match sample.kind { - SampleKind::Put => ReplyBody::Put(Put { - timestamp: sample.timestamp, - encoding: sample.encoding.into(), - ext_sinfo, + match self.result { + Ok(sample) => { + if !self.query._accepts_any_replies().unwrap_or(false) + && !self.query.key_expr().intersects(&sample.key_expr) + { + bail!("Attempted to reply on `{}`, which does not intersect with query `{}`, despite query only allowing replies on matching key expressions", sample.key_expr, self.query.key_expr()) + } + let Sample { + key_expr, + value: Value { payload, encoding }, + kind, + timestamp, + qos, + #[cfg(feature = "unstable")] + source_info, + #[cfg(feature = "unstable")] + attachment, + } = sample; + #[allow(unused_mut)] + let mut data_info = DataInfo { + kind, + encoding: Some(encoding), + timestamp, + qos, + source_id: None, + source_sn: None, + }; + #[allow(unused_mut)] + let mut ext_attachment = None; + #[cfg(feature = "unstable")] + { + data_info.source_id = source_info.source_id; + data_info.source_sn = source_info.source_sn; + if let Some(attachment) = attachment { + ext_attachment = Some(attachment.into()); + } + } + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(key_expr.into()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Reply(zenoh::Reply { + timestamp: data_info.timestamp, + encoding: data_info.encoding.unwrap_or_default(), + ext_sinfo: if data_info.source_id.is_some() || data_info.source_sn.is_some() + { + Some(zenoh::reply::ext::SourceInfoType { + zid: data_info.source_id.unwrap_or_default(), + eid: 0, // 
@TODO use proper EntityId (#703) + sn: data_info.source_sn.unwrap_or_default() as u32, + }) + } else { + None + }, + ext_consolidation: ConsolidationType::default(), #[cfg(feature = "shared-memory")] ext_shm: None, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, + ext_attachment, ext_unknown: vec![], - payload: sample.payload.into(), + payload, + }), + ext_qos: response::ext::QoSType::response_default(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: 0, // @TODO use proper EntityId (#703) }), - SampleKind::Delete => ReplyBody::Del(Del { - timestamp: sample.timestamp, - ext_sinfo, - #[cfg(feature = "unstable")] - ext_attachment: sample.attachment.map(|a| a.into()), - #[cfg(not(feature = "unstable"))] - ext_attachment: None, + }); + Ok(()) + } + Err(payload) => { + self.query.inner.primitives.send_response(Response { + rid: self.query.inner.qid, + wire_expr: WireExpr { + scope: 0, + suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), + mapping: Mapping::Sender, + }, + payload: ResponseBody::Err(zenoh::Err { + timestamp: None, + is_infrastructure: false, + ext_sinfo: None, ext_unknown: vec![], + ext_body: Some(ValueType { + #[cfg(feature = "shared-memory")] + ext_shm: None, + payload: payload.payload, + encoding: payload.encoding, + }), + code: 0, // TODO }), - }, - }), - ext_qos: sample.qos.into(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.inner.zid, - eid: self.eid, - }), - }); - Ok(()) - } -} - -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderPut> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -impl AsyncResolve for ReplyBuilder<'_, '_, ReplyBuilderDelete> { - type Future = Ready; - - fn res_async(self) -> Self::Future { - std::future::ready(self.res_sync()) - } -} - -/// 
A builder returned by [`Query::reply_err()`](Query::reply_err). -#[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] -#[derive(Debug)] -pub struct ReplyErrBuilder<'a> { - query: &'a Query, - value: Value, -} - -impl ValueBuilderTrait for ReplyErrBuilder<'_> { - fn encoding>(self, encoding: T) -> Self { - Self { - value: self.value.encoding(encoding), - ..self - } - } - - fn payload>(self, payload: T) -> Self { - Self { - value: self.value.payload(payload), - ..self - } - } - - fn value>(self, value: T) -> Self { - Self { - value: value.into(), - ..self + ext_qos: response::ext::QoSType::response_default(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid: self.query.inner.zid, + eid: 0, // @TODO use proper EntityId (#703) + }), + }); + Ok(()) + } } } } -impl<'a> Resolvable for ReplyErrBuilder<'a> { - type To = ZResult<()>; -} - -impl SyncResolve for ReplyErrBuilder<'_> { - fn res_sync(self) -> ::To { - self.query.inner.primitives.send_response(Response { - rid: self.query.inner.qid, - wire_expr: WireExpr { - scope: 0, - suffix: std::borrow::Cow::Owned(self.query.key_expr().as_str().to_owned()), - mapping: Mapping::Sender, - }, - payload: ResponseBody::Err(zenoh::Err { - encoding: self.value.encoding.into(), - ext_sinfo: None, - ext_unknown: vec![], - payload: self.value.payload.into(), - }), - ext_qos: response::ext::QoSType::RESPONSE, - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid: self.query.inner.zid, - eid: self.query.eid, - }), - }); - Ok(()) - } -} - -impl<'a> AsyncResolve for ReplyErrBuilder<'a> { +impl<'a> AsyncResolve for ReplyBuilder<'a> { type Future = Ready; fn res_async(self) -> Self::Future { @@ -583,7 +334,7 @@ impl fmt::Debug for QueryableState { /// let queryable = session.declare_queryable("key/expression").res().await.unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling 
query '{}'", query.selector()); -/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") +/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) /// .res() /// .await /// .unwrap(); @@ -744,7 +495,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { self.callback(locked(callback)) } - /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the queries for this Queryable with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// # Examples /// ```no_run @@ -767,7 +518,7 @@ impl<'a, 'b> QueryableBuilder<'a, 'b, DefaultHandler> { #[inline] pub fn with(self, handler: Handler) -> QueryableBuilder<'a, 'b, Handler> where - Handler: crate::prelude::IntoHandler<'static, Query>, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Query>, { let QueryableBuilder { session, @@ -803,7 +554,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { } } -/// A queryable that provides data through a [`Handler`](crate::prelude::IntoHandler). +/// A queryable that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// Queryables can be created from a zenoh [`Session`] /// with the [`declare_queryable`](crate::Session::declare_queryable) function @@ -827,7 +578,7 @@ impl<'a, 'b, Handler> QueryableBuilder<'a, 'b, Handler> { /// .unwrap(); /// while let Ok(query) = queryable.recv_async().await { /// println!(">> Handling query '{}'", query.selector()); -/// query.reply(KeyExpr::try_from("key/expression").unwrap(), "value") +/// query.reply(Ok(Sample::try_from("key/expression", "value").unwrap())) /// .res() /// .await /// .unwrap(); @@ -842,30 +593,6 @@ pub struct Queryable<'a, Receiver> { } impl<'a, Receiver> Queryable<'a, Receiver> { - /// Returns the [`EntityGlobalId`] of this Queryable. 
- /// - /// # Examples - /// ``` - /// # #[tokio::main] - /// # async fn main() { - /// use zenoh::prelude::r#async::*; - /// - /// let session = zenoh::open(config::peer()).res().await.unwrap(); - /// let queryable = session.declare_queryable("key/expression") - /// .res() - /// .await - /// .unwrap(); - /// let queryable_id = queryable.id(); - /// # } - /// ``` - #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { - zid: self.queryable.session.zid(), - eid: self.queryable.state.id, - } - } - #[inline] pub fn undeclare(self) -> impl Resolve> + 'a { Undeclarable::undeclare_inner(self, ()) @@ -888,20 +615,20 @@ impl Deref for Queryable<'_, Receiver> { impl<'a, Handler> Resolvable for QueryableBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Query> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Query> + Send, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } impl<'a, Handler> SyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Query> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Query> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { let session = self.session; - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); session .declare_queryable_inner( &self.key_expr?.to_wire(&session), @@ -922,8 +649,8 @@ where impl<'a, Handler> AsyncResolve for QueryableBuilder<'a, '_, Handler> where - Handler: IntoHandler<'static, Query> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Query> + Send, + Handler::Receiver: Send, { type Future = Ready; diff --git a/zenoh/src/sample/mod.rs b/zenoh/src/sample.rs similarity index 62% rename from zenoh/src/sample/mod.rs rename to zenoh/src/sample.rs index 0ef8462d2a..e94b1a9973 100644 --- a/zenoh/src/sample/mod.rs +++ b/zenoh/src/sample.rs @@ -13,20 +13,17 @@ 
// //! Sample primitives -use crate::encoding::Encoding; -use crate::payload::Payload; -use crate::prelude::{KeyExpr, Value}; -use crate::sample::builder::{QoSBuilderTrait, ValueBuilderTrait}; -use crate::time::Timestamp; +use crate::buffers::ZBuf; +use crate::prelude::ZenohId; +use crate::prelude::{KeyExpr, SampleKind, Value}; +use crate::query::Reply; +use crate::time::{new_reception_timestamp, Timestamp}; use crate::Priority; #[zenoh_macros::unstable] use serde::Serialize; -use std::{convert::TryFrom, fmt}; -use zenoh_protocol::core::CongestionControl; -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::ext::QoSType; - -pub mod builder; +use std::convert::{TryFrom, TryInto}; +use zenoh_protocol::core::{CongestionControl, Encoding}; +use zenoh_protocol::network::push::ext::QoSType; pub type SourceSn = u64; @@ -53,99 +50,17 @@ pub(crate) struct DataInfo { pub kind: SampleKind, pub encoding: Option, pub timestamp: Option, - pub source_id: Option, + pub source_id: Option, pub source_sn: Option, pub qos: QoS, } -pub(crate) trait DataInfoIntoSample { - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into; -} - -impl DataInfoIntoSample for DataInfo { - // This function is for internal use only. - // Technically it may create invalid sample (e.g. a delete sample with a payload and encoding) - // The test for it is intentionally not added to avoid inserting extra "if" into hot path. - // The correctness of the data should be ensured by the caller. 
- #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: self.kind, - encoding: self.encoding.unwrap_or_default(), - timestamp: self.timestamp, - qos: self.qos, - #[cfg(feature = "unstable")] - source_info: SourceInfo { - source_id: self.source_id, - source_sn: self.source_sn, - }, - #[cfg(feature = "unstable")] - attachment, - } - } -} - -impl DataInfoIntoSample for Option { - #[inline] - fn into_sample( - self, - key_expr: IntoKeyExpr, - payload: IntoPayload, - #[cfg(feature = "unstable")] attachment: Option, - ) -> Sample - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - if let Some(data_info) = self { - data_info.into_sample( - key_expr, - payload, - #[cfg(feature = "unstable")] - attachment, - ) - } else { - Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment, - } - } - } -} - /// Informations on the source of a zenoh [`Sample`]. #[zenoh_macros::unstable] #[derive(Debug, Clone)] pub struct SourceInfo { - /// The [`EntityGlobalId`] of the zenoh entity that published the concerned [`Sample`]. - pub source_id: Option, + /// The [`ZenohId`] of the zenoh instance that published the concerned [`Sample`]. + pub source_id: Option, /// The sequence number of the [`Sample`] from the source. 
pub source_sn: Option, } @@ -153,11 +68,6 @@ pub struct SourceInfo { #[test] #[cfg(feature = "unstable")] fn source_info_stack_size() { - use crate::{ - sample::{SourceInfo, SourceSn}, - ZenohId, - }; - assert_eq!(std::mem::size_of::(), 16); assert_eq!(std::mem::size_of::>(), 17); assert_eq!(std::mem::size_of::>(), 16); @@ -172,23 +82,6 @@ impl SourceInfo { source_sn: None, } } - pub(crate) fn is_empty(&self) -> bool { - self.source_id.is_none() && self.source_sn.is_none() - } -} - -#[zenoh_macros::unstable] -impl From for Option { - fn from(source_info: SourceInfo) -> Option { - if source_info.is_empty() { - None - } else { - Some(zenoh_protocol::zenoh::put::ext::SourceInfoType { - id: source_info.source_id.unwrap_or_default(), - sn: source_info.source_sn.unwrap_or_default() as u32, - }) - } - } } #[zenoh_macros::unstable] @@ -270,17 +163,6 @@ mod attachment { } } } - #[zenoh_macros::unstable] - impl From for Option { - fn from(value: AttachmentBuilder) -> Self { - if value.inner.is_empty() { - None - } else { - Some(value.into()) - } - } - } - #[zenoh_macros::unstable] #[derive(Clone)] pub struct Attachment { @@ -435,197 +317,215 @@ mod attachment { } } } - -/// The kind of a `Sample`. -#[repr(u8)] -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] -pub enum SampleKind { - /// if the `Sample` was issued by a `put` operation. - #[default] - Put = 0, - /// if the `Sample` was issued by a `delete` operation. 
- Delete = 1, -} - -impl fmt::Display for SampleKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SampleKind::Put => write!(f, "PUT"), - SampleKind::Delete => write!(f, "DELETE"), - } - } -} - -impl TryFrom for SampleKind { - type Error = u64; - fn try_from(kind: u64) -> Result { - match kind { - 0 => Ok(SampleKind::Put), - 1 => Ok(SampleKind::Delete), - _ => Err(kind), - } - } -} - #[zenoh_macros::unstable] pub use attachment::{Attachment, AttachmentBuilder, AttachmentIterator}; -/// Structure with public fields for sample. It's convenient if it's necessary to decompose a sample into its fields. -pub struct SampleFields { - pub key_expr: KeyExpr<'static>, - pub payload: Payload, - pub kind: SampleKind, - pub encoding: Encoding, - pub timestamp: Option, - pub express: bool, - pub priority: Priority, - pub congestion_control: CongestionControl, - #[cfg(feature = "unstable")] - pub source_info: SourceInfo, - #[cfg(feature = "unstable")] - pub attachment: Option, -} - -impl From for SampleFields { - fn from(sample: Sample) -> Self { - SampleFields { - key_expr: sample.key_expr, - payload: sample.payload, - kind: sample.kind, - encoding: sample.encoding, - timestamp: sample.timestamp, - express: sample.qos.express(), - priority: sample.qos.priority(), - congestion_control: sample.qos.congestion_control(), - #[cfg(feature = "unstable")] - source_info: sample.source_info, - #[cfg(feature = "unstable")] - attachment: sample.attachment, - } - } -} - /// A zenoh sample. #[non_exhaustive] #[derive(Clone, Debug)] pub struct Sample { - pub(crate) key_expr: KeyExpr<'static>, - pub(crate) payload: Payload, - pub(crate) kind: SampleKind, - pub(crate) encoding: Encoding, - pub(crate) timestamp: Option, - pub(crate) qos: QoS, + /// The key expression on which this Sample was published. + pub key_expr: KeyExpr<'static>, + /// The value of this Sample. + pub value: Value, + /// The kind of this Sample. 
+ pub kind: SampleKind, + /// The [`Timestamp`] of this Sample. + pub timestamp: Option, + /// Quality of service settings this sample was sent with. + pub qos: QoS, #[cfg(feature = "unstable")] - pub(crate) source_info: SourceInfo, + ///

+ /// 🔬 + /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. + /// To use it, you must enable zenoh's unstable feature flag. + ///
+ /// + /// Infos on the source of this Sample. + pub source_info: SourceInfo, #[cfg(feature = "unstable")] - pub(crate) attachment: Option, + ///
+ /// 🔬 + /// This API has been marked as unstable: it works as advertised, but we may change it in a future release. + /// To use it, you must enable zenoh's unstable feature flag. + ///
+ /// + /// A map of key-value pairs, where each key and value are byte-slices. + pub attachment: Option, } impl Sample { - /// Gets the key expression on which this Sample was published. + /// Creates a new Sample. #[inline] - pub fn key_expr(&self) -> &KeyExpr<'static> { - &self.key_expr - } - - /// Gets the payload of this Sample. - #[inline] - pub fn payload(&self) -> &Payload { - &self.payload + pub fn new(key_expr: IntoKeyExpr, value: IntoValue) -> Self + where + IntoKeyExpr: Into>, + IntoValue: Into, + { + Sample { + key_expr: key_expr.into(), + value: value.into(), + kind: SampleKind::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } } - - /// Gets the kind of this Sample. + /// Creates a new Sample. #[inline] - pub fn kind(&self) -> SampleKind { - self.kind + pub fn try_from( + key_expr: TryIntoKeyExpr, + value: IntoValue, + ) -> Result + where + TryIntoKeyExpr: TryInto>, + >>::Error: Into, + IntoValue: Into, + { + Ok(Sample { + key_expr: key_expr.try_into().map_err(Into::into)?, + value: value.into(), + kind: SampleKind::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + }) } - /// Gets the encoding of this sample + /// Creates a new Sample with optional data info. 
#[inline] - pub fn encoding(&self) -> &Encoding { - &self.encoding + pub(crate) fn with_info( + key_expr: KeyExpr<'static>, + payload: ZBuf, + data_info: Option, + ) -> Self { + let mut value: Value = payload.into(); + if let Some(data_info) = data_info { + if let Some(encoding) = &data_info.encoding { + value.encoding = encoding.clone(); + } + Sample { + key_expr, + value, + kind: data_info.kind, + timestamp: data_info.timestamp, + qos: data_info.qos, + #[cfg(feature = "unstable")] + source_info: data_info.into(), + #[cfg(feature = "unstable")] + attachment: None, + } + } else { + Sample { + key_expr, + value, + kind: SampleKind::default(), + timestamp: None, + qos: QoS::default(), + #[cfg(feature = "unstable")] + source_info: SourceInfo::empty(), + #[cfg(feature = "unstable")] + attachment: None, + } + } } /// Gets the timestamp of this Sample. #[inline] - pub fn timestamp(&self) -> Option<&Timestamp> { + pub fn get_timestamp(&self) -> Option<&Timestamp> { self.timestamp.as_ref() } - /// Gets the quality of service settings this Sample was sent with. + /// Sets the timestamp of this Sample. #[inline] - pub fn qos(&self) -> &QoS { - &self.qos + pub fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = Some(timestamp); + self } - /// Gets infos on the source of this Sample. + /// Sets the source info of this Sample. #[zenoh_macros::unstable] #[inline] - pub fn source_info(&self) -> &SourceInfo { - &self.source_info + pub fn with_source_info(mut self, source_info: SourceInfo) -> Self { + self.source_info = source_info; + self } - /// Gets the sample attachment: a map of key-value pairs, where each key and value are byte-slices. - #[zenoh_macros::unstable] #[inline] + /// Ensure that an associated Timestamp is present in this Sample. + /// If not, a new one is created with the current system time and 0x00 as id. 
+ /// Get the timestamp of this sample (either existing one or newly created) + pub fn ensure_timestamp(&mut self) -> &Timestamp { + if let Some(ref timestamp) = self.timestamp { + timestamp + } else { + let timestamp = new_reception_timestamp(); + self.timestamp = Some(timestamp); + self.timestamp.as_ref().unwrap() + } + } + + #[zenoh_macros::unstable] pub fn attachment(&self) -> Option<&Attachment> { self.attachment.as_ref() } -} -impl From for Value { - fn from(sample: Sample) -> Self { - Value::new(sample.payload).encoding(sample.encoding) + #[zenoh_macros::unstable] + pub fn attachment_mut(&mut self) -> &mut Option { + &mut self.attachment } -} -/// Structure containing quality of service data -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct QoS { - inner: QoSType, + #[allow(clippy::result_large_err)] + #[zenoh_macros::unstable] + pub fn with_attachment(mut self, attachment: Attachment) -> Self { + self.attachment = Some(attachment); + self + } } -#[derive(Debug)] -pub struct QoSBuilder(QoS); +impl std::ops::Deref for Sample { + type Target = Value; -impl From for QoSBuilder { - fn from(qos: QoS) -> Self { - QoSBuilder(qos) + fn deref(&self) -> &Self::Target { + &self.value } } -impl From for QoSBuilder { - fn from(qos: QoSType) -> Self { - QoSBuilder(QoS { inner: qos }) +impl std::ops::DerefMut for Sample { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.value } } -impl From for QoS { - fn from(builder: QoSBuilder) -> Self { - builder.0 +impl std::fmt::Display for Sample { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.kind { + SampleKind::Delete => write!(f, "{}({})", self.kind, self.key_expr), + _ => write!(f, "{}({}: {})", self.kind, self.key_expr, self.value), + } } } -impl QoSBuilderTrait for QoSBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let mut inner = self.0.inner; - inner.set_congestion_control(congestion_control); - Self(QoS { inner }) 
- } +impl TryFrom for Sample { + type Error = Value; - fn priority(self, priority: Priority) -> Self { - let mut inner = self.0.inner; - inner.set_priority(priority.into()); - Self(QoS { inner }) + fn try_from(value: Reply) -> Result { + value.sample } +} - fn express(self, is_express: bool) -> Self { - let mut inner = self.0.inner; - inner.set_is_express(is_express); - Self(QoS { inner }) - } +/// Structure containing quality of service data +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +pub struct QoS { + inner: QoSType, } impl QoS { @@ -648,10 +548,28 @@ impl QoS { self.inner.get_congestion_control() } - /// Gets express flag value. If `true`, the message is not batched during transmission, in order to reduce latency. + /// Gets express flag value. If true, the message is not batched during transmission, in order to reduce latency. pub fn express(&self) -> bool { self.inner.is_express() } + + /// Sets priority value. + pub fn with_priority(mut self, priority: Priority) -> Self { + self.inner.set_priority(priority.into()); + self + } + + /// Sets congestion control value. + pub fn with_congestion_control(mut self, congestion_control: CongestionControl) -> Self { + self.inner.set_congestion_control(congestion_control); + self + } + + /// Sets express flag vlaue. 
+ pub fn with_express(mut self, is_express: bool) -> Self { + self.inner.set_is_express(is_express); + self + } } impl From for QoS { @@ -659,9 +577,3 @@ impl From for QoS { QoS { inner: qos } } } - -impl From for QoSType { - fn from(qos: QoS) -> Self { - qos.inner - } -} diff --git a/zenoh/src/sample/builder.rs b/zenoh/src/sample/builder.rs deleted file mode 100644 index bad35024ef..0000000000 --- a/zenoh/src/sample/builder.rs +++ /dev/null @@ -1,289 +0,0 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -use std::marker::PhantomData; - -#[cfg(feature = "unstable")] -use crate::sample::{Attachment, SourceInfo}; -use crate::sample::{QoS, QoSBuilder}; -use crate::Encoding; -use crate::KeyExpr; -use crate::Payload; -use crate::Priority; -use crate::Sample; -use crate::SampleKind; -use crate::Value; -use uhlc::Timestamp; -use zenoh_core::zresult; -use zenoh_protocol::core::CongestionControl; - -pub trait QoSBuilderTrait { - /// Change the `congestion_control` to apply when routing the data. - fn congestion_control(self, congestion_control: CongestionControl) -> Self; - /// Change the priority of the written data. - fn priority(self, priority: Priority) -> Self; - /// Change the `express` policy to apply when routing the data. - /// When express is set to `true`, then the message will not be batched. - /// This usually has a positive impact on latency but negative impact on throughput. 
- fn express(self, is_express: bool) -> Self; -} - -pub trait TimestampBuilderTrait { - /// Sets of clears timestamp - fn timestamp>>(self, timestamp: T) -> Self; -} - -#[zenoh_macros::unstable] -pub trait SampleBuilderTrait { - /// Attach source information - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self; - /// Attach user-provided data in key-value format - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: T) -> Self; -} - -pub trait ValueBuilderTrait { - /// Set the [`Encoding`] - fn encoding>(self, encoding: T) -> Self; - /// Sets the payload - fn payload>(self, payload: T) -> Self; - /// Sets both payload and encoding at once. - /// This is convenient for passing user type which supports `Into` when both payload and encoding depends on user type - fn value>(self, value: T) -> Self; -} - -#[derive(Clone, Debug)] -pub struct SampleBuilderPut; -#[derive(Clone, Debug)] -pub struct SampleBuilderDelete; -#[derive(Clone, Debug)] -pub struct SampleBuilderAny; - -#[derive(Clone, Debug)] -pub struct SampleBuilder { - sample: Sample, - _t: PhantomData, -} - -impl SampleBuilder { - pub fn put( - key_expr: IntoKeyExpr, - payload: IntoPayload, - ) -> SampleBuilder - where - IntoKeyExpr: Into>, - IntoPayload: Into, - { - Self { - sample: Sample { - key_expr: key_expr.into(), - payload: payload.into(), - kind: SampleKind::Put, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - #[cfg(feature = "unstable")] - attachment: None, - }, - _t: PhantomData::, - } - } -} - -impl SampleBuilder { - pub fn delete(key_expr: IntoKeyExpr) -> SampleBuilder - where - IntoKeyExpr: Into>, - { - Self { - sample: Sample { - key_expr: key_expr.into(), - payload: Payload::empty(), - kind: SampleKind::Delete, - encoding: Encoding::default(), - timestamp: None, - qos: QoS::default(), - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), - 
#[cfg(feature = "unstable")] - attachment: None, - }, - _t: PhantomData::, - } - } -} - -impl SampleBuilder { - /// Allows to change keyexpr of [`Sample`] - pub fn keyexpr(self, key_expr: IntoKeyExpr) -> Self - where - IntoKeyExpr: Into>, - { - Self { - sample: Sample { - key_expr: key_expr.into(), - ..self.sample - }, - _t: PhantomData::, - } - } - - // Allows to change qos as a whole of [`Sample`] - pub fn qos(self, qos: QoS) -> Self { - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } -} - -impl TimestampBuilderTrait for SampleBuilder { - fn timestamp>>(self, timestamp: U) -> Self { - Self { - sample: Sample { - timestamp: timestamp.into(), - ..self.sample - }, - _t: PhantomData::, - } - } -} - -#[cfg(feature = "unstable")] -impl SampleBuilderTrait for SampleBuilder { - #[zenoh_macros::unstable] - fn source_info(self, source_info: SourceInfo) -> Self { - Self { - sample: Sample { - source_info, - ..self.sample - }, - _t: PhantomData::, - } - } - - #[zenoh_macros::unstable] - fn attachment>>(self, attachment: U) -> Self { - Self { - sample: Sample { - attachment: attachment.into(), - ..self.sample - }, - _t: PhantomData::, - } - } -} - -impl QoSBuilderTrait for SampleBuilder { - fn congestion_control(self, congestion_control: CongestionControl) -> Self { - let qos: QoSBuilder = self.sample.qos.into(); - let qos = qos.congestion_control(congestion_control).into(); - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } - fn priority(self, priority: Priority) -> Self { - let qos: QoSBuilder = self.sample.qos.into(); - let qos = qos.priority(priority).into(); - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } - fn express(self, is_express: bool) -> Self { - let qos: QoSBuilder = self.sample.qos.into(); - let qos = qos.express(is_express).into(); - Self { - sample: Sample { qos, ..self.sample }, - _t: PhantomData::, - } - } -} - -impl ValueBuilderTrait for SampleBuilder { - fn 
encoding>(self, encoding: T) -> Self { - Self { - sample: Sample { - encoding: encoding.into(), - ..self.sample - }, - _t: PhantomData::, - } - } - fn payload>(self, payload: T) -> Self { - Self { - sample: Sample { - payload: payload.into(), - ..self.sample - }, - _t: PhantomData::, - } - } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { - sample: Sample { - payload, - encoding, - ..self.sample - }, - _t: PhantomData::, - } - } -} - -impl From for SampleBuilder { - fn from(sample: Sample) -> Self { - SampleBuilder { - sample, - _t: PhantomData::, - } - } -} - -impl TryFrom for SampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Put { - bail!("Sample is not a put sample") - } - Ok(SampleBuilder { - sample, - _t: PhantomData::, - }) - } -} - -impl TryFrom for SampleBuilder { - type Error = zresult::Error; - fn try_from(sample: Sample) -> Result { - if sample.kind != SampleKind::Delete { - bail!("Sample is not a delete sample") - } - Ok(SampleBuilder { - sample, - _t: PhantomData::, - }) - } -} - -impl From> for Sample { - fn from(sample_builder: SampleBuilder) -> Self { - sample_builder.sample - } -} diff --git a/zenoh/src/scouting.rs b/zenoh/src/scouting.rs index 49f2b4c01f..f2c90123ce 100644 --- a/zenoh/src/scouting.rs +++ b/zenoh/src/scouting.rs @@ -14,12 +14,13 @@ use crate::handlers::{locked, Callback, DefaultHandler}; use crate::net::runtime::{orchestrator::Loop, Runtime}; -use futures::StreamExt; +use std::time::Duration; use std::{fmt, future::Ready, net::SocketAddr, ops::Deref}; use tokio::net::UdpSocket; use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; use zenoh_protocol::core::WhatAmIMatcher; use zenoh_result::ZResult; +use zenoh_task::TerminatableTask; /// Constants and helpers for zenoh `whatami` flags. 
pub use zenoh_protocol::core::WhatAmI; @@ -118,7 +119,7 @@ impl ScoutBuilder { self.callback(locked(callback)) } - /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the [`Hello`] messages from this scout with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// # Examples /// ```no_run @@ -140,7 +141,7 @@ impl ScoutBuilder { #[inline] pub fn with(self, handler: Handler) -> ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello>, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello>, { let ScoutBuilder { what, @@ -157,27 +158,27 @@ impl ScoutBuilder { impl Resolvable for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, - Handler::Handler: Send, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } impl SyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, - Handler::Handler: Send, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); scout(self.what, self.config?, callback).map(|scout| Scout { scout, receiver }) } } impl AsyncResolve for ScoutBuilder where - Handler: crate::prelude::IntoHandler<'static, Hello> + Send, - Handler::Handler: Send, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Hello> + Send, + Handler::Receiver: Send, { type Future = Ready; @@ -204,7 +205,7 @@ where /// ``` pub(crate) struct ScoutInner { #[allow(dead_code)] - pub(crate) stop_sender: flume::Sender<()>, + pub(crate) scout_task: Option, } impl ScoutInner { @@ -226,18 +227,26 @@ impl ScoutInner { /// # } /// ``` pub fn stop(self) { - // This drops the inner `stop_sender` and hence stops the scouting 
receiver std::mem::drop(self); } } +impl Drop for ScoutInner { + fn drop(&mut self) { + if self.scout_task.is_some() { + let task = self.scout_task.take(); + task.unwrap().terminate(Duration::from_secs(10)); + } + } +} + impl fmt::Debug for ScoutInner { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallbackScout").finish() } } -/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoHandler). +/// A scout that returns [`Hello`] messages through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// # Examples /// ```no_run @@ -307,7 +316,6 @@ fn scout( zenoh_config::defaults::scouting::multicast::interface, |s| s.as_ref(), ); - let (stop_sender, stop_receiver) = flume::bounded::<()>(1); let ifaces = Runtime::get_interfaces(ifaces); if !ifaces.is_empty() { let sockets: Vec = ifaces @@ -315,25 +323,29 @@ fn scout( .filter_map(|iface| Runtime::bind_ucast_port(iface).ok()) .collect(); if !sockets.is_empty() { - zenoh_runtime::ZRuntime::Net.spawn(async move { - let mut stop_receiver = stop_receiver.stream(); - let scout = Runtime::scout(&sockets, what, &addr, move |hello| { - let callback = callback.clone(); - async move { - callback(hello); - Loop::Continue + let cancellation_token = TerminatableTask::create_cancellation_token(); + let cancellation_token_clone = cancellation_token.clone(); + let task = TerminatableTask::spawn( + zenoh_runtime::ZRuntime::Acceptor, + async move { + let scout = Runtime::scout(&sockets, what, &addr, move |hello| { + let callback = callback.clone(); + async move { + callback(hello); + Loop::Continue + } + }); + tokio::select! { + _ = scout => {}, + _ = cancellation_token_clone.cancelled() => { log::trace!("stop scout({}, {})", what, &config); }, } - }); - let stop = async move { - stop_receiver.next().await; - log::trace!("stop scout({}, {})", what, &config); - }; - tokio::select! 
{ - _ = scout => {}, - _ = stop => {}, - } + }, + cancellation_token.clone(), + ); + return Ok(ScoutInner { + scout_task: Some(task), }); } } - Ok(ScoutInner { stop_sender }) + Ok(ScoutInner { scout_task: None }) } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 375411d663..88f3c2cb77 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -11,10 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // + use crate::admin; use crate::config::Config; use crate::config::Notifier; -use crate::encoding::Encoding; use crate::handlers::{Callback, DefaultHandler}; use crate::info::*; use crate::key_expr::KeyExprInner; @@ -23,7 +23,6 @@ use crate::liveliness::{Liveliness, LivelinessTokenState}; use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::payload::Payload; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; @@ -32,7 +31,6 @@ use crate::queryable::*; #[cfg(feature = "unstable")] use crate::sample::Attachment; use crate::sample::DataInfo; -use crate::sample::DataInfoIntoSample; use crate::sample::QoS; use crate::selector::TIME_RANGE_KEY; use crate::subscriber::*; @@ -41,8 +39,6 @@ use crate::Priority; use crate::Sample; use crate::SampleKind; use crate::Selector; -#[cfg(feature = "unstable")] -use crate::SourceInfo; use crate::Value; use log::{error, trace, warn}; use std::collections::HashMap; @@ -50,7 +46,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::fmt; use std::ops::Deref; -use std::sync::atomic::{AtomicU16, Ordering}; +use std::sync::atomic::{AtomicU16, AtomicUsize, Ordering}; use std::sync::Arc; use std::sync::RwLock; use std::time::Duration; @@ -59,8 +55,8 @@ use zenoh_buffers::ZBuf; use zenoh_collections::SingleOrVec; use zenoh_config::unwrap_or_default; use zenoh_core::{zconfigurable, zread, Resolve, ResolveClosure, ResolveFuture, SyncResolve}; -#[cfg(feature = "unstable")] -use 
zenoh_protocol::network::{declare::SubscriberId, ext}; +use zenoh_protocol::network::AtomicRequestId; +use zenoh_protocol::network::RequestId; use zenoh_protocol::{ core::{ key_expr::{keyexpr, OwnedKeyExpr}, @@ -68,20 +64,24 @@ use zenoh_protocol::{ }, network::{ declare::{ - self, common::ext::WireExprType, queryable::ext::QueryableInfoType, - subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareMode, + self, common::ext::WireExprType, queryable::ext::QueryableInfo, + subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareKeyExpr, DeclareQueryable, DeclareSubscriber, UndeclareQueryable, UndeclareSubscriber, }, + ext, request::{self, ext::TargetType, Request}, - AtomicRequestId, Mapping, Push, RequestId, Response, ResponseFinal, + Mapping, Push, Response, ResponseFinal, }, zenoh::{ - query::{self, ext::QueryBodyType, Consolidation}, - reply::ReplyBody, - Del, PushBody, Put, RequestBody, ResponseBody, + query::{ + self, + ext::{ConsolidationType, QueryBodyType}, + }, + Pull, PushBody, RequestBody, ResponseBody, }, }; use zenoh_result::ZResult; +use zenoh_task::TaskController; use zenoh_util::core::AsyncResolve; zconfigurable! { @@ -96,10 +96,9 @@ pub(crate) struct SessionState { pub(crate) primitives: Option>, // @TODO replace with MaybeUninit ?? 
pub(crate) expr_id_counter: AtomicExprId, // @TODO: manage rollover and uniqueness pub(crate) qid_counter: AtomicRequestId, + pub(crate) decl_id_counter: AtomicUsize, pub(crate) local_resources: HashMap, pub(crate) remote_resources: HashMap, - #[cfg(feature = "unstable")] - pub(crate) remote_subscribers: HashMap>, //pub(crate) publications: Vec, pub(crate) subscribers: HashMap>, pub(crate) queryables: HashMap>, @@ -121,10 +120,9 @@ impl SessionState { primitives: None, expr_id_counter: AtomicExprId::new(1), // Note: start at 1 because 0 is reserved for NO_RESOURCE qid_counter: AtomicRequestId::new(0), + decl_id_counter: AtomicUsize::new(0), local_resources: HashMap::new(), remote_resources: HashMap::new(), - #[cfg(feature = "unstable")] - remote_subscribers: HashMap::new(), //publications: Vec::new(), subscribers: HashMap::new(), queryables: HashMap::new(), @@ -291,7 +289,7 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -299,7 +297,8 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { SubscriberBuilder { session: self.clone(), key_expr: TryIntoKeyExpr::try_into(key_expr).map_err(Into::into), - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), + mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -331,9 +330,8 @@ impl<'s, 'a> SessionDeclarations<'s, 'a> for SessionRef<'a> { PublisherBuilder { session: self.clone(), key_expr: key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::DEFAULT, - priority: Priority::DEFAULT, - is_express: false, + congestion_control: CongestionControl::default(), + priority: Priority::default(), destination: Locality::default(), } } @@ -395,6 +393,8 @@ pub struct Session { pub(crate) state: Arc>, 
pub(crate) id: u16, pub(crate) alive: bool, + owns_runtime: bool, + task_controller: TaskController, } static SESSION_ID_COUNTER: AtomicU16 = AtomicU16::new(0); @@ -415,6 +415,8 @@ impl Session { state: state.clone(), id: SESSION_ID_COUNTER.fetch_add(1, Ordering::SeqCst), alive: true, + owns_runtime: false, + task_controller: TaskController::default(), }; runtime.new_handler(Arc::new(admin::Handler::new(session.clone()))); @@ -518,14 +520,19 @@ impl Session { /// session.close().res().await.unwrap(); /// # } /// ``` - pub fn close(self) -> impl Resolve> { + pub fn close(mut self) -> impl Resolve> { ResolveFuture::new(async move { trace!("close()"); - self.runtime.close().await?; - - let primitives = zwrite!(self.state).primitives.as_ref().unwrap().clone(); - primitives.send_close(); - + self.task_controller.terminate_all(Duration::from_secs(10)); + if self.owns_runtime { + self.runtime.close().await?; + } + let mut state = zwrite!(self.state); + state.primitives.as_ref().unwrap().send_close(); + // clean up to break cyclic references from self.state to itself + state.primitives.take(); + state.queryables.clear(); + self.alive = false; Ok(()) }) } @@ -579,7 +586,7 @@ impl<'a> SessionDeclarations<'a, 'a> for Session { fn declare_subscriber<'b, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -678,46 +685,40 @@ impl Session { /// # Arguments /// /// * `key_expr` - Key expression matching the resources to put - /// * `payload` - The payload to put + /// * `value` - The value to put /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { /// use zenoh::prelude::r#async::*; - /// use zenoh::prelude::*; /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// session - /// .put("key/expression", "payload") - /// .encoding(Encoding::TEXT_PLAIN) + /// .put("key/expression", 
"value") + /// .encoding(KnownEncoding::TextPlain) /// .res() /// .await /// .unwrap(); /// # } /// ``` #[inline] - pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoPayload>( + pub fn put<'a, 'b: 'a, TryIntoKeyExpr, IntoValue>( &'a self, key_expr: TryIntoKeyExpr, - payload: IntoPayload, - ) -> SessionPutBuilder<'a, 'b> + value: IntoValue, + ) -> PutBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, - IntoPayload: Into, + IntoValue: Into, { - PublicationBuilder { + PutBuilder { publisher: self.declare_publisher(key_expr), - kind: PublicationBuilderPut { - payload: payload.into(), - encoding: Encoding::default(), - }, - timestamp: None, + value: value.into(), + kind: SampleKind::Put, #[cfg(feature = "unstable")] attachment: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), } } @@ -741,19 +742,17 @@ impl Session { pub fn delete<'a, 'b: 'a, TryIntoKeyExpr>( &'a self, key_expr: TryIntoKeyExpr, - ) -> SessionDeleteBuilder<'a, 'b> + ) -> DeleteBuilder<'a, 'b> where TryIntoKeyExpr: TryInto>, >>::Error: Into, { - PublicationBuilder { + PutBuilder { publisher: self.declare_publisher(key_expr), - kind: PublicationBuilderDelete, - timestamp: None, + value: Value::empty(), + kind: SampleKind::Delete, #[cfg(feature = "unstable")] attachment: None, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), } } /// Query data from the matching queryables in the system. 
@@ -778,35 +777,31 @@ impl Session { /// } /// # } /// ``` - pub fn get<'a, 'b: 'a, IntoSelector>( + pub fn get<'a, 'b: 'a, TryIntoSelector>( &'a self, - selector: IntoSelector, + selector: TryIntoSelector, ) -> GetBuilder<'a, 'b, DefaultHandler> where - IntoSelector: TryInto>, - >>::Error: Into, + TryIntoSelector: TryInto>, + >>::Error: Into, { let selector = selector.try_into().map_err(Into::into); let timeout = { let conf = self.runtime.config().lock(); Duration::from_millis(unwrap_or_default!(conf.queries_default_timeout())) }; - let qos: QoS = request::ext::QoSType::REQUEST.into(); GetBuilder { session: self, selector, scope: Ok(None), - target: QueryTarget::DEFAULT, - consolidation: QueryConsolidation::DEFAULT, - qos: qos.into(), + target: QueryTarget::default(), + consolidation: QueryConsolidation::default(), destination: Locality::default(), timeout, value: None, #[cfg(feature = "unstable")] attachment: None, handler: DefaultHandler, - #[cfg(feature = "unstable")] - source_info: SourceInfo::empty(), } } } @@ -818,6 +813,8 @@ impl Session { state: self.state.clone(), id: self.id, alive: false, + owns_runtime: self.owns_runtime, + task_controller: self.task_controller.clone(), } } @@ -829,13 +826,14 @@ impl Session { let aggregated_publishers = config.aggregation().publishers().clone(); match Runtime::init(config).await { Ok(mut runtime) => { - let session = Self::init( + let mut session = Self::init( runtime.clone(), aggregated_subscribers, aggregated_publishers, ) .res_async() .await; + session.owns_runtime = true; match runtime.start().await { Ok(()) => { // Workaround for the declare_and_shoot problem @@ -880,10 +878,9 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, + ext_qos: declare::ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, + ext_nodeid: 
declare::ext::NodeIdType::default(), body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { id: expr_id, wire_expr: WireExpr { @@ -989,20 +986,19 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("subscribe({:?})", key_expr); - let id = self.runtime.next_id(); + let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let key_expr = match scope { Some(scope) => scope / key_expr, None => key_expr.clone(), }; - let mut sub_state = SubscriberState { + let sub_state = Arc::new(SubscriberState { id, - remote_id: id, key_expr: key_expr.clone().into_owned(), scope: scope.clone().map(|e| e.into_owned()), origin, callback, - }; + }); #[cfg(not(feature = "unstable"))] let declared_sub = origin != Locality::SessionLocal; @@ -1012,39 +1008,29 @@ impl Session { .as_str() .starts_with(crate::liveliness::PREFIX_LIVELINESS); - let declared_sub = - declared_sub - .then(|| { - match state - .aggregated_subscribers - .iter() - .find(|s| s.includes(&key_expr)) - { - Some(join_sub) => { - if let Some(joined_sub) = state.subscribers.values().find(|s| { - s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) - }) { - sub_state.remote_id = joined_sub.remote_id; - None - } else { - Some(join_sub.clone().into()) - } - } - None => { - if let Some(twin_sub) = state.subscribers.values().find(|s| { - s.origin != Locality::SessionLocal && s.key_expr == key_expr - }) { - sub_state.remote_id = twin_sub.remote_id; - None - } else { - Some(key_expr.clone()) - } - } + let declared_sub = declared_sub + .then(|| { + match state + .aggregated_subscribers // TODO: can this be an OwnedKeyExpr? 
+ .iter() + .find(|s| s.includes( &key_expr)) + { + Some(join_sub) => { + let joined_sub = state.subscribers.values().any(|s| { + s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) + }); + (!joined_sub).then(|| join_sub.clone().into()) } - }) - .flatten(); - - let sub_state = Arc::new(sub_state); + None => { + let twin_sub = state + .subscribers + .values() + .any(|s| s.origin != Locality::SessionLocal && s.key_expr == key_expr); + (!twin_sub).then(|| key_expr.clone()) + } + } + }) + .flatten(); state.subscribers.insert(sub_state.id, sub_state.clone()); for res in state @@ -1093,12 +1079,11 @@ impl Session { // }; primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, + ext_qos: declare::ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, + ext_nodeid: declare::ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), ext_info: *info, }), @@ -1114,7 +1099,7 @@ impl Session { Ok(sub_state) } - pub(crate) fn unsubscribe(&self, sid: Id) -> ZResult<()> { + pub(crate) fn unsubscribe(&self, sid: usize) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(sub_state) = state.subscribers.remove(&sid) { trace!("unsubscribe({:?})", sub_state); @@ -1144,29 +1129,65 @@ impl Session { if send_forget { // Note: there might be several Subscribers on the same KeyExpr. // Before calling forget_subscriber(key_expr), check if this was the last one. 
- if !state.subscribers.values().any(|s| { - s.origin != Locality::SessionLocal && s.remote_id == sub_state.remote_id - }) { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: sub_state.remote_id, - ext_wire_expr: WireExprType { - wire_expr: WireExpr::empty(), - }, - }), - }); - #[cfg(feature = "unstable")] - { - let state = zread!(self.state); - self.update_status_down(&state, &sub_state.key_expr) + let key_expr = &sub_state.key_expr; + match state + .aggregated_subscribers + .iter() + .find(|s| s.includes(key_expr)) + { + Some(join_sub) => { + let joined_sub = state.subscribers.values().any(|s| { + s.origin != Locality::SessionLocal && join_sub.includes(&s.key_expr) + }); + if !joined_sub { + let primitives = state.primitives.as_ref().unwrap().clone(); + let wire_expr = WireExpr::from(join_sub).to_owned(); + drop(state); + primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) + } + } } - } + None => { + let twin_sub = state + .subscribers + .values() + .any(|s| s.origin != Locality::SessionLocal && s.key_expr == *key_expr); + if !twin_sub { + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 
0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { + wire_expr: key_expr.to_wire(self).to_owned(), + }, + }), + }); + + #[cfg(feature = "unstable")] + { + let state = zread!(self.state); + self.update_status_down(&state, &sub_state.key_expr) + } + } + } + }; } Ok(()) } else { @@ -1183,7 +1204,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("queryable({:?})", key_expr); - let id = self.runtime.next_id(); + let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let qable_state = Arc::new(QueryableState { id, key_expr: key_expr.to_owned(), @@ -1191,50 +1212,158 @@ impl Session { origin, callback, }); + #[cfg(feature = "complete_n")] + { + state.queryables.insert(id, qable_state.clone()); + + if origin != Locality::SessionLocal && complete { + let primitives = state.primitives.as_ref().unwrap().clone(); + let complete = Session::complete_twin_qabls(&state, key_expr); + drop(state); + let qabl_info = QueryableInfo { + complete, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: id as u32, + wire_expr: key_expr.to_owned(), + ext_info: qabl_info, + }), + }); + } + } + #[cfg(not(feature = "complete_n"))] + { + let twin_qabl = Session::twin_qabl(&state, key_expr); + let complete_twin_qabl = twin_qabl && Session::complete_twin_qabl(&state, key_expr); - state.queryables.insert(id, qable_state.clone()); + state.queryables.insert(id, qable_state.clone()); - if origin != Locality::SessionLocal { - let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - let qabl_info = QueryableInfoType { - complete, - distance: 0, - }; - primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id, - wire_expr: key_expr.to_owned(), - ext_info: qabl_info, - }), - }); + if origin != Locality::SessionLocal && (!twin_qabl || (!complete_twin_qabl && complete)) + { + let primitives = state.primitives.as_ref().unwrap().clone(); + let complete = u8::from(!complete_twin_qabl && complete); + drop(state); + let qabl_info = QueryableInfo { + complete, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: id as u32, + wire_expr: key_expr.to_owned(), + ext_info: qabl_info, + }), + }); + } } Ok(qable_state) } - pub(crate) fn close_queryable(&self, qid: Id) -> ZResult<()> { + pub(crate) fn twin_qabl(state: &SessionState, key: &WireExpr) -> bool { + state.queryables.values().any(|q| { + q.origin != Locality::SessionLocal + && state.local_wireexpr_to_expr(&q.key_expr).unwrap() + == state.local_wireexpr_to_expr(key).unwrap() + }) + } + + #[cfg(not(feature = "complete_n"))] + pub(crate) fn complete_twin_qabl(state: &SessionState, key: &WireExpr) -> bool { + state.queryables.values().any(|q| { + q.origin != Locality::SessionLocal + && q.complete + && state.local_wireexpr_to_expr(&q.key_expr).unwrap() + == state.local_wireexpr_to_expr(key).unwrap() + }) + } + + #[cfg(feature = "complete_n")] + pub(crate) fn complete_twin_qabls(state: &SessionState, key: &WireExpr) -> u8 { + state + .queryables + .values() + .filter(|q| { + q.origin != Locality::SessionLocal + && q.complete + && state.local_wireexpr_to_expr(&q.key_expr).unwrap() + == state.local_wireexpr_to_expr(key).unwrap() + }) + .count() as u8 + } + + pub(crate) fn close_queryable(&self, qid: usize) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(qable_state) = state.queryables.remove(&qid) { trace!("close_queryable({:?})", qable_state); if qable_state.origin != 
Locality::SessionLocal { let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); - primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, - ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: qable_state.id, - ext_wire_expr: WireExprType { - wire_expr: qable_state.key_expr.clone(), - }, - }), - }); + if Session::twin_qabl(&state, &qable_state.key_expr) { + // There still exist Queryables on the same KeyExpr. + if qable_state.complete { + #[cfg(feature = "complete_n")] + { + let complete = + Session::complete_twin_qabls(&state, &qable_state.key_expr); + drop(state); + let qabl_info = QueryableInfo { + complete, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: qable_state.key_expr.clone(), + ext_info: qabl_info, + }), + }); + } + #[cfg(not(feature = "complete_n"))] + { + if !Session::complete_twin_qabl(&state, &qable_state.key_expr) { + drop(state); + let qabl_info = QueryableInfo { + complete: 0, + distance: 0, + }; + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + wire_expr: qable_state.key_expr.clone(), + ext_info: qabl_info, + }), + }); + } + } + } + } else { + // There are no more Queryables on the same KeyExpr. 
+ drop(state); + primitives.send_declare(Declare { + ext_qos: declare::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: declare::ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // @TODO use proper QueryableId (#703) + ext_wire_expr: WireExprType { + wire_expr: qable_state.key_expr.clone(), + }, + }), + }); + } } Ok(()) } else { @@ -1249,7 +1378,7 @@ impl Session { ) -> ZResult> { let mut state = zwrite!(self.state); log::trace!("declare_liveliness({:?})", key_expr); - let id = self.runtime.next_id(); + let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); let key_expr = KeyExpr::from(*crate::liveliness::KE_PREFIX_LIVELINESS / key_expr); let tok_state = Arc::new(LivelinessTokenState { id, @@ -1260,21 +1389,20 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: declare::ext::QoSType::DECLARE, + ext_qos: declare::ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: declare::ext::NodeIdType::DEFAULT, + ext_nodeid: declare::ext::NodeIdType::default(), body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id, + id: id as u32, wire_expr: key_expr.to_wire(self).to_owned(), - ext_info: SubscriberInfo::DEFAULT, + ext_info: SubscriberInfo::default(), }), }); Ok(tok_state) } #[zenoh_macros::unstable] - pub(crate) fn undeclare_liveliness(&self, tid: Id) -> ZResult<()> { + pub(crate) fn undeclare_liveliness(&self, tid: usize) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(tok_state) = state.tokens.remove(&tid) { trace!("undeclare_liveliness({:?})", tok_state); @@ -1285,13 +1413,14 @@ impl Session { let primitives = state.primitives.as_ref().unwrap().clone(); drop(state); primitives.send_declare(Declare { - mode: DeclareMode::Push, - ext_qos: ext::QoSType::DECLARE, + ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::DEFAULT, + ext_nodeid: ext::NodeIdType::default(), body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: tok_state.id, - ext_wire_expr: WireExprType::null(), + id: 0, // @TODO use proper SubscriberId (#703) + ext_wire_expr: WireExprType { + wire_expr: key_expr.to_wire(self).to_owned(), + }, }), }); } @@ -1308,7 +1437,8 @@ impl Session { callback: Callback<'static, MatchingStatus>, ) -> ZResult> { let mut state = zwrite!(self.state); - let id = self.runtime.next_id(); + + let id = state.decl_id_counter.fetch_add(1, Ordering::SeqCst); log::trace!("matches_listener({:?}) => {id}", publisher.key_expr); let listener_state = Arc::new(MatchingListenerState { id, @@ -1382,30 +1512,34 @@ impl Session { if key_expr.intersects(&msub.key_expr) { // Cannot hold session lock when calling tables (matching_status()) // TODO: check which ZRuntime should be used - zenoh_runtime::ZRuntime::RX.spawn({ - let session = self.clone(); - let msub = msub.clone(); - async move { - match msub.current.lock() { - Ok(mut current) => { - if !*current { - if let Ok(status) = - session.matching_status(&msub.key_expr, msub.destination) - { - if status.matching_subscribers() { - *current = true; - let callback = msub.callback.clone(); - (callback)(status) + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let session = self.clone(); + let msub = msub.clone(); + async move { + match msub.current.lock() { + Ok(mut current) => { + if !*current { + if let Ok(status) = session + .matching_status(&msub.key_expr, msub.destination) + { + if status.matching_subscribers() { + *current = true; + let callback = msub.callback.clone(); + (callback)(status) + } } } } - } - Err(e) => { - log::error!("Error trying to acquire MathginListener lock: {}", e); + Err(e) => { + log::error!( + "Error trying to acquire MathginListener lock: {}", + e + ); + } } } - } - }); + }); } } } @@ -1416,36 +1550,40 @@ impl Session { if key_expr.intersects(&msub.key_expr) { // Cannot 
hold session lock when calling tables (matching_status()) // TODO: check which ZRuntime should be used - zenoh_runtime::ZRuntime::RX.spawn({ - let session = self.clone(); - let msub = msub.clone(); - async move { - match msub.current.lock() { - Ok(mut current) => { - if *current { - if let Ok(status) = - session.matching_status(&msub.key_expr, msub.destination) - { - if !status.matching_subscribers() { - *current = false; - let callback = msub.callback.clone(); - (callback)(status) + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let session = self.clone(); + let msub = msub.clone(); + async move { + match msub.current.lock() { + Ok(mut current) => { + if *current { + if let Ok(status) = session + .matching_status(&msub.key_expr, msub.destination) + { + if !status.matching_subscribers() { + *current = false; + let callback = msub.callback.clone(); + (callback)(status) + } } } } - } - Err(e) => { - log::error!("Error trying to acquire MathginListener lock: {}", e); + Err(e) => { + log::error!( + "Error trying to acquire MathginListener lock: {}", + e + ); + } } } - } - }); + }); } } } #[zenoh_macros::unstable] - pub(crate) fn undeclare_matches_listener_inner(&self, sid: Id) -> ZResult<()> { + pub(crate) fn undeclare_matches_listener_inner(&self, sid: usize) -> ZResult<()> { let mut state = zwrite!(self.state); if let Some(state) = state.matching_listeners.remove(&sid) { trace!("undeclare_matches_listener_inner({:?})", state); @@ -1562,25 +1700,48 @@ impl Session { drop(state); let zenoh_collections::single_or_vec::IntoIter { drain, last } = callbacks.into_iter(); for (cb, key_expr) in drain { - let sample = info.clone().into_sample( - key_expr, - payload.clone(), - #[cfg(feature = "unstable")] - attachment.clone(), - ); + #[allow(unused_mut)] + let mut sample = Sample::with_info(key_expr, payload.clone(), info.clone()); + #[cfg(feature = "unstable")] + { + sample.attachment = attachment.clone(); + } cb(sample); } if let Some((cb, key_expr)) 
= last { - let sample = info.into_sample( - key_expr, - payload, - #[cfg(feature = "unstable")] - attachment.clone(), - ); + #[allow(unused_mut)] + let mut sample = Sample::with_info(key_expr, payload, info); + #[cfg(feature = "unstable")] + { + sample.attachment = attachment; + } cb(sample); } } + pub(crate) fn pull<'a>(&'a self, key_expr: &'a KeyExpr) -> impl Resolve> + 'a { + ResolveClosure::new(move || { + trace!("pull({:?})", key_expr); + let state = zread!(self.state); + let primitives = state.primitives.as_ref().unwrap().clone(); + drop(state); + primitives.send_request(Request { + id: 0, // @TODO compute a proper request ID + wire_expr: key_expr.to_wire(self).to_owned(), + ext_qos: ext::QoSType::request_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + ext_target: request::ext::TargetType::default(), + ext_budget: None, + ext_timeout: None, + payload: RequestBody::Pull(Pull { + ext_unknown: vec![], + }), + }); + Ok(()) + }) + } + #[allow(clippy::too_many_arguments)] pub(crate) fn query( &self, @@ -1588,25 +1749,23 @@ impl Session { scope: &Option>, target: QueryTarget, consolidation: QueryConsolidation, - qos: QoS, destination: Locality, timeout: Duration, value: Option, #[cfg(feature = "unstable")] attachment: Option, - #[cfg(feature = "unstable")] source: SourceInfo, callback: Callback<'static, Reply>, ) -> ZResult<()> { log::trace!("get({}, {:?}, {:?})", selector, target, consolidation); let mut state = zwrite!(self.state); let consolidation = match consolidation.mode { - ConsolidationMode::Auto => { + Mode::Auto => { if selector.decode().any(|(k, _)| k.as_ref() == TIME_RANGE_KEY) { ConsolidationMode::None } else { ConsolidationMode::Latest } } - mode => mode, + Mode::Manual(mode) => mode, }; let qid = state.qid_counter.fetch_add(1, Ordering::SeqCst); let nb_final = match destination { @@ -1614,27 +1773,33 @@ impl Session { _ => 1, }; - zenoh_runtime::ZRuntime::Net.spawn({ - let state = self.state.clone(); - let zid = 
self.runtime.zid(); - async move { - tokio::time::sleep(timeout).await; - let mut state = zwrite!(state); - if let Some(query) = state.queries.remove(&qid) { - std::mem::drop(state); - log::debug!("Timeout on query {}! Send error and close.", qid); - if query.reception_mode == ConsolidationMode::Latest { - for (_, reply) in query.replies.unwrap().into_iter() { - (query.callback)(reply); + let token = self.task_controller.get_cancellation_token(); + self.task_controller + .spawn_with_rt(zenoh_runtime::ZRuntime::Net, { + let state = self.state.clone(); + let zid = self.runtime.zid(); + async move { + tokio::select! { + _ = tokio::time::sleep(timeout) => { + let mut state = zwrite!(state); + if let Some(query) = state.queries.remove(&qid) { + std::mem::drop(state); + log::debug!("Timeout on query {}! Send error and close.", qid); + if query.reception_mode == ConsolidationMode::Latest { + for (_, reply) in query.replies.unwrap().into_iter() { + (query.callback)(reply); + } + } + (query.callback)(Reply { + sample: Err("Timeout".into()), + replier_id: zid, + }); + } } + _ = token.cancelled() => {} } - (query.callback)(Reply { - sample: Err("Timeout".into()), - replier_id: zid, - }); } - } - }); + }); let selector = match scope { Some(scope) => Selector { @@ -1659,8 +1824,8 @@ impl Session { ); let primitives = state.primitives.as_ref().unwrap().clone(); - drop(state); + drop(state); if destination != Locality::SessionLocal { #[allow(unused_mut)] let mut ext_attachment = None; @@ -1673,24 +1838,21 @@ impl Session { primitives.send_request(Request { id: qid, wire_expr: wexpr.clone(), - ext_qos: qos.into(), + ext_qos: request::ext::QoSType::request_default(), ext_tstamp: None, - ext_nodeid: request::ext::NodeIdType::DEFAULT, + ext_nodeid: request::ext::NodeIdType::default(), ext_target: target, ext_budget: None, ext_timeout: Some(timeout), payload: RequestBody::Query(zenoh_protocol::zenoh::Query { - consolidation, parameters: selector.parameters().to_string(), - 
#[cfg(feature = "unstable")] - ext_sinfo: source.into(), - #[cfg(not(feature = "unstable"))] ext_sinfo: None, + ext_consolidation: consolidation.into(), ext_body: value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone().into(), - payload: v.payload.clone().into(), + encoding: v.encoding.clone(), + payload: v.payload.clone(), }), ext_attachment, ext_unknown: vec![], @@ -1704,12 +1866,12 @@ impl Session { selector.parameters(), qid, target, - consolidation, + consolidation.into(), value.as_ref().map(|v| query::ext::QueryBodyType { #[cfg(feature = "shared-memory")] ext_shm: None, - encoding: v.encoding.clone().into(), - payload: v.payload.clone().into(), + encoding: v.encoding.clone(), + payload: v.payload.clone(), }), #[cfg(feature = "unstable")] attachment, @@ -1726,19 +1888,19 @@ impl Session { parameters: &str, qid: RequestId, _target: TargetType, - _consolidation: Consolidation, + _consolidation: ConsolidationType, body: Option, #[cfg(feature = "unstable")] attachment: Option, ) { - let (primitives, key_expr, queryables) = { + let (primitives, key_expr, callbacks) = { let state = zread!(self.state); match state.wireexpr_to_keyexpr(key_expr, local) { Ok(key_expr) => { - let queryables = state + let callbacks = state .queryables - .iter() + .values() .filter( - |(_, queryable)| + |queryable| (queryable.origin == Locality::Any || (local == (queryable.origin == Locality::SessionLocal))) && @@ -1755,12 +1917,12 @@ impl Session { } } ) - .map(|(id, qable)| (*id, qable.callback.clone())) - .collect::)>>(); + .map(|qable| qable.callback.clone()) + .collect::>>(); ( state.primitives.as_ref().unwrap().clone(), key_expr.into_owned(), - queryables, + callbacks, ) } Err(err) => { @@ -1772,30 +1934,29 @@ impl Session { let parameters = parameters.to_owned(); - let zid = self.runtime.zid(); + let zid = self.runtime.zid(); // @TODO build/use prebuilt specific zid - let query_inner = Arc::new(QueryInner { - 
key_expr, - parameters, - value: body.map(|b| Value { - payload: b.payload.into(), - encoding: b.encoding.into(), + let query = Query { + inner: Arc::new(QueryInner { + key_expr, + parameters, + value: body.map(|b| Value { + payload: b.payload, + encoding: b.encoding, + }), + qid, + zid, + primitives: if local { + Arc::new(self.clone()) + } else { + primitives + }, + #[cfg(feature = "unstable")] + attachment, }), - qid, - zid, - primitives: if local { - Arc::new(self.clone()) - } else { - primitives - }, - #[cfg(feature = "unstable")] - attachment, - }); - for (eid, callback) in queryables { - callback(Query { - inner: query_inner.clone(), - eid, - }); + }; + for callback in callbacks.iter() { + callback(query.clone()); } } } @@ -1828,7 +1989,7 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'static, 'b, DefaultHandler> + ) -> SubscriberBuilder<'static, 'b, PushMode, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into, @@ -1836,7 +1997,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { SubscriberBuilder { session: SessionRef::Shared(self.clone()), key_expr: key_expr.try_into().map_err(Into::into), - reliability: Reliability::DEFAULT, + reliability: Reliability::default(), + mode: PushMode, origin: Locality::default(), handler: DefaultHandler, } @@ -1862,10 +2024,10 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply( - /// KeyExpr::try_from("key/expression").unwrap(), + /// query.reply(Ok(Sample::try_from( + /// "key/expression", /// "value", - /// ).res().await.unwrap(); + /// ).unwrap())).res().await.unwrap(); /// } /// }).await; /// # } @@ -1918,9 +2080,8 @@ impl<'s> SessionDeclarations<'s, 'static> for Arc { PublisherBuilder { session: SessionRef::Shared(self.clone()), key_expr: 
key_expr.try_into().map_err(Into::into), - congestion_control: CongestionControl::DEFAULT, - priority: Priority::DEFAULT, - is_express: false, + congestion_control: CongestionControl::default(), + priority: Priority::default(), destination: Locality::default(), } } @@ -1990,13 +2151,9 @@ impl Primitives for Session { trace!("recv DeclareSubscriber {} {:?}", m.id, m.wire_expr); #[cfg(feature = "unstable")] { - let mut state = zwrite!(self.state); - match state - .wireexpr_to_keyexpr(&m.wire_expr, false) - .map(|e| e.into_owned()) - { + let state = zread!(self.state); + match state.wireexpr_to_keyexpr(&m.wire_expr, false) { Ok(expr) => { - state.remote_subscribers.insert(m.id, expr.clone()); self.update_status_up(&state, &expr); if expr @@ -2024,30 +2181,33 @@ impl Primitives for Session { trace!("recv UndeclareSubscriber {:?}", m.id); #[cfg(feature = "unstable")] { - let mut state = zwrite!(self.state); - if let Some(expr) = state.remote_subscribers.remove(&m.id) { - self.update_status_down(&state, &expr); + let state = zread!(self.state); + match state.wireexpr_to_keyexpr(&m.ext_wire_expr.wire_expr, false) { + Ok(expr) => { + self.update_status_down(&state, &expr); - if expr - .as_str() - .starts_with(crate::liveliness::PREFIX_LIVELINESS) - { - drop(state); - let data_info = DataInfo { - kind: SampleKind::Delete, - ..Default::default() - }; - self.handle_data( - false, - &m.ext_wire_expr.wire_expr, - Some(data_info), - ZBuf::default(), - #[cfg(feature = "unstable")] - None, - ); + if expr + .as_str() + .starts_with(crate::liveliness::PREFIX_LIVELINESS) + { + drop(state); + let data_info = DataInfo { + kind: SampleKind::Delete, + ..Default::default() + }; + self.handle_data( + false, + &m.ext_wire_expr.wire_expr, + Some(data_info), + ZBuf::default(), + #[cfg(feature = "unstable")] + None, + ); + } + } + Err(err) => { + log::error!("Received Forget Subscriber for unkown key_expr: {}", err) } - } else { - log::error!("Received Undeclare Subscriber for unkown id: {}", 
m.id); } } } @@ -2060,7 +2220,8 @@ impl Primitives for Session { DeclareBody::DeclareToken(_) => todo!(), DeclareBody::UndeclareToken(_) => todo!(), DeclareBody::DeclareInterest(_) => todo!(), - DeclareBody::DeclareFinal(_) => todo!(), + DeclareBody::FinalInterest(_) => todo!(), + DeclareBody::UndeclareInterest(_) => todo!(), } } @@ -2070,10 +2231,10 @@ impl Primitives for Session { PushBody::Put(m) => { let info = DataInfo { kind: SampleKind::Put, - encoding: Some(m.encoding.into()), + encoding: Some(m.encoding), timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: m.ext_sinfo.as_ref().map(|i| i.zid), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2091,7 +2252,7 @@ impl Primitives for Session { encoding: None, timestamp: m.timestamp, qos: QoS::from(msg.ext_qos), - source_id: m.ext_sinfo.as_ref().map(|i| i.id.clone()), + source_id: m.ext_sinfo.as_ref().map(|i| i.zid), source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; self.handle_data( @@ -2115,29 +2276,48 @@ impl Primitives for Session { &m.parameters, msg.id, msg.ext_target, - m.consolidation, + m.ext_consolidation, m.ext_body, #[cfg(feature = "unstable")] m.ext_attachment.map(Into::into), ), + RequestBody::Put(_) => (), + RequestBody::Del(_) => (), + RequestBody::Pull(_) => todo!(), } } fn send_response(&self, msg: Response) { trace!("recv Response {:?}", msg); match msg.payload { + ResponseBody::Ack(_) => { + log::warn!( + "Received a ResponseBody::Ack, but this isn't supported yet. Dropping message." + ) + } + ResponseBody::Put(_) => { + log::warn!( + "Received a ResponseBody::Put, but this isn't supported yet. Dropping message." 
+ ) + } ResponseBody::Err(e) => { let mut state = zwrite!(self.state); match state.queries.get_mut(&msg.rid) { Some(query) => { let callback = query.callback.clone(); std::mem::drop(state); - let value = Value { - payload: e.payload.into(), - encoding: e.encoding.into(), + let value = match e.ext_body { + Some(body) => Value { + payload: body.payload, + encoding: body.encoding, + }, + None => Value { + payload: ZBuf::empty(), + encoding: zenoh_protocol::core::Encoding::EMPTY, + }, }; let replier_id = match e.ext_sinfo { - Some(info) => info.id.zid, + Some(info) => info.zid, None => ZenohId::rand(), }; let new_reply = Reply { @@ -2204,64 +2384,21 @@ impl Primitives for Session { } None => key_expr, }; - - struct Ret { - payload: ZBuf, - info: DataInfo, - #[cfg(feature = "unstable")] - attachment: Option, - } - let Ret { - payload, - info, - #[cfg(feature = "unstable")] - attachment, - } = match m.payload { - ReplyBody::Put(Put { - timestamp, - encoding, - ext_sinfo, - ext_attachment: _attachment, - payload, - .. - }) => Ret { - payload, - info: DataInfo { - kind: SampleKind::Put, - encoding: Some(encoding.into()), - timestamp, - qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), - source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), - }, - #[cfg(feature = "unstable")] - attachment: _attachment.map(Into::into), - }, - ReplyBody::Del(Del { - timestamp, - ext_sinfo, - ext_attachment: _attachment, - .. 
- }) => Ret { - payload: ZBuf::empty(), - info: DataInfo { - kind: SampleKind::Delete, - encoding: None, - timestamp, - qos: QoS::from(msg.ext_qos), - source_id: ext_sinfo.as_ref().map(|i| i.id.clone()), - source_sn: ext_sinfo.as_ref().map(|i| i.sn as u64), - }, - #[cfg(feature = "unstable")] - attachment: _attachment.map(Into::into), - }, + let info = DataInfo { + kind: SampleKind::Put, + encoding: Some(m.encoding), + timestamp: m.timestamp, + qos: QoS::from(msg.ext_qos), + source_id: m.ext_sinfo.as_ref().map(|i| i.zid), + source_sn: m.ext_sinfo.as_ref().map(|i| i.sn as u64), }; - let sample = info.into_sample( - key_expr.into_owned(), - payload, - #[cfg(feature = "unstable")] - attachment, - ); + #[allow(unused_mut)] + let mut sample = + Sample::with_info(key_expr.into_owned(), m.payload, Some(info)); + #[cfg(feature = "unstable")] + { + sample.attachment = m.ext_attachment.map(Into::into); + } let new_reply = Reply { sample: Ok(sample), replier_id: ZenohId::rand(), // TODO @@ -2309,7 +2446,7 @@ impl Primitives for Session { } } } - Consolidation::Auto | ConsolidationMode::Latest => { + ConsolidationMode::Latest => { match query.replies.as_ref().unwrap().get( new_reply.sample.as_ref().unwrap().key_expr.as_keyexpr(), ) { @@ -2455,7 +2592,7 @@ pub trait SessionDeclarations<'s, 'a> { fn declare_subscriber<'b, TryIntoKeyExpr>( &'s self, key_expr: TryIntoKeyExpr, - ) -> SubscriberBuilder<'a, 'b, DefaultHandler> + ) -> SubscriberBuilder<'a, 'b, PushMode, DefaultHandler> where TryIntoKeyExpr: TryInto>, >>::Error: Into; @@ -2480,10 +2617,10 @@ pub trait SessionDeclarations<'s, 'a> { /// .unwrap(); /// tokio::task::spawn(async move { /// while let Ok(query) = queryable.recv_async().await { - /// query.reply( - /// KeyExpr::try_from("key/expression").unwrap(), + /// query.reply(Ok(Sample::try_from( + /// "key/expression", /// "value", - /// ).res().await.unwrap(); + /// ).unwrap())).res().await.unwrap(); /// } /// }).await; /// # } diff --git a/zenoh/src/subscriber.rs 
b/zenoh/src/subscriber.rs index 47d41ebb1f..dc53120fff 100644 --- a/zenoh/src/subscriber.rs +++ b/zenoh/src/subscriber.rs @@ -13,28 +13,26 @@ // //! Subscribing primitives. -use crate::handlers::{locked, Callback, DefaultHandler, IntoHandler}; -use crate::key_expr::KeyExpr; +use crate::handlers::{locked, Callback, DefaultHandler}; use crate::prelude::Locality; -use crate::sample::Sample; -use crate::Id; +use crate::prelude::{Id, IntoCallbackReceiverPair, KeyExpr, Sample}; use crate::Undeclarable; use crate::{Result as ZResult, SessionRef}; use std::fmt; use std::future::Ready; use std::ops::{Deref, DerefMut}; use std::sync::Arc; -use zenoh_core::{AsyncResolve, Resolvable, SyncResolve}; -#[cfg(feature = "unstable")] -use zenoh_protocol::core::EntityGlobalId; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; +use zenoh_core::{AsyncResolve, Resolvable, Resolve, SyncResolve}; +use zenoh_protocol::network::declare::{subscriber::ext::SubscriberInfo, Mode}; + +/// The subscription mode. +pub use zenoh_protocol::core::SubMode; /// The kind of reliability. pub use zenoh_protocol::core::Reliability; pub(crate) struct SubscriberState { pub(crate) id: Id, - pub(crate) remote_id: Id, pub(crate) key_expr: KeyExpr<'static>, pub(crate) scope: Option>, pub(crate) origin: Locality, @@ -68,7 +66,7 @@ impl fmt::Debug for SubscriberState { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") -/// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()) }) +/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) /// .res() /// .await /// .unwrap(); @@ -81,6 +79,94 @@ pub(crate) struct SubscriberInner<'a> { pub(crate) alive: bool, } +/// A [`PullMode`] subscriber that provides data through a callback. 
+/// +/// CallbackPullSubscribers only provide data when explicitely pulled by the +/// application with the [`pull`](CallbackPullSubscriber::pull) function. +/// CallbackPullSubscribers can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, +/// the [`callback`](SubscriberBuilder::callback) function +/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function +/// of the resulting builder. +/// +/// Subscribers are automatically undeclared when dropped. +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let subscriber = session +/// .declare_subscriber("key/expression") +/// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) +/// .pull_mode() +/// .res() +/// .await +/// .unwrap(); +/// subscriber.pull(); +/// # } +/// ``` +pub(crate) struct PullSubscriberInner<'a> { + inner: SubscriberInner<'a>, +} + +impl<'a> PullSubscriberInner<'a> { + /// Pull available data for a [`CallbackPullSubscriber`]. + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::r#async::*; + /// use zenoh::subscriber::SubMode; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let subscriber = session + /// .declare_subscriber("key/expression") + /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) + /// .pull_mode() + /// .res() + /// .await + /// .unwrap(); + /// subscriber.pull(); + /// # } + /// ``` + #[inline] + pub fn pull(&self) -> impl Resolve> + '_ { + self.inner.session.pull(&self.inner.state.key_expr) + } + + /// Close a [`CallbackPullSubscriber`](CallbackPullSubscriber). 
+ /// + /// `CallbackPullSubscribers` are automatically closed when dropped, but you may want to use this function to handle errors or + /// close the `CallbackPullSubscriber` asynchronously. + /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::r#async::*; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// # fn data_handler(_sample: Sample) { }; + /// let subscriber = session + /// .declare_subscriber("key/expression") + /// .callback(data_handler) + /// .pull_mode() + /// .res() + /// .await + /// .unwrap(); + /// subscriber.undeclare().res().await.unwrap(); + /// # } + /// ``` + #[inline] + pub fn undeclare(self) -> impl Resolve> + 'a { + Undeclarable::undeclare_inner(self.inner, ()) + } +} + impl<'a> SubscriberInner<'a> { /// Close a [`CallbackSubscriber`](CallbackSubscriber). /// @@ -167,6 +253,40 @@ impl Drop for SubscriberInner<'_> { } } +/// The mode for pull subscribers. +#[non_exhaustive] +#[derive(Debug, Clone, Copy)] +pub struct PullMode; + +impl From for SubMode { + fn from(_: PullMode) -> Self { + SubMode::Pull + } +} + +impl From for Mode { + fn from(_: PullMode) -> Self { + Mode::Pull + } +} + +/// The mode for push subscribers. +#[non_exhaustive] +#[derive(Debug, Clone, Copy)] +pub struct PushMode; + +impl From for SubMode { + fn from(_: PushMode) -> Self { + SubMode::Push + } +} + +impl From for Mode { + fn from(_: PushMode) -> Self { + Mode::Push + } +} + /// A builder for initializing a [`FlumeSubscriber`]. 
/// /// # Examples @@ -179,6 +299,7 @@ impl Drop for SubscriberInner<'_> { /// let subscriber = session /// .declare_subscriber("key/expression") /// .best_effort() +/// .pull_mode() /// .res() /// .await /// .unwrap(); @@ -186,7 +307,7 @@ impl Drop for SubscriberInner<'_> { /// ``` #[must_use = "Resolvables do nothing unless you resolve them using the `res` method from either `SyncResolve` or `AsyncResolve`"] #[derive(Debug)] -pub struct SubscriberBuilder<'a, 'b, Handler> { +pub struct SubscriberBuilder<'a, 'b, Mode, Handler> { #[cfg(feature = "unstable")] pub session: SessionRef<'a>, #[cfg(not(feature = "unstable"))] @@ -202,6 +323,11 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { #[cfg(not(feature = "unstable"))] pub(crate) reliability: Reliability, + #[cfg(feature = "unstable")] + pub mode: Mode, + #[cfg(not(feature = "unstable"))] + pub(crate) mode: Mode, + #[cfg(feature = "unstable")] pub origin: Locality, #[cfg(not(feature = "unstable"))] @@ -213,7 +339,7 @@ pub struct SubscriberBuilder<'a, 'b, Handler> { pub(crate) handler: Handler, } -impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { +impl<'a, 'b, Mode> SubscriberBuilder<'a, 'b, Mode, DefaultHandler> { /// Receive the samples for this subscription with a callback. 
/// /// # Examples @@ -225,14 +351,14 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session /// .declare_subscriber("key/expression") - /// .callback(|sample| { println!("Received: {} {:?}", sample.key_expr(), sample.payload()); }) + /// .callback(|sample| { println!("Received: {} {}", sample.key_expr, sample.value); }) /// .res() /// .await /// .unwrap(); /// # } /// ``` #[inline] - pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Callback> + pub fn callback(self, callback: Callback) -> SubscriberBuilder<'a, 'b, Mode, Callback> where Callback: Fn(Sample) + Send + Sync + 'static, { @@ -240,7 +366,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { session, key_expr, reliability, - + mode, origin, handler: _, } = self; @@ -248,7 +374,7 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { session, key_expr, reliability, - + mode, origin, handler: callback, } @@ -279,14 +405,14 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { pub fn callback_mut( self, callback: CallbackMut, - ) -> SubscriberBuilder<'a, 'b, impl Fn(Sample) + Send + Sync + 'static> + ) -> SubscriberBuilder<'a, 'b, Mode, impl Fn(Sample) + Send + Sync + 'static> where CallbackMut: FnMut(Sample) + Send + Sync + 'static, { self.callback(locked(callback)) } - /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoHandler). + /// Receive the samples for this subscription with a [`Handler`](crate::prelude::IntoCallbackReceiverPair). 
/// /// # Examples /// ```no_run @@ -302,19 +428,20 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { - /// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); + /// println!("Received: {} {}", sample.key_expr, sample.value); /// } /// # } /// ``` #[inline] - pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Handler> + pub fn with(self, handler: Handler) -> SubscriberBuilder<'a, 'b, Mode, Handler> where - Handler: crate::prelude::IntoHandler<'static, Sample>, + Handler: crate::prelude::IntoCallbackReceiverPair<'static, Sample>, { let SubscriberBuilder { session, key_expr, reliability, + mode, origin, handler: _, } = self; @@ -322,13 +449,13 @@ impl<'a, 'b> SubscriberBuilder<'a, 'b, DefaultHandler> { session, key_expr, reliability, + mode, origin, handler, } } } - -impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { +impl<'a, 'b, Mode, Handler> SubscriberBuilder<'a, 'b, Mode, Handler> { /// Change the subscription reliability. #[inline] pub fn reliability(mut self, reliability: Reliability) -> Self { @@ -358,26 +485,68 @@ impl<'a, 'b, Handler> SubscriberBuilder<'a, 'b, Handler> { self.origin = origin; self } + + /// Change the subscription mode to Pull. + #[inline] + pub fn pull_mode(self) -> SubscriberBuilder<'a, 'b, PullMode, Handler> { + let SubscriberBuilder { + session, + key_expr, + reliability, + mode: _, + origin, + handler, + } = self; + SubscriberBuilder { + session, + key_expr, + reliability, + mode: PullMode, + origin, + handler, + } + } + + /// Change the subscription mode to Push. 
+ #[inline] + pub fn push_mode(self) -> SubscriberBuilder<'a, 'b, PushMode, Handler> { + let SubscriberBuilder { + session, + key_expr, + reliability, + mode: _, + origin, + handler, + } = self; + SubscriberBuilder { + session, + key_expr, + reliability, + mode: PushMode, + origin, + handler, + } + } } // Push mode -impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { - type To = ZResult>; + type To = ZResult>; } -impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { fn res_sync(self) -> ::To { let key_expr = self.key_expr?; let session = self.session; - let (callback, receiver) = self.handler.into_handler(); + let (callback, receiver) = self.handler.into_cb_receiver_pair(); session .declare_subscriber_inner( &key_expr, @@ -386,6 +555,7 @@ where callback, &SubscriberInfo { reliability: self.reliability, + mode: self.mode.into(), }, ) .map(|sub_state| Subscriber { @@ -399,10 +569,10 @@ where } } -impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, Handler> +impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PushMode, Handler> where - Handler: IntoHandler<'static, Sample> + Send, - Handler::Handler: Send, + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, { type Future = Ready; @@ -411,7 +581,61 @@ where } } -/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoHandler). 
+// Pull mode +impl<'a, Handler> Resolvable for SubscriberBuilder<'a, '_, PullMode, Handler> +where + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, +{ + type To = ZResult>; +} + +impl<'a, Handler> SyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> +where + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, +{ + fn res_sync(self) -> ::To { + let key_expr = self.key_expr?; + let session = self.session; + let (callback, receiver) = self.handler.into_cb_receiver_pair(); + session + .declare_subscriber_inner( + &key_expr, + &None, + self.origin, + callback, + &SubscriberInfo { + reliability: self.reliability, + mode: self.mode.into(), + }, + ) + .map(|sub_state| PullSubscriber { + subscriber: PullSubscriberInner { + inner: SubscriberInner { + session, + state: sub_state, + alive: true, + }, + }, + receiver, + }) + } +} + +impl<'a, Handler> AsyncResolve for SubscriberBuilder<'a, '_, PullMode, Handler> +where + Handler: IntoCallbackReceiverPair<'static, Sample> + Send, + Handler::Receiver: Send, +{ + type Future = Ready; + + fn res_async(self) -> Self::Future { + std::future::ready(self.res_sync()) + } +} + +/// A subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). /// /// Subscribers can be created from a zenoh [`Session`](crate::Session) /// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function @@ -434,7 +658,7 @@ where /// .await /// .unwrap(); /// while let Ok(sample) = subscriber.recv_async().await { -/// println!("Received: {} {:?}", sample.key_expr(), sample.payload()); +/// println!("Received: {} {}", sample.key_expr, sample.value); /// } /// # } /// ``` @@ -445,8 +669,84 @@ pub struct Subscriber<'a, Receiver> { pub receiver: Receiver, } -impl<'a, Receiver> Subscriber<'a, Receiver> { - /// Returns the [`EntityGlobalId`] of this Subscriber. 
+/// A [`PullMode`] subscriber that provides data through a [`Handler`](crate::prelude::IntoCallbackReceiverPair). +/// +/// PullSubscribers only provide data when explicitely pulled by the +/// application with the [`pull`](PullSubscriber::pull) function. +/// PullSubscribers can be created from a zenoh [`Session`](crate::Session) +/// with the [`declare_subscriber`](crate::SessionDeclarations::declare_subscriber) function, +/// the [`with`](SubscriberBuilder::with) function +/// and the [`pull_mode`](SubscriberBuilder::pull_mode) function +/// of the resulting builder. +/// +/// Subscribers are automatically undeclared when dropped. +/// +/// # Examples +/// ``` +/// # #[tokio::main] +/// # async fn main() { +/// use zenoh::prelude::r#async::*; +/// +/// let session = zenoh::open(config::peer()).res().await.unwrap(); +/// let subscriber = session +/// .declare_subscriber("key/expression") +/// .with(flume::bounded(32)) +/// .pull_mode() +/// .res() +/// .await +/// .unwrap(); +/// subscriber.pull(); +/// # } +/// ``` +#[non_exhaustive] +pub struct PullSubscriber<'a, Receiver> { + pub(crate) subscriber: PullSubscriberInner<'a>, + pub receiver: Receiver, +} + +impl<'a, Receiver> Deref for PullSubscriber<'a, Receiver> { + type Target = Receiver; + fn deref(&self) -> &Self::Target { + &self.receiver + } +} + +impl<'a, Receiver> DerefMut for PullSubscriber<'a, Receiver> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.receiver + } +} + +impl<'a, Receiver> PullSubscriber<'a, Receiver> { + /// Pull available data for a [`PullSubscriber`]. 
+ /// + /// # Examples + /// ``` + /// # #[tokio::main] + /// # async fn main() { + /// use zenoh::prelude::r#async::*; + /// use zenoh::subscriber::SubMode; + /// + /// let session = zenoh::open(config::peer()).res().await.unwrap(); + /// let subscriber = session + /// .declare_subscriber("key/expression") + /// .with(flume::bounded(32)) + /// .pull_mode() + /// .res() + /// .await + /// .unwrap(); + /// subscriber.pull(); + /// # } + /// ``` + #[inline] + pub fn pull(&self) -> impl Resolve> + '_ { + self.subscriber.pull() + } + + /// Close a [`PullSubscriber`]. + /// + /// Subscribers are automatically closed when dropped, but you may want to use this function to handle errors or + /// close the Subscriber asynchronously. /// /// # Examples /// ``` @@ -456,20 +756,20 @@ impl<'a, Receiver> Subscriber<'a, Receiver> { /// /// let session = zenoh::open(config::peer()).res().await.unwrap(); /// let subscriber = session.declare_subscriber("key/expression") + /// .pull_mode() /// .res() /// .await /// .unwrap(); - /// let subscriber_id = subscriber.id(); + /// subscriber.undeclare().res().await.unwrap(); /// # } /// ``` - #[zenoh_macros::unstable] - pub fn id(&self) -> EntityGlobalId { - EntityGlobalId { - zid: self.subscriber.session.zid(), - eid: self.subscriber.state.id, - } + #[inline] + pub fn undeclare(self) -> impl Resolve> + 'a { + self.subscriber.undeclare() } +} +impl<'a, Receiver> Subscriber<'a, Receiver> { /// Returns the [`KeyExpr`] this Subscriber subscribes to. pub fn key_expr(&self) -> &KeyExpr<'static> { &self.subscriber.state.key_expr diff --git a/zenoh/src/value.rs b/zenoh/src/value.rs index 92a87cb6c5..849cfd57d5 100644 --- a/zenoh/src/value.rs +++ b/zenoh/src/value.rs @@ -13,85 +13,693 @@ // //! Value primitives. -use crate::{encoding::Encoding, payload::Payload, sample::builder::ValueBuilderTrait}; -/// A zenoh [`Value`] contains a `payload` and an [`Encoding`] that indicates how the [`Payload`] should be interpreted. 
+use base64::{engine::general_purpose::STANDARD as b64_std_engine, Engine}; +use std::borrow::Cow; +use std::convert::TryFrom; +#[cfg(feature = "shared-memory")] +use std::sync::Arc; + +use zenoh_collections::Properties; +use zenoh_result::ZError; + +use crate::buffers::ZBuf; +use crate::prelude::{Encoding, KnownEncoding, Sample, SplitBuffer}; +#[cfg(feature = "shared-memory")] +use zenoh_shm::SharedMemoryBuf; + +/// A zenoh Value. #[non_exhaustive] -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone)] pub struct Value { - /// The binary [`Payload`] of this [`Value`]. - pub payload: Payload, - /// The [`Encoding`] of this [`Value`]. + /// The payload of this Value. + pub payload: ZBuf, + /// An encoding description indicating how the associated payload is encoded. pub encoding: Encoding, } impl Value { - /// Creates a new [`Value`] with default [`Encoding`]. - pub fn new(payload: T) -> Self - where - T: Into, - { + /// Creates a new zenoh Value. + pub fn new(payload: ZBuf) -> Self { + Value { + payload, + encoding: KnownEncoding::AppOctetStream.into(), + } + } + + /// Creates an empty Value. + pub fn empty() -> Self { + Value { + payload: ZBuf::empty(), + encoding: KnownEncoding::AppOctetStream.into(), + } + } + + /// Sets the encoding of this zenoh Value. 
+ #[inline(always)] + pub fn encoding(mut self, encoding: Encoding) -> Self { + self.encoding = encoding; + self + } +} + +impl std::fmt::Debug for Value { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "Value{{ payload: {:?}, encoding: {} }}", + self.payload, self.encoding + ) + } +} + +impl std::fmt::Display for Value { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let payload = self.payload.contiguous(); + write!( + f, + "{}", + String::from_utf8(payload.clone().into_owned()) + .unwrap_or_else(|_| b64_std_engine.encode(payload)) + ) + } +} + +impl std::error::Error for Value {} + +// Shared memory conversion +#[cfg(feature = "shared-memory")] +impl From> for Value { + fn from(smb: Arc) -> Self { + Value { + payload: smb.into(), + encoding: KnownEncoding::AppOctetStream.into(), + } + } +} + +#[cfg(feature = "shared-memory")] +impl From> for Value { + fn from(smb: Box) -> Self { + let smb: Arc = smb.into(); + Self::from(smb) + } +} + +#[cfg(feature = "shared-memory")] +impl From for Value { + fn from(smb: SharedMemoryBuf) -> Self { + Value { + payload: smb.into(), + encoding: KnownEncoding::AppOctetStream.into(), + } + } +} + +// Bytes conversion +impl From for Value { + fn from(buf: ZBuf) -> Self { + Value { + payload: buf, + encoding: KnownEncoding::AppOctetStream.into(), + } + } +} + +impl TryFrom<&Value> for ZBuf { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppOctetStream => Ok(v.payload.clone()), + unexpected => Err(zerror!( + "{:?} can not be converted into Cow<'a, [u8]>", + unexpected + )), + } + } +} + +impl TryFrom for ZBuf { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +impl From<&[u8]> for Value { + fn from(buf: &[u8]) -> Self { + Value::from(ZBuf::from(buf.to_vec())) + } +} + +impl<'a> TryFrom<&'a Value> for Cow<'a, [u8]> { + type Error = ZError; + + fn try_from(v: &'a Value) -> 
Result { + match v.encoding.prefix() { + KnownEncoding::AppOctetStream => Ok(v.payload.contiguous()), + unexpected => Err(zerror!( + "{:?} can not be converted into Cow<'a, [u8]>", + unexpected + )), + } + } +} + +impl From> for Value { + fn from(buf: Vec) -> Self { + Value::from(ZBuf::from(buf)) + } +} + +impl TryFrom<&Value> for Vec { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppOctetStream => Ok(v.payload.contiguous().to_vec()), + unexpected => Err(zerror!( + "{:?} can not be converted into Vec", + unexpected + )), + } + } +} + +impl TryFrom for Vec { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// String conversion +impl From for Value { + fn from(s: String) -> Self { + Value { + payload: ZBuf::from(s.into_bytes()), + encoding: KnownEncoding::TextPlain.into(), + } + } +} + +impl From<&str> for Value { + fn from(s: &str) -> Self { + Value { + payload: ZBuf::from(Vec::::from(s)), + encoding: KnownEncoding::TextPlain.into(), + } + } +} + +impl TryFrom<&Value> for String { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::TextPlain => { + String::from_utf8(v.payload.contiguous().to_vec()).map_err(|e| zerror!("{}", e)) + } + unexpected => Err(zerror!("{:?} can not be converted into String", unexpected)), + } + } +} + +impl TryFrom for String { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// Sample conversion +impl From for Value { + fn from(s: Sample) -> Self { + s.value + } +} + +// i64 conversion +impl From for Value { + fn from(i: i64) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for i64 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => 
std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into i64", unexpected)), + } + } +} + +impl TryFrom for i64 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// i32 conversion +impl From for Value { + fn from(i: i32) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for i32 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into i32", unexpected)), + } + } +} + +impl TryFrom for i32 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// i16 conversion +impl From for Value { + fn from(i: i16) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for i16 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? 
+ .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into i16", unexpected)), + } + } +} + +impl TryFrom for i16 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// i8 conversion +impl From for Value { + fn from(i: i8) -> Self { Value { - payload: payload.into(), - encoding: Encoding::default(), + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for i8 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into i8", unexpected)), + } + } +} + +impl TryFrom for i8 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// isize conversion +impl From for Value { + fn from(i: isize) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for isize { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? 
+ .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into isize", unexpected)), + } + } +} + +impl TryFrom for isize { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// u64 conversion +impl From for Value { + fn from(i: u64) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for u64 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into u64", unexpected)), } } - /// Creates an empty [`Value`]. - pub const fn empty() -> Self { +} + +impl TryFrom for u64 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// u32 conversion +impl From for Value { + fn from(i: u32) -> Self { Value { - payload: Payload::empty(), - encoding: Encoding::default(), + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), } } - /// Checks if the [`Value`] is empty. - /// Value is considered empty if its payload is empty and encoding is default. - pub fn is_empty(&self) -> bool { - self.payload.is_empty() && self.encoding == Encoding::default() +} + +impl TryFrom<&Value> for u32 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? 
+ .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into u32", unexpected)), + } + } +} + +impl TryFrom for u32 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) } } -impl ValueBuilderTrait for Value { - fn encoding>(self, encoding: T) -> Self { - Self { - encoding: encoding.into(), - ..self +// u16 conversion +impl From for Value { + fn from(i: u16) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), } } - fn payload>(self, payload: T) -> Self { - Self { - payload: payload.into(), - ..self +} + +impl TryFrom<&Value> for u16 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into u16", unexpected)), } } - fn value>(self, value: T) -> Self { - let Value { payload, encoding } = value.into(); - Self { payload, encoding } +} + +impl TryFrom for u16 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) } } -impl From for Value -where - T: Into, -{ - fn from(t: T) -> Self { +// u8 conversion +impl From for Value { + fn from(i: u8) -> Self { Value { - payload: t.into(), - encoding: Encoding::default(), + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } + } +} + +impl TryFrom<&Value> for u8 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? 
+ .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into u8", unexpected)), } } } -impl From> for Value -where - T: Into, -{ - fn from(t: Option) -> Self { - t.map_or_else(Value::empty, Into::into) +impl TryFrom for u8 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// usize conversion +impl From for Value { + fn from(i: usize) -> Self { + Value { + payload: ZBuf::from(Vec::::from(i.to_string())), + encoding: KnownEncoding::AppInteger.into(), + } } } -impl Default for Value { - fn default() -> Self { - Value::empty() +impl TryFrom<&Value> for usize { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppInteger => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into usize", unexpected)), + } + } +} + +impl TryFrom for usize { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// f64 conversion +impl From for Value { + fn from(f: f64) -> Self { + Value { + payload: ZBuf::from(Vec::::from(f.to_string())), + encoding: KnownEncoding::AppFloat.into(), + } + } +} + +impl TryFrom<&Value> for f64 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? 
+ .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into f64", unexpected)), + } + } +} + +impl TryFrom for f64 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// f32 conversion +impl From for Value { + fn from(f: f32) -> Self { + Value { + payload: ZBuf::from(Vec::::from(f.to_string())), + encoding: KnownEncoding::AppFloat.into(), + } + } +} + +impl TryFrom<&Value> for f32 { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppFloat => std::str::from_utf8(&v.payload.contiguous()) + .map_err(|e| zerror!("{}", e))? + .parse() + .map_err(|e| zerror!("{}", e)), + unexpected => Err(zerror!("{:?} can not be converted into f32", unexpected)), + } + } +} + +impl TryFrom for f32 { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// JSON conversion +impl From<&serde_json::Value> for Value { + fn from(json: &serde_json::Value) -> Self { + Value { + payload: ZBuf::from(Vec::::from(json.to_string())), + encoding: KnownEncoding::AppJson.into(), + } + } +} + +impl From for Value { + fn from(json: serde_json::Value) -> Self { + Value::from(&json) + } +} + +impl TryFrom<&Value> for serde_json::Value { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match v.encoding.prefix() { + KnownEncoding::AppJson | KnownEncoding::TextJson => { + let r = serde::Deserialize::deserialize(&mut serde_json::Deserializer::from_slice( + &v.payload.contiguous(), + )); + r.map_err(|e| zerror!("{}", e)) + } + unexpected => Err(zerror!( + "{:?} can not be converted into Properties", + unexpected + )), + } + } +} + +impl TryFrom for serde_json::Value { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) + } +} + +// Properties conversion +impl From for Value { + fn from(p: Properties) -> Self { + Value { + payload: ZBuf::from(Vec::::from(p.to_string())), + encoding: 
KnownEncoding::AppProperties.into(), + } + } +} + +impl TryFrom<&Value> for Properties { + type Error = ZError; + + fn try_from(v: &Value) -> Result { + match *v.encoding.prefix() { + KnownEncoding::AppProperties => Ok(Properties::from( + std::str::from_utf8(&v.payload.contiguous()).map_err(|e| zerror!("{}", e))?, + )), + unexpected => Err(zerror!( + "{:?} can not be converted into Properties", + unexpected + )), + } + } +} + +impl TryFrom for Properties { + type Error = ZError; + + fn try_from(v: Value) -> Result { + Self::try_from(&v) } } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index 9fb99b7cc0..d1fbd1086a 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,16 +1,3 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// #[cfg(feature = "unstable")] #[test] fn pubsub() { @@ -20,8 +7,11 @@ fn pubsub() { let _sub = zenoh .declare_subscriber("test/attachment") .callback(|sample| { - println!("{}", sample.payload().deserialize::().unwrap()); - for (k, v) in sample.attachment().unwrap() { + println!( + "{}", + std::str::from_utf8(&sample.payload.contiguous()).unwrap() + ); + for (k, v) in &sample.attachment.unwrap() { assert!(k.iter().rev().zip(v.as_slice()).all(|(k, v)| k == v)) } }) @@ -38,22 +28,22 @@ fn pubsub() { } zenoh .put("test/attachment", "put") - .attachment(Some( + .with_attachment( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - )) + ) .res() .unwrap(); publisher .put("publisher") - .attachment(Some( + .with_attachment( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - )) + ) .res() .unwrap(); } @@ -61,7 +51,7 @@ fn pubsub() { #[cfg(feature = "unstable")] #[test] fn queries() { - use zenoh::{prelude::sync::*, sample::builder::SampleBuilderTrait, sample::Attachment}; + use zenoh::{prelude::sync::*, sample::Attachment}; let zenoh = zenoh::open(Config::default()).res().unwrap(); let _sub = zenoh @@ -69,10 +59,13 @@ fn queries() { .callback(|query| { println!( "{}", - query - .value() - .map(|q| q.payload.deserialize::().unwrap()) - .unwrap_or_default() + std::str::from_utf8( + &query + .value() + .map(|q| q.payload.contiguous()) + .unwrap_or_default() + ) + .unwrap() ); let mut attachment = Attachment::new(); for (k, v) in query.attachment().unwrap() { @@ -80,11 +73,11 @@ fn queries() { attachment.insert(&k, &k); } query - .reply( + .reply(Ok(Sample::new( query.key_expr().clone(), - query.value().unwrap().payload.clone(), + query.value().unwrap().clone(), ) - .attachment(attachment) + .with_attachment(attachment))) .res() .unwrap(); }) @@ -100,13 +93,13 @@ fn queries() { } let get = zenoh .get("test/attachment") - 
.payload("query") - .attachment(Some( + .with_value("query") + .with_attachment( backer .iter() .map(|b| (b.0.as_slice(), b.1.as_slice())) .collect(), - )) + ) .res() .unwrap(); while let Ok(reply) = get.recv() { diff --git a/zenoh/tests/events.rs b/zenoh/tests/events.rs index 201f4941f9..6b2790e151 100644 --- a/zenoh/tests/events.rs +++ b/zenoh/tests/events.rs @@ -58,15 +58,15 @@ async fn zenoh_events() { let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind == SampleKind::Put); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind() == SampleKind::Put); + assert!(sample.as_ref().unwrap().kind == SampleKind::Put); let replies: Vec = ztimeout!(session .get(format!("@/session/{zid}/transport/unicast/*")) @@ -76,7 +76,7 @@ async fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); let replies: Vec = ztimeout!(session @@ -87,22 +87,22 @@ async fn zenoh_events() { .collect(); assert!(replies.len() == 1); assert!(replies[0].sample.is_ok()); - let key_expr = replies[0].sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = replies[0].sample.as_ref().unwrap().key_expr.as_str(); 
assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); close_session(session2).await; let sample = ztimeout!(sub1.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); assert!(key_expr.eq(&format!("@/session/{zid}/transport/unicast/{zid2}"))); - assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); let sample = ztimeout!(sub2.recv_async()); assert!(sample.is_ok()); - let key_expr = sample.as_ref().unwrap().key_expr().as_str(); + let key_expr = sample.as_ref().unwrap().key_expr.as_str(); assert!(key_expr.starts_with(&format!("@/session/{zid}/transport/unicast/{zid2}/link/"))); - assert!(sample.as_ref().unwrap().kind() == SampleKind::Delete); + assert!(sample.as_ref().unwrap().kind == SampleKind::Delete); sub2.undeclare().res().await.unwrap(); sub1.undeclare().res().await.unwrap(); diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index 22600b6cc0..ae894e44b6 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -1,16 +1,3 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// #[test] fn reuse() { zenoh::kedefine!( diff --git a/zenoh/tests/handler.rs b/zenoh/tests/handler.rs deleted file mode 100644 index ceed15e2c3..0000000000 --- a/zenoh/tests/handler.rs +++ /dev/null @@ -1,80 +0,0 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -#[test] -fn pubsub_with_ringbuffer() { - use std::{thread, time::Duration}; - use zenoh::{handlers::RingBuffer, prelude::sync::*}; - - let zenoh = zenoh::open(Config::default()).res().unwrap(); - let sub = zenoh - .declare_subscriber("test/ringbuffer") - .with(RingBuffer::new(3)) - .res() - .unwrap(); - for i in 0..10 { - zenoh - .put("test/ringbuffer", format!("put{i}")) - .res() - .unwrap(); - } - // Should only receive the last three samples ("put7", "put8", "put9") - for i in 7..10 { - assert_eq!( - sub.recv() - .unwrap() - .unwrap() - .payload() - .deserialize::() - .unwrap(), - format!("put{i}") - ); - } - // Wait for the subscriber to get the value - thread::sleep(Duration::from_millis(1000)); -} - -#[test] -fn query_with_ringbuffer() { - use zenoh::{handlers::RingBuffer, prelude::sync::*}; - - let zenoh = zenoh::open(Config::default()).res().unwrap(); - let queryable = zenoh - .declare_queryable("test/ringbuffer_query") - .with(RingBuffer::new(1)) - .res() - .unwrap(); - - let _reply1 = zenoh - .get("test/ringbuffer_query") - .payload("query1") - .res() - .unwrap(); - let _reply2 = zenoh - .get("test/ringbuffer_query") - .payload("query2") - .res() - .unwrap(); - - let query = 
queryable.recv().unwrap().unwrap(); - // Only receive the latest query - assert_eq!( - query - .value() - .unwrap() - .payload - .deserialize::() - .unwrap(), - "query2" - ); -} diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index 1ee93e4949..073d85566b 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -1,16 +1,3 @@ -// -// Copyright (c) 2024 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// use std::sync::{Arc, Mutex}; use zenoh_core::zlock; @@ -105,9 +92,9 @@ fn downsampling_by_keyexpr_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r100" { + if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r100" { zlock!(counter_r100).tick(); - } else if sample.key_expr().as_str() == "test/downsamples_by_keyexp/r50" { + } else if sample.key_expr.as_str() == "test/downsamples_by_keyexp/r50" { zlock!(counter_r50).tick(); } }) @@ -221,7 +208,7 @@ fn downsampling_by_interface_impl(egress: bool) { .callback(move |sample| { let mut count = zlock!(total_count_clone); *count += 1; - if sample.key_expr().as_str() == "test/downsamples_by_interface/r100" { + if sample.key_expr.as_str() == "test/downsamples_by_interface/r100" { zlock!(counter_r100).tick(); } }) diff --git a/zenoh/tests/liveliness.rs b/zenoh/tests/liveliness.rs index 0e2870d808..b4b138d78f 100644 --- a/zenoh/tests/liveliness.rs +++ b/zenoh/tests/liveliness.rs @@ -54,14 +54,14 @@ async fn zenoh_liveliness() { .res_async()) .unwrap(); let sample = 
ztimeout!(replies.recv_async()).unwrap().sample.unwrap(); - assert!(sample.kind() == SampleKind::Put); - assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); + assert!(sample.kind == SampleKind::Put); + assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); assert!(ztimeout!(replies.recv_async()).is_err()); let sample = ztimeout!(sub.recv_async()).unwrap(); - assert!(sample.kind() == SampleKind::Put); - assert!(sample.key_expr().as_str() == "zenoh_liveliness_test"); + assert!(sample.kind == SampleKind::Put); + assert!(sample.key_expr.as_str() == "zenoh_liveliness_test"); drop(token); diff --git a/zenoh/tests/qos.rs b/zenoh/tests/qos.rs index 1885c316ea..0e28af0847 100644 --- a/zenoh/tests/qos.rs +++ b/zenoh/tests/qos.rs @@ -42,15 +42,13 @@ async fn pubsub() { tokio::time::sleep(SLEEP).await; ztimeout!(publisher1.put("qos").res_async()).unwrap(); - let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; assert_eq!(qos.priority(), Priority::DataHigh); assert_eq!(qos.congestion_control(), CongestionControl::Drop); ztimeout!(publisher2.put("qos").res_async()).unwrap(); - let sample = ztimeout!(subscriber.recv_async()).unwrap(); - let qos = sample.qos(); + let qos = ztimeout!(subscriber.recv_async()).unwrap().qos; assert_eq!(qos.priority(), Priority::DataLow); assert_eq!(qos.congestion_control(), CongestionControl::Block); diff --git a/zenoh/tests/routing.rs b/zenoh/tests/routing.rs index 56bacd7fdd..6c5afe0673 100644 --- a/zenoh/tests/routing.rs +++ b/zenoh/tests/routing.rs @@ -11,14 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use std::str::FromStr; -use std::sync::atomic::Ordering; -use std::sync::{atomic::AtomicUsize, Arc}; -use std::time::Duration; +use std::{ + str::FromStr, + sync::{atomic::AtomicUsize, atomic::Ordering, Arc}, + time::Duration, +}; use tokio_util::{sync::CancellationToken, task::TaskTracker}; -use zenoh::config::{Config, 
ModeDependentValue}; -use zenoh::prelude::r#async::*; -use zenoh::Result; +use zenoh::{ + config::{Config, ModeDependentValue}, + prelude::r#async::*, + value::Value, + Result, +}; use zenoh_core::ztimeout; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher}; use zenoh_result::bail; @@ -57,7 +61,7 @@ impl Task { _ = token.cancelled() => break, res = sub.recv_async() => { if let Ok(sample) = res { - let recv_size = sample.payload().len(); + let recv_size = sample.value.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -75,13 +79,15 @@ impl Task { // The Pub task keeps putting messages until all checkpoints are finished. Self::Pub(ke, payload_size) => { + let value: Value = vec![0u8; *payload_size].into(); + // while remaining_checkpoints.load(Ordering::Relaxed) > 0 { loop { tokio::select! { _ = token.cancelled() => break, // WARN: this won't yield after a timeout since the put is a blocking call res = tokio::time::timeout(std::time::Duration::from_secs(1), session - .put(ke, vec![0u8; *payload_size]) + .put(ke, value.clone()) .congestion_control(CongestionControl::Block) .res()) => { let _ = res?; @@ -102,7 +108,7 @@ impl Task { while let Ok(reply) = replies.recv_async().await { match reply.sample { Ok(sample) => { - let recv_size = sample.payload().len(); + let recv_size = sample.value.payload.len(); if recv_size != *expected_size { bail!("Received payload size {recv_size} mismatches the expected {expected_size}"); } @@ -110,7 +116,7 @@ impl Task { Err(err) => { log::warn!( - "Sample got from {} failed to unwrap! Error: {:?}.", + "Sample got from {} failed to unwrap! Error: {}.", ke, err ); @@ -128,13 +134,13 @@ impl Task { // The Queryable task keeps replying to requested messages until all checkpoints are finished. 
Self::Queryable(ke, payload_size) => { let queryable = session.declare_queryable(ke).res_async().await?; - let payload = vec![0u8; *payload_size]; + let sample = Sample::try_from(ke.clone(), vec![0u8; *payload_size])?; loop { tokio::select! { _ = token.cancelled() => break, query = queryable.recv_async() => { - query?.reply(ke.to_owned(), payload.clone()).res_async().await?; + query?.reply(Ok(sample.clone())).res_async().await?; }, } } diff --git a/zenoh/tests/session.rs b/zenoh/tests/session.rs index 8c2d2e9937..7e50f7a6bb 100644 --- a/zenoh/tests/session.rs +++ b/zenoh/tests/session.rs @@ -15,6 +15,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh::prelude::r#async::*; +use zenoh::runtime::Runtime; use zenoh_core::ztimeout; const TIMEOUT: Duration = Duration::from_secs(60); @@ -64,7 +65,7 @@ async fn open_session_multicast(endpoint01: &str, endpoint02: &str) -> (Session, } async fn close_session(peer01: Session, peer02: Session) { - println!("[ ][01d] Closing peer02 session"); + println!("[ ][01d] Closing peer01 session"); ztimeout!(peer01.close().res_async()).unwrap(); println!("[ ][02d] Closing peer02 session"); ztimeout!(peer02.close().res_async()).unwrap(); @@ -87,7 +88,7 @@ async fn test_session_pubsub(peer01: &Session, peer02: &Session, reliability: Re let sub = ztimeout!(peer01 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload().len(), size); + assert_eq!(sample.value.payload.len(), size); c_msgs.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -145,39 +146,13 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re let c_msgs = msgs.clone(); let qbl = ztimeout!(peer01 .declare_queryable(key_expr) - .callback(move |query| { + .callback(move |sample| { c_msgs.fetch_add(1, Ordering::Relaxed); - match query.parameters() { - "ok_put" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - 
ztimeout!(query - .reply( - KeyExpr::try_from(key_expr).unwrap(), - vec![0u8; size].to_vec() - ) - .res_async()) - .unwrap() - }) - }); - } - "ok_del" => { - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_del(key_expr).res_async()).unwrap() - }) - }); - } - "err" => { - let rep = Value::from(vec![0u8; size]); - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - ztimeout!(query.reply_err(rep).res_async()).unwrap() - }) - }); - } - _ => panic!("Unknown query parameter"), - } + let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(async { ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() }) + }); }) .res_async()) .unwrap(); @@ -186,15 +161,12 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re tokio::time::sleep(SLEEP).await; // Get data - println!("[QR][02c] Getting Ok(Put) on peer02 session. {msg_count} msgs."); + println!("[QR][02c] Getting on peer02 session. {msg_count} msgs."); let mut cnt = 0; for _ in 0..msg_count { - let selector = format!("{}?ok_put", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); + let rs = ztimeout!(peer02.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); - assert_eq!(s.kind(), SampleKind::Put); - assert_eq!(s.payload().len(), size); + assert_eq!(s.sample.unwrap().value.payload.len(), size); cnt += 1; } } @@ -202,41 +174,6 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re assert_eq!(msgs.load(Ordering::Relaxed), msg_count); assert_eq!(cnt, msg_count); - msgs.store(0, Ordering::Relaxed); - - println!("[QR][03c] Getting Ok(Delete) on peer02 session. 
{msg_count} msgs."); - let mut cnt = 0; - for _ in 0..msg_count { - let selector = format!("{}?ok_del", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); - while let Ok(s) = ztimeout!(rs.recv_async()) { - let s = s.sample.unwrap(); - assert_eq!(s.kind(), SampleKind::Delete); - assert_eq!(s.payload().len(), 0); - cnt += 1; - } - } - println!("[QR][03c] Got on peer02 session. {cnt}/{msg_count} msgs."); - assert_eq!(msgs.load(Ordering::Relaxed), msg_count); - assert_eq!(cnt, msg_count); - - msgs.store(0, Ordering::Relaxed); - - println!("[QR][04c] Getting Err() on peer02 session. {msg_count} msgs."); - let mut cnt = 0; - for _ in 0..msg_count { - let selector = format!("{}?err", key_expr); - let rs = ztimeout!(peer02.get(selector).res_async()).unwrap(); - while let Ok(s) = ztimeout!(rs.recv_async()) { - let e = s.sample.unwrap_err(); - assert_eq!(e.payload.len(), size); - cnt += 1; - } - } - println!("[QR][04c] Got on peer02 session. {cnt}/{msg_count} msgs."); - assert_eq!(msgs.load(Ordering::Relaxed), msg_count); - assert_eq!(cnt, msg_count); - println!("[PS][03c] Unqueryable on peer01 session"); ztimeout!(qbl.undeclare().res_async()).unwrap(); @@ -248,7 +185,6 @@ async fn test_session_qryrep(peer01: &Session, peer02: &Session, reliability: Re #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_unicast() { let _ = env_logger::try_init(); - let (peer01, peer02) = open_session_unicast(&["tcp/127.0.0.1:17447"]).await; test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; test_session_qryrep(&peer01, &peer02, Reliability::Reliable).await; @@ -258,9 +194,50 @@ async fn zenoh_session_unicast() { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn zenoh_session_multicast() { let _ = env_logger::try_init(); - let (peer01, peer02) = open_session_multicast("udp/224.0.0.1:17448", "udp/224.0.0.1:17448").await; test_session_pubsub(&peer01, &peer02, Reliability::BestEffort).await; 
close_session(peer01, peer02).await; } + +async fn open_session_unicast_runtime(endpoints: &[&str]) -> (Runtime, Runtime) { + // Open the sessions + let mut config = config::peer(); + config.listen.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + println!("[ ][01a] Creating r1 session runtime: {:?}", endpoints); + let r1 = Runtime::new(config).await.unwrap(); + + let mut config = config::peer(); + config.connect.endpoints = endpoints + .iter() + .map(|e| e.parse().unwrap()) + .collect::>(); + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + println!("[ ][02a] Creating r2 session runtime: {:?}", endpoints); + let r2 = Runtime::new(config).await.unwrap(); + + (r1, r2) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn zenoh_2sessions_1runtime_init() { + let (r1, r2) = open_session_unicast_runtime(&["tcp/127.0.0.1:17449"]).await; + println!("[RI][02a] Creating peer01 session from runtime 1"); + let peer01 = zenoh::init(r1.clone()).res_async().await.unwrap(); + println!("[RI][02b] Creating peer02 session from runtime 2"); + let peer02 = zenoh::init(r2.clone()).res_async().await.unwrap(); + println!("[RI][02c] Creating peer01a session from runtime 1"); + let peer01a = zenoh::init(r1.clone()).res_async().await.unwrap(); + println!("[RI][03c] Closing peer01a session"); + std::mem::drop(peer01a); + test_session_pubsub(&peer01, &peer02, Reliability::Reliable).await; + close_session(peer01, peer02).await; + println!("[ ][01e] Closing r1 runtime"); + ztimeout!(r1.close()).unwrap(); + println!("[ ][02e] Closing r2 runtime"); + ztimeout!(r2.close()).unwrap(); +} diff --git a/zenoh/tests/unicity.rs b/zenoh/tests/unicity.rs index f34704fb7e..865121308a 100644 --- a/zenoh/tests/unicity.rs +++ b/zenoh/tests/unicity.rs @@ -107,7 +107,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub1 = ztimeout!(s01 
.declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload().len(), size); + assert_eq!(sample.value.payload.len(), size); c_msgs1.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -119,7 +119,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { let sub2 = ztimeout!(s02 .declare_subscriber(key_expr) .callback(move |sample| { - assert_eq!(sample.payload().len(), size); + assert_eq!(sample.value.payload.len(), size); c_msgs2.fetch_add(1, Ordering::Relaxed); }) .res_async()) @@ -173,7 +173,7 @@ async fn test_unicity_pubsub(s01: &Session, s02: &Session, s03: &Session) { } async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { - let key_expr = KeyExpr::new("test/unicity").unwrap(); + let key_expr = "test/unicity"; let msg_count = 1; let msgs1 = Arc::new(AtomicUsize::new(0)); let msgs2 = Arc::new(AtomicUsize::new(0)); @@ -184,20 +184,16 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { // Queryable to data println!("[QR][01c] Queryable on s01 session"); - let cke = key_expr.clone(); let c_msgs1 = msgs1.clone(); let qbl1 = ztimeout!(s01 - .declare_queryable(cke.clone()) + .declare_queryable(key_expr) .callback(move |sample| { c_msgs1.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } + let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); }) .res_async()) @@ -205,20 +201,16 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { // Queryable to data println!("[QR][02c] Queryable on s02 session"); - let cke = key_expr.clone(); let c_msgs2 = msgs2.clone(); let qbl2 = ztimeout!(s02 - 
.declare_queryable(cke.clone()) + .declare_queryable(key_expr) .callback(move |sample| { c_msgs2.fetch_add(1, Ordering::Relaxed); - tokio::task::block_in_place({ - let cke2 = cke.clone(); - move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(cke2.clone(), vec![0u8; size]).res_async()) - .unwrap() - }); - } + let rep = Sample::try_from(key_expr, vec![0u8; size]).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); }) .res_async()) @@ -229,12 +221,11 @@ async fn test_unicity_qryrep(s01: &Session, s02: &Session, s03: &Session) { // Get data println!("[QR][03c] Getting on s03 session. {msg_count} msgs."); - let cke = key_expr.clone(); let mut cnt = 0; for _ in 0..msg_count { - let rs = ztimeout!(s03.get(cke.clone()).res_async()).unwrap(); + let rs = ztimeout!(s03.get(key_expr).res_async()).unwrap(); while let Ok(s) = ztimeout!(rs.recv_async()) { - assert_eq!(s.sample.unwrap().payload().len(), size); + assert_eq!(s.sample.unwrap().value.payload.len(), size); cnt += 1; } } diff --git a/zenohd/src/main.rs b/zenohd/src/main.rs index 4faa10534c..d7cb9a52a9 100644 --- a/zenohd/src/main.rs +++ b/zenohd/src/main.rs @@ -374,6 +374,7 @@ fn test_default_features() { concat!( " zenoh/auth_pubkey", " zenoh/auth_usrpwd", + // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", " zenoh/transport_multilink", @@ -400,6 +401,7 @@ fn test_no_default_features() { concat!( // " zenoh/auth_pubkey", // " zenoh/auth_usrpwd", + // " zenoh/complete_n", // " zenoh/shared-memory", // " zenoh/stats", // " zenoh/transport_multilink",