From 0db509263c18ff011dcb64af0b0e87f6f68a7c16 Mon Sep 17 00:00:00 2001
From: Alin Dima
Date: Wed, 17 Jul 2024 12:27:11 +0300
Subject: [PATCH 1/4] add elastic scaling MVP guide (#4663)

Resolves https://github.com/paritytech/polkadot-sdk/issues/4468

Gives parachain teams instructions on how to enable the elastic scaling MVP.

Still a draft because it depends on further changes we make to the
slot-based collator: https://github.com/paritytech/polkadot-sdk/pull/4097

Parachains cannot use this yet because the collator has not been released
and no relay chain network has been configured for elastic scaling yet
---
 Cargo.lock                                    |   1 +
 cumulus/polkadot-parachain/Cargo.toml         |   1 +
 cumulus/polkadot-parachain/src/service.rs     |  71 ++++++---
 .../src/guides/enable_elastic_scaling_mvp.rs  | 142 ++++++++++++++++++
 docs/sdk/src/guides/mod.rs                    |   3 +
 prdoc/pr_4663.prdoc                           |  14 ++
 6 files changed, 215 insertions(+), 17 deletions(-)
 create mode 100644 docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
 create mode 100644 prdoc/pr_4663.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 3866a74b3f21..ad75224fefdc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13766,6 +13766,7 @@ dependencies = [
  "cumulus-primitives-aura",
  "cumulus-primitives-core",
  "cumulus-relay-chain-interface",
+ "docify",
  "frame-benchmarking",
  "frame-benchmarking-cli",
  "frame-support",
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index 7085211dad26..b20d2a28fa7f 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -24,6 +24,7 @@ hex-literal = { workspace = true, default-features = true }
 log = { workspace = true, default-features = true }
 serde = { features = ["derive"], workspace = true, default-features = true }
 serde_json = { workspace = true, default-features = true }
+docify = { workspace = true }
 
 # Local
 rococo-parachain-runtime = { workspace = true }
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index f5f6189d1f0d..6a6cf15635e0 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -15,13 +15,16 @@
 // along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
 
 use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand};
-use cumulus_client_collator::service::CollatorService;
-use cumulus_client_consensus_aura::collators::{
-	lookahead::{self as aura, Params as AuraParams},
-	slot_based::{self as slot_based, Params as SlotBasedParams},
+use cumulus_client_collator::service::{
+	CollatorService, ServiceInterface as CollatorServiceInterface,
+};
+use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams};
+#[docify::export(slot_based_colator_import)]
+use cumulus_client_consensus_aura::collators::slot_based::{
+	self as slot_based, Params as SlotBasedParams,
 };
 use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
-use cumulus_client_consensus_proposer::Proposer;
+use cumulus_client_consensus_proposer::{Proposer, ProposerInterface};
 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
 #[allow(deprecated)]
 use cumulus_client_service::old_consensus;
@@ -62,6 +65,7 @@ use sc_sysinfo::HwBench;
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
 use sc_transaction_pool::FullPool;
 use sp_api::ProvideRuntimeApi;
+use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::KeystorePtr;
 use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT};
 use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration};
@@ -623,6 +627,48 @@ pub(crate) struct StartSlotBasedAuraConsensus<RuntimeApi, AuraId>(
 	PhantomData<(RuntimeApi, AuraId)>,
 );
 
+impl<RuntimeApi, AuraId> StartSlotBasedAuraConsensus<RuntimeApi, AuraId>
+where
+	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
+	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
+	AuraId: AuraIdT + Sync,
+{
+	#[docify::export_content]
+	fn launch_slot_based_collator<CIDP, CHP, Proposer, CS>(
+		params: SlotBasedParams<
+			ParachainBlockImport<RuntimeApi>,
+			CIDP,
+			ParachainClient<RuntimeApi>,
+			ParachainBackend,
+			Arc<dyn RelayChainInterface>,
+			CHP,
+			Proposer,
+			CS,
+		>,
+		task_manager: &TaskManager,
+	) where
+		CIDP: CreateInherentDataProviders<Block, ()> + 'static,
+		CIDP::InherentDataProviders: Send,
+		CHP: cumulus_client_consensus_common::ValidationCodeHashProvider<Hash> + Send + 'static,
+		Proposer: ProposerInterface<Block> + Send + Sync + 'static,
+		CS: CollatorServiceInterface<Block> + Send + Sync + Clone + 'static,
+	{
+		let (collation_future, block_builder_future) =
+			slot_based::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params);
+
+		task_manager.spawn_essential_handle().spawn(
+			"collation-task",
+			Some("parachain-block-authoring"),
+			collation_future,
+		);
+		task_manager.spawn_essential_handle().spawn(
+			"block-builder-task",
+			Some("parachain-block-authoring"),
+			block_builder_future,
+		);
+	}
+}
+
 impl<RuntimeApi, AuraId> StartConsensus<RuntimeApi>
 	for StartSlotBasedAuraConsensus<RuntimeApi, AuraId>
 where
@@ -683,19 +729,10 @@ where
 			slot_drift: Duration::from_secs(1),
 		};
 
-		let (collation_future, block_builder_future) =
-			slot_based::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params);
+		// We have a separate function only to be able to use `docify::export` on this piece of
+		// code.
+		Self::launch_slot_based_collator(params, task_manager);
 
-		task_manager.spawn_essential_handle().spawn(
-			"collation-task",
-			Some("parachain-block-authoring"),
-			collation_future,
-		);
-		task_manager.spawn_essential_handle().spawn(
-			"block-builder-task",
-			Some("parachain-block-authoring"),
-			block_builder_future,
-		);
 		Ok(())
 	}
 }
diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
new file mode 100644
index 000000000000..bc4f36c271fe
--- /dev/null
+++ b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
@@ -0,0 +1,142 @@
+//! # Enable elastic scaling MVP for a parachain
+//!
+//! <div class="warning">This guide assumes full familiarity with Asynchronous Backing and its
+//! terminology, as defined in https://wiki.polkadot.network/docs/maintain-guides-async-backing.
+//! Furthermore, the parachain should have already been upgraded according to the guide.</div>
+//!
+//! ## Quick introduction to elastic scaling
+//!
+//! [Elastic scaling](https://polkadot.network/blog/elastic-scaling-streamling-growth-on-polkadot)
+//! is a feature that will enable parachains to seamlessly scale up/down the number of used cores.
+//! This can be desirable in order to increase the compute or storage throughput of a parachain or
+//! to lower the latency between a transaction being submitted and it getting built in a parachain
+//! block.
+//!
+//! At present, with Asynchronous Backing enabled, a parachain can only include a block on the relay
+//! chain every 6 seconds, regardless of how many cores the parachain acquires. Elastic scaling
+//! builds further on the 10x throughput increase of Async Backing, enabling collators to submit up
+//! to 3 parachain blocks per relay chain block, resulting in a further 3x throughput increase.
+//!
+//! ## Current limitations of the MVP
+//!
+//! The full implementation of elastic scaling spans the entire relay/parachain stack and is
+//! still [work in progress](https://github.com/paritytech/polkadot-sdk/issues/1829).
+//! The MVP is still considered experimental software, so stability is not guaranteed.
+//! If you encounter any problems,
+//! [please open an issue](https://github.com/paritytech/polkadot-sdk/issues).
+//! The current limitations of the MVP are described below:
+//!
+//! 1. **Limited core count**. Parachain block authoring is sequential, so the second block will
+//!    start being built only after the previous block is imported. The current block production is
+//!    capped at 2 seconds of execution. Therefore, assuming the full 2 seconds are used, a
+//!    parachain can only utilise at most 3 cores in a relay chain slot of 6 seconds. If the full
+//!    execution time is not being used, higher core counts can be achieved.
+//! 2. **Single collator requirement for consistently scaling beyond a core at full authorship
+//!    duration of 2 seconds per block.** Using the current implementation with multiple collators
+//!    adds additional latency to the block production pipeline. Assuming block execution takes
+//!    about the same time as authorship, the additional overhead is equal to the duration of the
+//!    authorship plus the block announcement. Each collator must first import the previous block
+//!    before authoring a new one, so it is clear that the highest throughput can be achieved using
+//!    a single collator. Experiments show that the peak performance using more than one collator
+//!    (measured up to 10 collators) is utilising 2 cores with an authorship time of 1.3 seconds
+//!    per block, which leaves 400ms for networking overhead. This would allow for 2.6 seconds of
+//!    execution, compared to the 2 seconds that async backing alone enables.
+//!    [More experiments](https://github.com/paritytech/polkadot-sdk/issues/4696) are being
+//!    conducted in this space.
+//! 3. **Trusted collator set.** The collator set needs to be trusted until there’s a mitigation
+//!    that would prevent or deter multiple collators from submitting the same collation to
+//!    multiple backing groups. A solution is being discussed
+//!    [here](https://github.com/polkadot-fellows/RFCs/issues/92).
+//! 4. **Fixed scaling.** For true elasticity, the parachain must be able to seamlessly acquire or
+//!    sell coretime as the user demand grows and shrinks over time, in an automated manner. This
+//!    is currently lacking - a parachain can only scale up or down by “manually” acquiring
+//!    coretime. This is not in the scope of the relay chain functionality.
+//!    Parachains can already start implementing such autoscaling, but we aim to provide a
+//!    framework/examples for developing autoscaling strategies.
+//!
+//! Another hard limitation that is not envisioned to ever be lifted is that parachains which
+//! create forks will generally not be able to utilise the full number of cores they acquire.
+//!
+//! ## Using elastic scaling MVP
+//!
+//! ### Prerequisites
+//!
+//! - Ensure Asynchronous Backing is enabled on the network and you have enabled it on the
+//!   parachain using [`crate::guides::async_backing_guide`].
+//! - Ensure the `AsyncBackingParams.max_candidate_depth` value is configured to at least double
+//!   the maximum targeted parachain velocity. For example, if the parachain will build at most 3
+//!   candidates per relay chain block, the `max_candidate_depth` should be at least 6.
+//! - Use a trusted single collator for maximum throughput.
+//! - Ensure enough coretime is assigned to the parachain. For maximum throughput, the upper bound
+//!   is 3 cores.
+//!
+//! <div class="warning">Phase 1 is not needed if using the `polkadot-parachain` binary built
+//! from the latest polkadot-sdk release! Simply pass the `--experimental-use-slot-based` parameter
+//! to the command line and jump to Phase 2.</div>
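+//!
+//! For example, a collator launch command could look like this (a sketch only: the chain spec
+//! files and base path are placeholders that depend on your deployment):
+//!
+//! ```text
+//! polkadot-parachain --collator --experimental-use-slot-based \
+//!     --chain my-parachain-spec.json --base-path /tmp/my-collator \
+//!     -- --chain my-relay-chain-spec.json
+//! ```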
+//!
+//! The following steps assume you are using the cumulus parachain template.
+//!
+//! ### Phase 1 - (For custom parachain node) Update Parachain Node
+//!
+//! This assumes you are using
+//! [the latest parachain template](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
+//!
+//! This phase consists of plugging in the new slot-based collator.
+//!
+//! 1. In `node/src/service.rs` import the slot-based collator instead of the lookahead collator.
+#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", slot_based_colator_import)]
+//!
+//! 2. In `start_consensus()`
+//!     - Remove the `overseer_handle` param (also remove the
+//!       `OverseerHandle` type import if it’s not used elsewhere).
+//!     - Rename `AuraParams` to `SlotBasedParams`, remove the `overseer_handle` field and add a
+//!       `slot_drift` field with a value of `Duration::from_secs(1)`.
+//!     - Replace the single future returned by `aura::run` with the two futures returned by
+//!       `slot_based::run` and spawn them as separate tasks:
+#![doc = docify::embed!("../../cumulus/polkadot-parachain/src/service.rs", launch_slot_based_collator)]
+//!
+//! 3. In `start_parachain_node()` remove the `overseer_handle` param passed to `start_consensus`.
+//!
+//! ### Phase 2 - Activate fixed factor scaling in the runtime
+//!
+//! This phase consists of a couple of changes that need to be made to the parachain’s runtime in
+//! order to utilise fixed factor scaling.
+//!
+//! First of all, you need to decide the upper limit to how many parachain blocks you need to
+//! produce per relay chain block (in direct correlation with the number of acquired cores). This
+//! should be either 1 (no scaling), 2 or 3. This is called the parachain velocity.
+//!
+//! If you configure a velocity which is different from the number of assigned cores, the measured
+//! velocity in practice will be the minimum of these two.
+//!
+//! The chosen velocity will also be used to compute:
+//! - The slot duration, by dividing the 6000 ms relay chain slot duration by the velocity.
+//! - The unincluded segment capacity, by multiplying the velocity by 2 and adding 1 to it.
+//!
+//! Let’s assume a desired maximum velocity of 3 parachain blocks per relay chain block. The needed
+//! changes would all be done in `runtime/src/lib.rs`:
+//!
+//! 1. Rename `BLOCK_PROCESSING_VELOCITY` to `MAX_BLOCK_PROCESSING_VELOCITY` and increase it to the
+//!    desired value. In this example, 3.
+//!
+//! ```ignore
+//! const MAX_BLOCK_PROCESSING_VELOCITY: u32 = 3;
+//! ```
+//!
+//! 2. Set the `MILLISECS_PER_BLOCK` to the desired value.
+//!
+//! ```ignore
+//! const MILLISECS_PER_BLOCK: u32 =
+//!     RELAY_CHAIN_SLOT_DURATION_MILLIS / MAX_BLOCK_PROCESSING_VELOCITY;
+//! ```
+//! Note: for a parachain which measures time in terms of its own block number, changing block
+//! time may cause complications, requiring additional changes. See here for more information:
+//! [`crate::guides::async_backing_guide#timing-by-block-number`].
+//!
+//! 3. Increase the `UNINCLUDED_SEGMENT_CAPACITY` to the desired value.
+//!
+//! ```ignore
+//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = 2 * MAX_BLOCK_PROCESSING_VELOCITY + 1;
+//! ```
diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs
index 8296ed447e14..9384f4c82ab3 100644
--- a/docs/sdk/src/guides/mod.rs
+++ b/docs/sdk/src/guides/mod.rs
@@ -41,3 +41,6 @@ pub mod async_backing_guide;
 
 /// How to enable metadata hash verification in the runtime.
 pub mod enable_metadata_hash;
+
+/// How to enable elastic scaling MVP on a parachain.
+pub mod enable_elastic_scaling_mvp;
diff --git a/prdoc/pr_4663.prdoc b/prdoc/pr_4663.prdoc
new file mode 100644
index 000000000000..74b1274828d5
--- /dev/null
+++ b/prdoc/pr_4663.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Add elastic scaling MVP guide
+
+doc:
+  - audience: Node Operator
+    description: |
+      Adds a guide for parachains that want to use the experimental elastic scaling MVP.
+      Will be viewable at: https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/enable_elastic_scaling_mvp/index.html
+
+crates:
+  - name: polkadot-parachain-bin
+    bump: none

From 739951991f14279a7dc05d42c29ccf57d3740a4c Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Wed, 17 Jul 2024 13:28:58 +0200
Subject: [PATCH 2/4] Adjust release flows to use those with the new branch
 model (#5015)

This PR contains adjustments of the node release pipelines so that they can
be used to trigger release actions based on the `stable` branch.

Previously, the whole pipeline of flows, from [creation of the
`rc-tag`](https://github.com/paritytech/polkadot-sdk/blob/master/.github/workflows/release-10_rc-automation.yml)
(v1.15.0-rc1, v1.15.0-rc2, etc.) till [the release draft
creation](https://github.com/paritytech/polkadot-sdk/blob/master/.github/workflows/release-30_publish_release_draft.yml),
was triggered on push to the node release branch. As we had the node release
branch and the crates release branch separately, it worked fine.

From now on, as we are switching to the one-branch approach, for the first
iteration I would like to keep things simple to see how the new release
process will work with both parts (crates and node) made from one branch.
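With this change, the tagging step can be started by hand, e.g. with the
GitHub CLI (the version value here is only an illustration; the workflow
expects the `vX.X.X` format):

```
gh workflow run release-10_rc-automation.yml -f version=v1.15.0
```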
Changes made:

- The first step in the pipeline (rc-tag creation) will be triggered manually
  instead of on push to the branch
- The tag version will be set manually from the input instead of being taken
  from the branch name
- Docker image will be additionally tagged as `stable`

Closes: https://github.com/paritytech/release-engineering/issues/214
---
 .github/scripts/common/lib.sh                 | 13 ++++
 .../workflows/release-10_rc-automation.yml    | 78 +++++--------------
 .../workflows/release-50_publish-docker.yml   |  2 +
 .github/workflows/release-srtool.yml          |  5 --
 4 files changed, 36 insertions(+), 62 deletions(-)

diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh
index f844e962c41d..33ef2d3e7eda 100755
--- a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -444,3 +444,16 @@ get_latest_release_tag() {
   latest_release_tag=$(curl -s -H "$TOKEN" $api_base/paritytech/polkadot-sdk/releases/latest | jq -r '.tag_name')
   printf $latest_release_tag
 }
+
+function get_polkadot_node_version_from_code() {
+  # list all the files with node version
+  git grep -e "\(NODE_VERSION[^=]*= \)\".*\"" |
+  # fetch only the one we need
+  grep "primitives/src/lib.rs:" |
+  # Print only the version
+  awk '{ print $7 }' |
+  # Remove the quotes
+  sed 's/"//g' |
+  # Remove the semicolon
+  sed 's/;//g'
+}
diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml
index 7231a8b75886..f5c5de8d0da9 100644
--- a/.github/workflows/release-10_rc-automation.yml
+++ b/.github/workflows/release-10_rc-automation.yml
@@ -1,13 +1,18 @@
 name: Release - RC automation
 on:
-  push:
-    branches:
-      # Catches release-polkadot-v1.2.3, release-v1.2.3-rc1, etc
-      - release-v[0-9]+.[0-9]+.[0-9]+*
-      - release-cumulus-v[0-9]+*
-      - release-polkadot-v[0-9]+*
+  # TODO: Activate it and delete the old branch patterns, when the release process from stable is settled
+  #push:
+  #  branches:
+  #    # Catches release-polkadot-v1.2.3, release-v1.2.3-rc1, etc
+  #    - release-v[0-9]+.[0-9]+.[0-9]+*
+  #    - release-cumulus-v[0-9]+*
+  #    - release-polkadot-v[0-9]+*
+  #    - stable
   workflow_dispatch:
+    inputs:
+      version:
+        description: Current release/rc version in format vX.X.X
 
 jobs:
   tag_rc:
@@ -25,28 +30,19 @@ jobs:
         with:
           fetch-depth: 0
 
-      - name: Get release product
-        id: get_rel_product
-        shell: bash
-        run: |
-          current_branch=$(git branch --show-current)
-          echo "Current branch: $current_branch"
-          if [[ "$current_branch" =~ "release-polkadot" ]]; then
-            echo "product=polkadot" >> $GITHUB_OUTPUT
-          elif [[ "$current_branch" =~ "release-cumulus" ]]; then
-            echo "product=polkadot-parachain" >> $GITHUB_OUTPUT
-          fi
-
-
-      - name: Compute next rc tag for polkadot
-        if: ${{ steps.get_rel_product.outputs.product == 'polkadot' }}
-        id: compute_tag_polkadot
+      - name: Compute next rc tag
+        # if: ${{ steps.get_rel_product.outputs.product == 'polkadot' }}
+        id: compute_tag
         shell: bash
        run: |
           . ./.github/scripts/common/lib.sh
 
           # Get last rc tag if exists, else set it to {version}-rc1
-          version=$(get_version_from_ghref ${GITHUB_REF})
+          if [[ -z "${{ inputs.version }}" ]]; then
+            version=v$(get_polkadot_node_version_from_code)
+          else
+            version=$(filter_version_from_input ${{ inputs.version }})
+          fi
           echo "$version"
           echo "version=$version" >> $GITHUB_OUTPUT
 
           last_rc=$(get_latest_rc_tag $version polkadot)
           if [ -n "$last_rc" ]; then
             suffix=$(increment_rc_tag $last_rc)
             echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT
             echo "first_rc=false" >> $GITHUB_OUTPUT
           else
             echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT
             echo "first_rc=true" >> $GITHUB_OUTPUT
           fi
 
-      - name: Compute next rc tag for polkadot-parachain
-        if: ${{ steps.get_rel_product.outputs.product == 'polkadot-parachain' }}
-        id: compute_tag_cumulus
-        shell: bash
-        run: |
-          . 
./.github/scripts/common/lib.sh - - # Get last rc tag if exists, else set it to polkadot-parachains-{version}-rc1 - version=$(get_version_from_ghref ${GITHUB_REF}) - echo "$version" - echo "version=$version" >> $GITHUB_OUTPUT - - last_rc=$(get_latest_rc_tag $version polkadot-parachain) - if [ -n "$last_rc" ]; then - suffix=$(increment_rc_tag $last_rc) - echo "new_tag=polkadot-parachains-$version-rc$suffix" >> $GITHUB_OUTPUT - echo "first_rc=false" >> $GITHUB_OUTPUT - else - echo "new_tag=polkadot-parachain-$version-rc1" >> $GITHUB_OUTPUT - echo "first_rc=true" >> $GITHUB_OUTPUT - fi - - name: Apply new tag uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2 with: @@ -90,17 +64,7 @@ jobs: # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}" - tag: ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} - - # - id: create-issue - # uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1 - # # Only create the issue if it's the first release candidate - # if: steps.compute_tag.outputs.first_rc == 'true' - # env: - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # VERSION: ${{ steps.compute_tag.outputs.version }} - # with: - # filename: .github/ISSUE_TEMPLATE/release.md + tag: ${{ steps.compute_tag.outputs.new_tag }} - name: Send Matrix message to ${{ matrix.channel.name }} uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 @@ -110,4 +74,4 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - Release process for polkadot ${{ steps.compute_tag_polkadot.outputs.new_tag || steps.compute_tag_cumulus.outputs.new_tag }} has been started.
+ Release process for polkadot ${{ steps.compute_tag.outputs.new_tag }} has been started.
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index 4679f58578f7..723883eaf64c 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -179,6 +179,7 @@ jobs:
           release=$( echo $VERSION | cut -f1 -d- )
           echo "tag=latest" >> $GITHUB_OUTPUT
           echo "release=${release}" >> $GITHUB_OUTPUT
+          echo "stable=stable" >> $GITHUB_OUTPUT
 
       - name: Build Injected Container image for polkadot rc or chain-spec-builder
         if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'chain-spec-builder' }}
@@ -294,6 +295,7 @@ jobs:
           # TODO: The owner should be used below but buildx does not resolve the VARs
           # TODO: It would be good to get rid of this GHA that we don't really need.
           tags: |
+            parity/polkadot:stable
             parity/polkadot:latest
             parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }}
           build-args: |
diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml
index 69a4bdbdda9a..e98269fecab0 100644
--- a/.github/workflows/release-srtool.yml
+++ b/.github/workflows/release-srtool.yml
@@ -5,11 +5,6 @@ env:
   TOML_CLI_VERSION: 0.2.4
 
 on:
-  push:
-    branches:
-      - release-v[0-9]+.[0-9]+.[0-9]+*
-      - release-cumulus-v[0-9]+*
-      - release-polkadot-v[0-9]+*
   workflow_call:
     inputs:
       excluded_runtimes:

From 1b6292bf7c71d56b793e98b651799f41bb0ef76b Mon Sep 17 00:00:00 2001
From: Sebastian Kunert
Date: Wed, 17 Jul 2024 17:41:26 +0200
Subject: [PATCH 3/4] Do not crash on block gap in
 `displaced_leaves_after_finalizing` (#4997)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After the merge of #4922 we saw failing zombienet tests with the following
error:
```
2024-07-09 10:30:09 Error applying finality to block (0xb9e1d3d9cb2047fe61667e28a0963e0634a7b29781895bc9ca40c898027b4c09, 56685): UnknownBlock: Header was not found in the database: 0x0000000000000000000000000000000000000000000000000000000000000000
2024-07-09 10:30:09 GRANDPA voter error: could not complete a round on disk: UnknownBlock: Header was not found in the database: 0x0000000000000000000000000000000000000000000000000000000000000000
```
[Example](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/6662262)

The crashing situation is warp-sync related. After warp syncing, it can
happen that there are gaps in block ancestry where we don't have the header.
At the same time, the genesis hash is in the set of leaves. In
`displaced_leaves_after_finalizing` we then iterate from the finalized block
backwards until we hit an unknown block, crashing the node.

This PR makes the detection of displaced branches resilient against unknown
blocks in the finalized chain.
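The gist of the fix (a condensed sketch of the change in the diff below):
header lookups along the finalized chain now treat `Error::UnknownBlock` as
reaching a gap and bail out gracefully instead of propagating the error:

```rust
let current_finalized = match self.header_metadata(finalized_block_hash) {
	Ok(metadata) => metadata,
	// A gap in the ancestry: nothing can be marked as displaced.
	Err(Error::UnknownBlock(_)) => return Ok(DisplacedLeavesAfterFinalization::default()),
	Err(e) => Err(e)?,
};
```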
cc @nazar-pc (github won't let me request a review from you)

---------

Co-authored-by: Bastian Köcher
Co-authored-by: command-bot <>
---
 Cargo.lock                                 |   2 +-
 prdoc/pr_4997.prdoc                        |  20 +++
 substrate/client/db/src/lib.rs             | 155 ++++++++++++++++++
 substrate/primitives/blockchain/Cargo.toml |   2 +-
 .../primitives/blockchain/src/backend.rs   |  62 +++++--
 substrate/primitives/blockchain/src/lib.rs |   2 +
 6 files changed, 228 insertions(+), 15 deletions(-)
 create mode 100644 prdoc/pr_4997.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index ad75224fefdc..aad15fe033d1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -19855,7 +19855,6 @@ name = "sp-blockchain"
 version = "28.0.0"
 dependencies = [
  "futures",
- "log",
  "parity-scale-codec",
  "parking_lot 0.12.3",
  "schnellru",
@@ -19866,6 +19865,7 @@ dependencies = [
  "sp-runtime",
  "sp-state-machine",
  "thiserror",
+ "tracing",
 ]
 
 [[package]]
diff --git a/prdoc/pr_4997.prdoc b/prdoc/pr_4997.prdoc
new file mode 100644
index 000000000000..25620a7e63ea
--- /dev/null
+++ b/prdoc/pr_4997.prdoc
@@ -0,0 +1,20 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Do not crash on block gap in displaced_leaves_after_finalizing
+
+doc:
+  - audience:
+    - Node Operator
+    - Node Dev
+    description: |
+      After recent changes, crashes were occurring when calculating displaced branches after a block was finalized.
+      The reason is block gaps in the finalized chain. When encountering unknown blocks, the node was panicking.
+      This PR introduces changes to tolerate unknown blocks. Leaves that are separated by a gap from the to-be-finalized
+      block are not marked as displaced.
+
+crates:
+- name: sc-client-db
+  bump: none
+- name: sp-blockchain
+  bump: patch
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index e95cd9e4ad5f..acd165d91613 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -2613,6 +2613,35 @@ pub(crate) mod tests {
 		Ok(header.hash())
 	}
 
+	pub fn insert_disconnected_header(
+		backend: &Backend<Block>,
+		number: u64,
+		parent_hash: H256,
+		extrinsics_root: H256,
+		best: bool,
+	) -> H256 {
+		use sp_runtime::testing::Digest;
+
+		let digest = Digest::default();
+		let header =
+			Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root };
+
+		let mut op = backend.begin_operation().unwrap();
+
+		op.set_block_data(
+			header.clone(),
+			Some(vec![]),
+			None,
+			None,
+			if best { NewBlockState::Best } else { NewBlockState::Normal },
+		)
+		.unwrap();
+
+		backend.commit_operation(op).unwrap();
+
+		header.hash()
+	}
+
 	pub fn insert_header_no_head(
 		backend: &Backend<Block>,
 		number: u64,
@@ -3112,6 +3141,123 @@ pub(crate) mod tests {
 		}
 	}
 
+	#[test]
+	fn displaced_leaves_after_finalizing_works_with_disconnect() {
+		// In this test we will create a situation that can typically happen after warp sync.
+		// The situation looks like this:
+		// g -> <unimported> -> a3 -> a4
+		// Basically there is a gap of unimported blocks at some point in the chain.
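+		// (Blocks 1 and 2 were never imported here: a3's parent hash below points at a
+		// header that is absent from the database.)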
+		let backend = Backend::<Block>::new_test(1000, 100);
+		let blockchain = backend.blockchain();
+		let genesis_number = 0;
+		let genesis_hash =
+			insert_header(&backend, genesis_number, Default::default(), None, Default::default());
+
+		let a3_number = 3;
+		let a3_hash = insert_disconnected_header(
+			&backend,
+			a3_number,
+			H256::from([200; 32]),
+			H256::from([1; 32]),
+			true,
+		);
+
+		let a4_number = 4;
+		let a4_hash =
+			insert_disconnected_header(&backend, a4_number, a3_hash, H256::from([2; 32]), true);
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
+
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, genesis_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
+
+		// Import block a1 which has the genesis block as parent.
+		// g -> a1 -> <unimported> -> a3(f) -> a4
+		let a1_number = 1;
+		let a1_hash = insert_disconnected_header(
+			&backend,
+			a1_number,
+			genesis_hash,
+			H256::from([123; 32]),
+			false,
+		);
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
+
+		// Import block b1 which has the genesis block as parent.
+		// g -> a1 -> <unimported> -> a3(f) -> a4
+		//  \-> b1
+		let b1_number = 1;
+		let b1_hash = insert_disconnected_header(
+			&backend,
+			b1_number,
+			genesis_hash,
+			H256::from([124; 32]),
+			false,
+		);
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![a4_hash, a1_hash, b1_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
+
+		// If the branch of b blocks is higher in number than the a branch, we
+		// should still not prune disconnected leaves.
+		// g -> a1 -> <unimported> -> a3(f) -> a4
+		//  \-> b1 -> b2 ----------> b3 ----> b4 -> b5
+		let b2_number = 2;
+		let b2_hash =
+			insert_disconnected_header(&backend, b2_number, b1_hash, H256::from([40; 32]), false);
+		let b3_number = 3;
+		let b3_hash =
+			insert_disconnected_header(&backend, b3_number, b2_hash, H256::from([41; 32]), false);
+		let b4_number = 4;
+		let b4_hash =
+			insert_disconnected_header(&backend, b4_number, b3_hash, H256::from([42; 32]), false);
+		let b5_number = 5;
+		let b5_hash =
+			insert_disconnected_header(&backend, b5_number, b4_hash, H256::from([43; 32]), false);
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a3_hash, a3_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, a1_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
+
+		// Even though there is a disconnect, displaced leaf detection should still
+		// find branches above the block gap.
+		//        /-> c4
+		// g -> a1 -> <unimported> -> a3 -> a4(f)
+		//  \-> b1 -> b2 ----------> b3 -> b4 -> b5
+		let c4_number = 4;
+		let c4_hash =
+			insert_disconnected_header(&backend, c4_number, a3_hash, H256::from([44; 32]), false);
+		{
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(a4_hash, a4_number).unwrap();
+			assert_eq!(blockchain.leaves().unwrap(), vec![b5_hash, a4_hash, c4_hash, a1_hash]);
+			assert_eq!(displaced.displaced_leaves, vec![(c4_number, c4_hash)]);
+			assert_eq!(displaced.displaced_blocks, vec![c4_hash]);
+		}
+	}
 
 	#[test]
 	fn displaced_leaves_after_finalizing_works() {
 		let backend = Backend::<Block>::new_test(1000, 100);
@@ -3156,6 +3302,15 @@ pub(crate) mod tests {
 			assert_eq!(displaced_a3.displaced_leaves, vec![]);
 			assert_eq!(displaced_a3.displaced_blocks, vec![]);
 		}
+		{
+			// Finalized block is above leaves and not imported yet.
+			// We will not be able to make a connection,
+			// nothing can be marked as displaced.
+			let displaced =
+				blockchain.displaced_leaves_after_finalizing(H256::from([57; 32]), 10).unwrap();
+			assert_eq!(displaced.displaced_leaves, vec![]);
+			assert_eq!(displaced.displaced_blocks, vec![]);
+		}
 
 		// fork from genesis: 2 prong.
 		let b1_number = 1;
diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml
index aedd720612c3..bd0daaf63c05 100644
--- a/substrate/primitives/blockchain/Cargo.toml
+++ b/substrate/primitives/blockchain/Cargo.toml
@@ -19,7 +19,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { features = ["derive"], workspace = true }
 futures = { workspace = true }
-log = { workspace = true, default-features = true }
 parking_lot = { workspace = true, default-features = true }
 schnellru = { workspace = true }
 thiserror = { workspace = true }
@@ -29,3 +28,4 @@ sp-consensus = { workspace = true, default-features = true }
 sp-database = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-state-machine = { workspace = true, default-features = true }
+tracing = { workspace = true, default-features = true }
diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs
index a928217d5885..2accd4dad12c 100644
--- a/substrate/primitives/blockchain/src/backend.rs
+++ b/substrate/primitives/blockchain/src/backend.rs
@@ -17,7 +17,6 @@
 
 //! Substrate blockchain trait
 
-use log::warn;
 use parking_lot::RwLock;
 use sp_runtime::{
 	generic::BlockId,
@@ -25,6 +24,7 @@
 	Justifications,
 };
 use std::collections::{btree_set::BTreeSet, HashMap, VecDeque};
+use tracing::{debug, warn};
 
 use crate::{
 	error::{Error, Result},
@@ -228,6 +228,7 @@
 				//
 				// FIXME #1558 only issue this warning when not on a dead fork
 				warn!(
+					target: crate::LOG_TARGET,
 					"Block {:?} exists in chain but not found when following all leaves backwards",
 					base_hash,
 				);
@@ -254,16 +255,35 @@
 	) -> std::result::Result<DisplacedLeavesAfterFinalization<Block>, Error> {
 		let leaves = self.leaves()?;
 
+		debug!(
+			target: crate::LOG_TARGET,
+			?leaves,
+			%finalized_block_hash,
+			?finalized_block_number,
+			"Checking for displaced leaves after finalization."
+		);
+
+		// If we have only one leaf there are no forks, and we can return early.
 		if finalized_block_number == Zero::zero() || leaves.len() == 1 {
 			return Ok(DisplacedLeavesAfterFinalization::default())
 		}
 
-		// Store hashes of finalized blocks for quick checking later, the last block if the
+		// Store hashes of finalized blocks for quick checking later, the last block is the
 		// finalized one
 		let mut finalized_chain = VecDeque::new();
-		finalized_chain
-			.push_front(MinimalBlockMetadata::from(&self.header_metadata(finalized_block_hash)?));
+		let current_finalized = match self.header_metadata(finalized_block_hash) {
+			Ok(metadata) => metadata,
+			Err(Error::UnknownBlock(_)) => {
+				debug!(
+					target: crate::LOG_TARGET,
+					hash = ?finalized_block_hash,
+					"Tried to fetch unknown block, block ancestry has gaps."
+				);
+				return Ok(DisplacedLeavesAfterFinalization::default());
+			},
+			Err(e) => Err(e)?,
+		};
+		finalized_chain.push_front(MinimalBlockMetadata::from(&current_finalized));
 
 		// Local cache is a performance optimization in case of finalized block deep below the
 		// tip of the chain with a lot of leaves above finalized block
@@ -273,6 +293,7 @@
 			displaced_leaves: Vec::with_capacity(leaves.len()),
 			displaced_blocks: Vec::with_capacity(leaves.len()),
 		};
+
 		let mut displaced_blocks_candidates = Vec::new();
 
 		for leaf_hash in leaves {
@@ -306,11 +327,11 @@
 				continue;
 			}
 
-			// Otherwise the whole leaf branch needs to be pruned, track it all the way to the
-			// point of branching from the finalized chain
-			result.displaced_leaves.push((leaf_number, leaf_hash));
-			result.displaced_blocks.extend(displaced_blocks_candidates.drain(..));
-			result.displaced_blocks.push(current_header_metadata.hash);
+			// We reuse `displaced_blocks_candidates` to store the current metadata.
+			// This block is not displaced if there is a gap in the ancestry. We
+			// check for this gap later.
+			displaced_blocks_candidates.push(current_header_metadata.hash);
+
 			// Collect the rest of the displaced blocks of leaf branch
 			for distance_from_finalized in 1_u32.. {
 				// Find block at `distance_from_finalized` from finalized block
 				let (finalized_chain_block_number, finalized_chain_block_hash) =
 					match finalized_chain.iter().rev().nth(distance_from_finalized as usize) {
 						Some(header) => (header.number, header.hash),
 						None => {
-							let metadata = MinimalBlockMetadata::from(&self.header_metadata(
-								finalized_chain.front().expect("Not empty; qed").parent,
-							)?);
+							let to_fetch = finalized_chain.front().expect("Not empty; qed");
+							let metadata = match self.header_metadata(to_fetch.parent) {
+								Ok(metadata) => metadata,
+								Err(Error::UnknownBlock(_)) => {
+									debug!(
+										target: crate::LOG_TARGET,
+										distance_from_finalized,
+										hash = ?to_fetch.parent,
+										number = ?to_fetch.number,
+										"Tried to fetch unknown block, block ancestry has gaps."
+									);
+									break;
+								},
+								Err(e) => Err(e)?,
+							};
+							let metadata = MinimalBlockMetadata::from(&metadata);
 							let result = (metadata.number, metadata.hash);
 							finalized_chain.push_front(metadata);
 							result
@@ -336,11 +370,13 @@
 				let parent_hash = current_header_metadata.parent;
 				if finalized_chain_block_hash == parent_hash {
 					// Reached finalized chain, nothing left to do
+					result.displaced_blocks.extend(displaced_blocks_candidates.drain(..));
+					result.displaced_leaves.push((leaf_number, leaf_hash));
 					break;
 				}
 
 				// Store displaced block and look deeper for block on finalized chain
-				result.displaced_blocks.push(parent_hash);
+				displaced_blocks_candidates.push(parent_hash);
 				current_header_metadata =
 					MinimalBlockMetadata::from(&self.header_metadata(parent_hash)?);
 			}
diff --git a/substrate/primitives/blockchain/src/lib.rs b/substrate/primitives/blockchain/src/lib.rs
index eabbbcf50d9f..305b7f6afec1 100644
--- a/substrate/primitives/blockchain/src/lib.rs
+++ b/substrate/primitives/blockchain/src/lib.rs
@@ -24,3 +24,5 @@ mod header_metadata;
 pub use backend::*;
 pub use error::*;
 pub use header_metadata::*;
+
+const LOG_TARGET: &str = "db::blockchain";

From b862b181ec507e1510dff6d78335b184b395d9b2 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 17 Jul 2024 20:04:37 +0300
Subject: [PATCH 4/4] fix: Update libp2p-websocket to v0.42.2 to fix panics
 (#5040)

This release includes https://github.com/libp2p/rust-libp2p/pull/5482,
which fixes the substrate node crashing with the following libp2p trace:

```
   0: sp_panic_handler::set::{{closure}}
   1: std::panicking::rust_panic_with_hook
   2: std::panicking::begin_panic::{{closure}}
   3: std::sys_common::backtrace::__rust_end_short_backtrace
   4: std::panicking::begin_panic
   5: as futures_sink::Sink>::poll_ready
   6: as futures_io::if_std::AsyncWrite>::poll_write
   7: as futures_sink::Sink<&alloc::vec::Vec>>::poll_ready
   8: as futures_io::if_std::AsyncWrite>::poll_write
   9: as futures_sink::Sink>>::poll_ready
  10: yamux::connection::Connection::poll_next_inbound
  11: as libp2p_core::muxing::StreamMuxer>::poll
  12: as libp2p_core::muxing::StreamMuxer>::poll
  13: as libp2p_core::muxing::StreamMuxer>::poll
  14: libp2p_swarm::connection::pool::task::new_for_established_connection::{{closure}}
  15: as core::future::future::Future>::poll
  16: as core::future::future::Future>::poll
  17: as core::future::future::Future>::poll
  18: std::panicking::try
  19: tokio::runtime::task::harness::Harness::poll
  20: tokio::runtime::scheduler::multi_thread::worker::Context::run_task
  21: tokio::runtime::scheduler::multi_thread::worker::Context::run
  22: tokio::runtime::context::set_scheduler
  23: tokio::runtime::context::runtime::enter_runtime
  24: tokio::runtime::scheduler::multi_thread::worker::run
  25: tokio::runtime::task::core::Core::poll
  26: tokio::runtime::task::harness::Harness::poll
  27: std::sys_common::backtrace::__rust_begin_short_backtrace
  28: core::ops::function::FnOnce::call_once{{vtable.shim}}
  29: std::sys::pal::unix::thread::Thread::new::thread_start
  30:
  31:

Thread 'tokio-runtime-worker' panicked at 'SinkImpl::poll_ready called after error.', /home/ubuntu/.cargo/registry/src/index.crates.io-6f17d22bba15001f/quicksink-0.1.2/src/lib.rs:158
```

Closes: https://github.com/paritytech/polkadot-sdk/issues/4934

---------

Signed-off-by: Alexandru Vasile
---
 Cargo.lock          | 104 +++++++++++++++++++++-----------------------
 prdoc/pr_5040.prdoc |  11 +++++
 2 files changed, 60 insertions(+), 55 deletions(-)
 create mode 100644 prdoc/pr_5040.prdoc

diff --git a/Cargo.lock
b/Cargo.lock index aad15fe033d1..160e7031af70 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1206,7 +1206,7 @@ checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ "event-listener 5.2.0", "event-listener-strategy", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -1260,7 +1260,7 @@ dependencies = [ "log", "memchr", "once_cell", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", @@ -1274,7 +1274,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -1315,7 +1315,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -5352,7 +5352,7 @@ checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -5362,7 +5362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ "event-listener 5.2.0", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -5659,9 +5659,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -6231,7 +6231,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite", "waker-fn", ] @@ -6242,7 +6242,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6297,7 +6297,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", ] @@ -6755,7 +6755,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http 0.2.9", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6778,7 +6778,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6815,7 +6815,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite", "socket2 0.5.7", "tokio", "tower-service", @@ -6838,7 +6838,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite", "smallvec", "tokio", "want", @@ -6890,7 +6890,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "hyper 1.3.1", - "pin-project-lite 0.2.12", + "pin-project-lite", "socket2 0.5.7", "tokio", "tower", @@ -6942,6 +6942,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -8059,9 +8069,9 @@ dependencies = [ [[package]] name = 
"libp2p-websocket" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" +checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" dependencies = [ "either", "futures", @@ -8070,9 +8080,10 @@ dependencies = [ "libp2p-identity", "log", "parking_lot 0.12.3", - "quicksink", + "pin-project-lite", "rw-stream-sink", - "soketto 0.7.1", + "soketto 0.8.0", + "thiserror", "url", "webpki-roots 0.25.2", ] @@ -12589,9 +12600,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -12669,15 +12680,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -15072,7 +15077,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + "pin-project-lite", "windows-sys 0.48.0", ] @@ -15084,7 +15089,7 @@ checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" dependencies = [ "cfg-if", "concurrent-queue", - "pin-project-lite 0.2.12", + "pin-project-lite", "rustix 0.38.21", "tracing", "windows-sys 0.52.0", @@ -15653,17 +15658,6 @@ dependencies = [ "rand", ] -[[package]] -name = "quicksink" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.12", -] - [[package]] name = "quinn" version = "0.9.4" @@ -15671,7 +15665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" dependencies = [ "bytes", - "pin-project-lite 0.2.12", + "pin-project-lite", "quinn-proto 0.9.6", "quinn-udp 0.3.2", "rustc-hash", @@ -15690,7 +15684,7 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite 0.2.12", + "pin-project-lite", "quinn-proto 0.10.6", "quinn-udp 0.4.1", "rustc-hash", @@ -16164,7 +16158,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", "rustls 0.21.7", "rustls-pemfile 1.0.3", "serde", @@ -22063,7 +22057,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.3", - "pin-project-lite 0.2.12", + "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", @@ -22120,7 +22114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tokio-util", ] @@ -22163,7 +22157,7 @@ dependencies = [ "futures-core", "futures-io", 
"futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", ] @@ -22230,7 +22224,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tower-layer", "tower-service", @@ -22248,7 +22242,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "pin-project-lite 0.2.12", + "pin-project-lite", "tower-layer", "tower-service", ] @@ -22272,7 +22266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.12", + "pin-project-lite", "tracing-attributes", "tracing-core", ] @@ -22716,12 +22710,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] diff --git a/prdoc/pr_5040.prdoc b/prdoc/pr_5040.prdoc new file mode 100644 index 000000000000..62b175c1d648 --- /dev/null +++ b/prdoc/pr_5040.prdoc @@ -0,0 +1,11 @@ +title: Update libp2p-websocket to v0.42.2 + +doc: + - audience: Node Operator + description: | + Fixes a panic coming from the libp2p-websocket which stops the node. + This fix ensures that polling multiple time after error results in an error instead of panics. + +crates: +- name: sc-network + bump: minor