Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Deprecate ParaBackingState API #6867

Merged
merged 20 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ use futures::{Stream, StreamExt};
use polkadot_core_primitives::{Block, BlockNumber, Hash, Header};
use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient};
use polkadot_primitives::{
async_backing::AsyncBackingParams, slashing, vstaging::async_backing::BackingState,
async_backing::AsyncBackingParams, slashing, vstaging::async_backing::{BackingState, Constraints},
ApprovalVotingParams, CoreIndex, NodeFeatures,
};
use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError};
Expand Down Expand Up @@ -454,6 +454,14 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient {
.parachain_host_candidates_pending_availability(at, para_id)
.await?)
}

/// Fetch the backing constraints for `para_id` at the relay-chain block `at`,
/// delegating to the `parachain_host_backing_constraints` RPC call.
/// Returns `Ok(None)` when the runtime reports no constraints for the para.
async fn backing_constraints(
	&self,
	at: Hash,
	para_id: ParaId,
) -> Result<Option<Constraints>, ApiError> {
	let constraints = self.rpc_client.parachain_host_backing_constraints(at, para_id).await?;
	Ok(constraints)
}
}

#[async_trait::async_trait]
Expand Down
11 changes: 10 additions & 1 deletion cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ use cumulus_primitives_core::{
async_backing::AsyncBackingParams,
slashing,
vstaging::{
async_backing::BackingState, CandidateEvent,
async_backing::{BackingState, Constraints}, CandidateEvent,
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
ScrapedOnChainVotes,
},
Expand Down Expand Up @@ -720,6 +720,15 @@ impl RelayChainRpcClient {
.await
}

/// Call the `ParachainHost_backing_constraints` runtime API on the remote relay
/// chain node at block `at` for the given `para_id`.
pub async fn parachain_host_backing_constraints(
	&self,
	at: RelayHash,
	para_id: ParaId,
) -> Result<Option<Constraints>, RelayChainError> {
	// Runtime API entry point name; must match the relay-chain runtime exactly.
	const RUNTIME_API_NAME: &str = "ParachainHost_backing_constraints";
	self.call_remote_runtime_function(RUNTIME_API_NAME, at, Some(para_id)).await
}

fn send_register_message_to_worker(
&self,
message: RpcDispatcherMessage,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -132,8 +132,8 @@ use std::{
use super::LOG_TARGET;
use polkadot_node_subsystem::messages::Ancestors;
use polkadot_node_subsystem_util::inclusion_emulator::{
self, ConstraintModifications, Constraints, Fragment, HypotheticalOrConcreteCandidate,
ProspectiveCandidate, RelayChainBlockInfo,
self, validate_commitments, ConstraintModifications, Constraints, Fragment,
HypotheticalOrConcreteCandidate, ProspectiveCandidate, RelayChainBlockInfo,
};
use polkadot_primitives::{
vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, BlockNumber,
Expand Down Expand Up @@ -1052,7 +1052,7 @@ impl FragmentChain {

// Try seeing if the parent candidate is in the current chain or if it is the latest
// included candidate. If so, get the constraints the candidate must satisfy.
let (constraints, maybe_min_relay_parent_number) =
let (is_unconnected, constraints, maybe_min_relay_parent_number) =
if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) {
let Some(parent_candidate) =
self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate)
Expand All @@ -1062,6 +1062,7 @@ impl FragmentChain {
};

(
false,
self.scope
.base_constraints
.apply_modifications(&parent_candidate.cumulative_modifications)
Expand All @@ -1070,11 +1071,10 @@ impl FragmentChain {
)
} else if self.scope.base_constraints.required_parent.hash() == parent_head_hash {
// It builds on the latest included candidate.
(self.scope.base_constraints.clone(), None)
(false, self.scope.base_constraints.clone(), None)
} else {
// If the parent is not yet part of the chain, there's nothing else we can check for
// now.
return Ok(())
// The parent is not yet part of the chain
(true, self.scope.base_constraints.clone(), None)
};

// Check for cycles or invalid tree transitions.
Expand All @@ -1088,6 +1088,17 @@ impl FragmentChain {
candidate.persisted_validation_data(),
candidate.validation_code_hash(),
) {
if is_unconnected {
// If the parent is not yet part of the chain, we can check the commitments only
// if we have the full candidate.
return validate_commitments(
&self.scope.base_constraints,
&relay_parent,
commitments,
&validation_code_hash,
)
.map_err(Error::CheckAgainstConstraints)
}
Fragment::check_against_constraints(
&relay_parent,
&constraints,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ fn make_constraints(
min_relay_parent_number,
max_pov_size: 1_000_000,
max_code_size: 1_000_000,
max_head_data_size: 20480,
ump_remaining: 10,
ump_remaining_bytes: 1_000,
max_ump_num_per_candidate: 10,
Expand Down
90 changes: 71 additions & 19 deletions polkadot/node/core/prospective-parachains/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,12 @@ use polkadot_node_subsystem::{
use polkadot_node_subsystem_util::{
backing_implicit_view::{BlockInfoProspectiveParachains as BlockInfo, View as ImplicitView},
inclusion_emulator::{Constraints, RelayChainBlockInfo},
request_session_index_for_child,
request_candidates_pending_availability, request_backing_constraints, request_session_index_for_child,
runtime::{fetch_claim_queue, prospective_parachains_mode, ProspectiveParachainsMode},
};
use polkadot_primitives::{
vstaging::{
async_backing::CandidatePendingAvailability,
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
},
BlockNumber, CandidateHash, Hash, HeadData, Header, Id as ParaId, PersistedValidationData,
vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState},
BlockNumber, CandidateHash, Hash, Header, Id as ParaId, PersistedValidationData,
};

use crate::{
Expand Down Expand Up @@ -257,8 +254,9 @@ async fn handle_active_leaves_update<Context>(
let mut fragment_chains = HashMap::new();
for para in scheduled_paras {
// Find constraints and pending availability candidates.
let backing_state = fetch_backing_state(ctx, hash, para).await?;
let Some((constraints, pending_availability)) = backing_state else {
let Some((constraints, pending_availability)) =
fetch_backing_constraints_and_candidates(ctx, hash, para).await?
else {
// This indicates a runtime conflict of some kind.
gum::debug!(
target: LOG_TARGET,
Expand All @@ -273,7 +271,7 @@ async fn handle_active_leaves_update<Context>(
let pending_availability = preprocess_candidates_pending_availability(
ctx,
&mut temp_header_cache,
constraints.required_parent.clone(),
&constraints,
pending_availability,
)
.await?;
Expand Down Expand Up @@ -445,22 +443,23 @@ struct ImportablePendingAvailability {
async fn preprocess_candidates_pending_availability<Context>(
ctx: &mut Context,
cache: &mut HashMap<Hash, Header>,
required_parent: HeadData,
pending_availability: Vec<CandidatePendingAvailability>,
constraints: &Constraints,
pending_availability: Vec<CommittedCandidateReceipt>,
) -> JfyiErrorResult<Vec<ImportablePendingAvailability>> {
let mut required_parent = required_parent;
let mut required_parent = constraints.required_parent.clone();

let mut importable = Vec::new();
let expected_count = pending_availability.len();

for (i, pending) in pending_availability.into_iter().enumerate() {
let candidate_hash = pending.hash();
let Some(relay_parent) =
fetch_block_info(ctx, cache, pending.descriptor.relay_parent()).await?
else {
let para_id = pending.descriptor.para_id();
gum::debug!(
target: LOG_TARGET,
?pending.candidate_hash,
?candidate_hash,
?para_id,
index = ?i,
?expected_count,
Expand All @@ -478,12 +477,12 @@ async fn preprocess_candidates_pending_availability<Context>(
},
persisted_validation_data: PersistedValidationData {
parent_head: required_parent,
max_pov_size: pending.max_pov_size,
max_pov_size: constraints.max_pov_size as _,
relay_parent_number: relay_parent.number,
relay_parent_storage_root: relay_parent.storage_root,
},
compact: fragment_chain::PendingAvailability {
candidate_hash: pending.candidate_hash,
candidate_hash,
relay_parent: relay_parent.into(),
},
});
Expand Down Expand Up @@ -883,18 +882,71 @@ async fn fetch_backing_state<Context>(
ctx: &mut Context,
relay_parent: Hash,
para_id: ParaId,
) -> JfyiErrorResult<Option<(Constraints, Vec<CandidatePendingAvailability>)>> {
) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
let (tx, rx) = oneshot::channel();
ctx.send_message(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::ParaBackingState(para_id, tx),
))
.await;

Ok(rx
Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(|s| {
(
From::from(s.constraints),
s.pending_availability
.into_iter()
.map(|c| CommittedCandidateReceipt {
descriptor: c.descriptor,
commitments: c.commitments,
})
.collect(),
)
}))
}

#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
async fn fetch_backing_constraints_and_candidates<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
	para_id: ParaId,
) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
	// Prefer the new `backing_constraints` runtime API; if the runtime does not
	// support it yet, fall back to the deprecated `ParaBackingState` API.
	match fetch_backing_constraints_and_candidates_inner(ctx, relay_parent, para_id).await {
		Err(error) => {
			gum::debug!(
				target: LOG_TARGET,
				?para_id,
				?relay_parent,
				?error,
				"Failed to get constraints and candidates pending availability."
			);

			// Fallback to backing state.
			fetch_backing_state(ctx, relay_parent, para_id).await
		},
		// Fixed typo in the binding name (was `maybe_constraints_and_candidatest`).
		Ok(maybe_constraints_and_candidates) => Ok(maybe_constraints_and_candidates),
	}
}

#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
async fn fetch_backing_constraints_and_candidates_inner<Context>(
ctx: &mut Context,
relay_parent: Hash,
para_id: ParaId,
) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
let maybe_constraints = request_backing_constraints(relay_parent, para_id, ctx.sender())
.await
.await
.map_err(JfyiError::RuntimeApiRequestCanceled)??
.map(|s| (From::from(s.constraints), s.pending_availability)))
.map_err(JfyiError::RuntimeApiRequestCanceled)??;

let Some(constraints) = maybe_constraints else { return Ok(None) };

let pending_availability =
request_candidates_pending_availability(relay_parent, para_id, ctx.sender())
.await
.await
.map_err(JfyiError::RuntimeApiRequestCanceled)??;

Ok(Some((From::from(constraints), pending_availability)))
}

#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
Expand Down
Loading
Loading