diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index 12652763763..fe13af02f88 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -1254,15 +1254,8 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>>
@@ ... @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>>
-impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for RpcBlock<T::EthSpec> {
-    /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
-    /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
-    fn into_execution_pending_block_slashable(
-        self,
-        block_root: Hash256,
-        chain: &Arc<BeaconChain<T>>,
-        notify_execution_layer: NotifyExecutionLayer,
-    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError>> {
-        // Perform an early check to prevent wasting time on irrelevant blocks.
-        let block_root = check_block_relevancy(self.as_block(), block_root, chain)
-            .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
-        let maybe_available = chain
-            .data_availability_checker
-            .verify_kzg_for_rpc_block(self.clone())
-            .map_err(|e| {
-                BlockSlashInfo::SignatureNotChecked(
-                    self.signed_block_header(),
-                    BlockError::AvailabilityCheck(e),
-                )
-            })?;
-        SignatureVerifiedBlock::check_slashable(maybe_available, block_root, chain)?
-            .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
-    }
-
-    fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
-        self.as_block()
-    }
-
-    fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
-        self.block_cloned()
-    }
-}
-
 impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See
     /// the struct-level documentation for more information.
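Note on the hunk above: the deleted `RpcBlock` impl wrapped every failure that occurs before signature verification in `BlockSlashInfo::SignatureNotChecked`, so the signed header stayed available to the caller, and then delegated to `SignatureVerifiedBlock::check_slashable`. A minimal, self-contained sketch of that wrapping pattern follows; the types and the slot-based relevancy rule are simplified stand-ins, not the real Lighthouse definitions.

```rust
// Simplified stand-ins; the types and the relevancy rule are illustrative only.
#[derive(Debug)]
struct SignedBlockHeader(u64);

#[derive(Debug)]
enum BlockError {
    NotRelevant,
}

#[derive(Debug)]
enum BlockSlashInfo {
    SignatureNotChecked(SignedBlockHeader, BlockError),
}

fn check_block_relevancy(slot: u64, finalized_slot: u64) -> Result<(), BlockError> {
    if slot <= finalized_slot {
        Err(BlockError::NotRelevant)
    } else {
        Ok(())
    }
}

// Mirrors the shape of the deleted method: a failure before the signature is
// checked still carries the signed header so the caller can act on it.
fn into_execution_pending_slashable(slot: u64, finalized_slot: u64) -> Result<u64, BlockSlashInfo> {
    check_block_relevancy(slot, finalized_slot)
        .map_err(|e| BlockSlashInfo::SignatureNotChecked(SignedBlockHeader(slot), e))?;
    // ...signature verification and the execution-pending conversion would follow here.
    Ok(slot)
}

fn main() {
    assert!(into_execution_pending_slashable(10, 5).is_ok());
    println!("{:?}", into_execution_pending_slashable(3, 5).unwrap_err());
}
```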
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index f10d59ca1a5..aca7903fc31 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -327,69 +327,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
             .remove_pending_components(block_root)
     }
 
-    /// Verifies kzg commitments for an RpcBlock, returns a `MaybeAvailableBlock` that may
-    /// include the fully available block.
-    ///
-    /// WARNING: This function assumes all required blobs are already present, it does NOT
-    /// check if there are any missing blobs.
-    pub fn verify_kzg_for_rpc_block(
-        &self,
-        block: RpcBlock<T::EthSpec>,
-    ) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
-        let (block_root, block, blobs, data_columns) = block.deconstruct();
-        if self.blobs_required_for_block(&block) {
-            return if let Some(blob_list) = blobs.as_ref() {
-                verify_kzg_for_blob_list(blob_list.iter(), &self.kzg)
-                    .map_err(AvailabilityCheckError::InvalidBlobs)?;
-                Ok(MaybeAvailableBlock::Available(AvailableBlock {
-                    block_root,
-                    block,
-                    blobs,
-                    blobs_available_timestamp: None,
-                    data_columns: None,
-                    spec: self.spec.clone(),
-                }))
-            } else {
-                Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
-            };
-        }
-        if self.data_columns_required_for_block(&block) {
-            return if let Some(data_column_list) = data_columns.as_ref() {
-                verify_kzg_for_data_column_list_with_scoring(
-                    data_column_list
-                        .iter()
-                        .map(|custody_column| custody_column.as_data_column()),
-                    &self.kzg,
-                )
-                .map_err(AvailabilityCheckError::InvalidColumn)?;
-                Ok(MaybeAvailableBlock::Available(AvailableBlock {
-                    block_root,
-                    block,
-                    blobs: None,
-                    blobs_available_timestamp: None,
-                    data_columns: Some(
-                        data_column_list
-                            .into_iter()
-                            .map(|d| d.clone_arc())
-                            .collect(),
-                    ),
-                    spec: self.spec.clone(),
-                }))
-            } else {
-                Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
-            };
-        }
-
-        Ok(MaybeAvailableBlock::Available(AvailableBlock {
-            block_root,
-            block,
-            blobs: None,
-            blobs_available_timestamp: None,
-            data_columns: None,
-            spec: self.spec.clone(),
-        }))
-    }
-
     /// Checks if a vector of blocks are available. Returns a vector of `MaybeAvailableBlock`
     /// This is more efficient than calling `verify_kzg_for_rpc_block` in a loop as it does
     /// all kzg verification at once
@@ -802,4 +739,23 @@ impl<E: EthSpec> MaybeAvailableBlock<E> {
             Self::AvailabilityPending { block, .. } => block.clone(),
         }
     }
+
+    pub fn from_block(
+        block: Arc<SignedBeaconBlock<E>>,
+        block_root: Hash256,
+        spec: Arc<ChainSpec>,
+    ) -> Self {
+        if block.num_expected_blobs() == 0 {
+            MaybeAvailableBlock::Available(AvailableBlock {
+                block_root,
+                block,
+                blobs: None,
+                blobs_available_timestamp: None,
+                data_columns: None,
+                spec,
+            })
+        } else {
+            MaybeAvailableBlock::AvailabilityPending { block_root, block }
+        }
+    }
 }
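The new `MaybeAvailableBlock::from_block` helper added above classifies a block purely by its expected blob count: a block that commits to no blobs is `Available` immediately, anything else stays `AvailabilityPending` for the availability checker. A rough stand-alone sketch of that classification, with simplified stand-in types (only the `num_expected_blobs` name mirrors the real method):

```rust
use std::sync::Arc;

// Simplified stand-ins; only `num_expected_blobs` mirrors the real method name.
struct Block {
    expected_blobs: usize,
}

impl Block {
    fn num_expected_blobs(&self) -> usize {
        self.expected_blobs
    }
}

enum MaybeAvailableBlock {
    // No blobs or columns are expected, so nothing further is needed.
    Available { block: Arc<Block> },
    // The block commits to blobs that still have to arrive and be verified.
    AvailabilityPending { block: Arc<Block> },
}

impl MaybeAvailableBlock {
    // Mirrors the classification in the new `from_block` helper: a block with no
    // expected blobs is available immediately, otherwise it stays pending.
    fn from_block(block: Arc<Block>) -> Self {
        if block.num_expected_blobs() == 0 {
            Self::Available { block }
        } else {
            Self::AvailabilityPending { block }
        }
    }
}

fn main() {
    let empty = MaybeAvailableBlock::from_block(Arc::new(Block { expected_blobs: 0 }));
    assert!(matches!(empty, MaybeAvailableBlock::Available { .. }));

    let with_blobs = MaybeAvailableBlock::from_block(Arc::new(Block { expected_blobs: 3 }));
    assert!(matches!(with_blobs, MaybeAvailableBlock::AvailabilityPending { .. }));
}
```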
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index 8c9e3929f6b..046c8c050f9 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -2303,27 +2303,10 @@ where
     pub async fn process_block(
         &self,
         slot: Slot,
-        block_root: Hash256,
         block_contents: SignedBlockContentsTuple<E>,
     ) -> Result<SignedBeaconBlockHash, BlockError> {
         self.set_current_slot(slot);
-        let (block, blob_items) = block_contents;
-
-        let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
-        let block_hash: SignedBeaconBlockHash = self
-            .chain
-            .process_block(
-                block_root,
-                rpc_block,
-                NotifyExecutionLayer::Yes,
-                BlockImportSource::RangeSync,
-                || Ok(()),
-            )
-            .await?
-            .try_into()
-            .expect("block blobs are available");
-        self.chain.recompute_head_at_current_slot().await;
-        Ok(block_hash)
+        self.process_block_result(block_contents).await
     }
 
     pub async fn process_block_result(
@@ -2334,20 +2317,18 @@ where
         let block_root = block.canonical_root();
 
         let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
-        let block_hash: SignedBeaconBlockHash = self
+        match self
             .chain
-            .process_block(
-                block_root,
-                rpc_block,
-                NotifyExecutionLayer::Yes,
-                BlockImportSource::RangeSync,
-                || Ok(()),
-            )
-            .await?
-            .try_into()
-            .expect("block blobs are available");
+            .process_chain_segment(vec![rpc_block], NotifyExecutionLayer::Yes)
+            .await
+        {
+            crate::ChainSegmentResult::Successful { .. } => {}
+            crate::ChainSegmentResult::Failed { error, .. } => {
+                panic!("block import error {error:?}");
+            }
+        }
         self.chain.recompute_head_at_current_slot().await;
-        Ok(block_hash)
+        Ok(block_root.into())
     }
 
     /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from
@@ -2485,13 +2466,7 @@ where
         self.set_current_slot(slot);
         let (block_contents, new_state) = self.make_block(state, slot).await;
-        let block_hash = self
-            .process_block(
-                slot,
-                block_contents.0.canonical_root(),
-                block_contents.clone(),
-            )
-            .await?;
+        let block_hash = self.process_block_result(block_contents.clone()).await?;
         Ok((block_hash, block_contents, new_state))
     }
diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs
index 41e6467b0fa..08ef20d3594 100644
--- a/beacon_node/beacon_chain/tests/rewards.rs
+++ b/beacon_node/beacon_chain/tests/rewards.rs
@@ -310,19 +310,15 @@ async fn test_rewards_base_multi_inclusion() {
     // funky hack: on first try, the state root will mismatch due to our modification
     // thankfully, the correct state root is reported back, so we just take that one :^)
     // there probably is a better way...
-    let Err(BlockError::StateRootMismatch { local, .. }) = harness
-        .process_block(slot, block.0.canonical_root(), block.clone())
-        .await
+    let Err(BlockError::StateRootMismatch { local, .. }) =
+        harness.process_block(slot, block.clone()).await
     else {
         panic!("unexpected match of state root");
     };
     let mut new_block = block.0.message_base().unwrap().clone();
     new_block.state_root = local;
     block.0 = Arc::new(harness.sign_beacon_block(new_block.into(), &harness.get_current_state()));
-    harness
-        .process_block(slot, block.0.canonical_root(), block.clone())
-        .await
-        .unwrap();
+    harness.process_block(slot, block.clone()).await.unwrap();
 
     harness
         .extend_slots(E::slots_per_epoch() as usize * 2 - 4)
diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs
index c06a1f6ee37..26ab61a0902 100644
--- a/beacon_node/network/src/network_beacon_processor/mod.rs
+++ b/beacon_node/network/src/network_beacon_processor/mod.rs
@@ -493,7 +493,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     pub fn send_rpc_beacon_block(
         self: &Arc<Self>,
         block_root: Hash256,
-        block: RpcBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) -> Result<(), Error> {
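With the harness changes above, `process_block` no longer takes a pre-computed block root and both helpers funnel into `process_block_result`, which derives the root itself, imports through `process_chain_segment`, and panics on failure so tests fail loudly. A simplified, synchronous sketch of that shape with hypothetical stand-in types (not the real harness API):

```rust
// Simplified, synchronous stand-ins for the harness API; names are illustrative only.
struct Block {
    payload: Vec<u8>,
}

impl Block {
    // Stand-in for `SignedBeaconBlock::canonical_root`.
    fn canonical_root(&self) -> u64 {
        self.payload.iter().map(|b| *b as u64).sum()
    }
}

enum ChainSegmentResult {
    Successful,
    Failed(String),
}

fn process_chain_segment(blocks: Vec<Block>) -> ChainSegmentResult {
    if blocks.is_empty() {
        ChainSegmentResult::Failed("empty segment".into())
    } else {
        ChainSegmentResult::Successful
    }
}

// Mirrors `process_block_result`: derive the root from the block, import through
// the segment processor, and panic on failure so a test fails loudly.
fn process_block_result(block: Block) -> u64 {
    let block_root = block.canonical_root();
    match process_chain_segment(vec![block]) {
        ChainSegmentResult::Successful => {}
        ChainSegmentResult::Failed(error) => panic!("block import error {error:?}"),
    }
    block_root
}

// Mirrors the slimmed-down `process_block`: it now simply delegates.
fn process_block(block: Block) -> u64 {
    process_block_result(block)
}

fn main() {
    let root = process_block(Block {
        payload: vec![1, 2, 3],
    });
    assert_eq!(root, 6);
}
```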
diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
index f5fe7ee98b2..97ca721010c 100644
--- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
@@ -25,7 +25,9 @@ use store::KzgCommitment;
 use tokio::sync::mpsc;
 use types::beacon_block_body::format_kzg_commitments;
 use types::blob_sidecar::FixedBlobSidecarList;
-use types::{BlockImportSource, DataColumnSidecar, DataColumnSidecarList, Epoch, Hash256};
+use types::{
+    BlockImportSource, DataColumnSidecar, DataColumnSidecarList, Epoch, Hash256, SignedBeaconBlock,
+};
 
 /// Id associated to a batch processing request, either a sync batch or a parent lookup.
 #[derive(Clone, Debug, PartialEq)]
@@ -52,7 +54,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     pub fn generate_rpc_beacon_block_process_fn(
         self: Arc<Self>,
         block_root: Hash256,
-        block: RpcBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) -> AsyncFn {
@@ -76,7 +78,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     pub fn generate_rpc_beacon_block_fns(
         self: Arc<Self>,
         block_root: Hash256,
-        block: RpcBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) -> (AsyncFn, BlockingFn) {
@@ -103,7 +105,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
     pub async fn process_rpc_block(
         self: Arc<NetworkBeaconProcessor<T>>,
         block_root: Hash256,
-        block: RpcBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs
index 8eefb2d6756..86b6894bac4 100644
--- a/beacon_node/network/src/sync/block_lookups/common.rs
+++ b/beacon_node/network/src/sync/block_lookups/common.rs
@@ -6,7 +6,6 @@ use crate::sync::block_lookups::{
 };
 use crate::sync::manager::BlockProcessType;
 use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext};
-use beacon_chain::block_verification_types::RpcBlock;
 use beacon_chain::BeaconChainTypes;
 use lighthouse_network::service::api_types::Id;
 use parking_lot::RwLock;
@@ -97,13 +96,8 @@ impl<T: BeaconChainTypes> RequestState<T> for BlockRequestState<T::EthSpec> {
             seen_timestamp,
             ..
         } = download_result;
-        cx.send_block_for_processing(
-            id,
-            block_root,
-            RpcBlock::new_without_blobs(Some(block_root), value),
-            seen_timestamp,
-        )
-        .map_err(LookupRequestError::SendFailedProcessor)
+        cx.send_block_for_processing(id, block_root, value, seen_timestamp)
+            .map_err(LookupRequestError::SendFailedProcessor)
     }
 
     fn response_type() -> ResponseType {
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index 968a9bcdddc..66e32ec00a4 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -1209,7 +1209,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         &self,
         id: Id,
         block_root: Hash256,
-        block: RpcBlock<T::EthSpec>,
+        block: Arc<SignedBeaconBlock<T::EthSpec>>,
         seen_timestamp: Duration,
     ) -> Result<(), SendErrorProcessor> {
         let beacon_processor = self
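The sync-side hunks above let the lookup path hand the processor an `Arc<SignedBeaconBlock>` directly instead of wrapping it in `RpcBlock::new_without_blobs`. A small illustrative sketch of the before/after call-site shape, using simplified stand-in types rather than the real lookup or processor APIs:

```rust
use std::sync::Arc;

// Simplified stand-ins for the sync types touched in these hunks; not the real APIs.
struct SignedBeaconBlock {
    slot: u64,
}

struct RpcBlock {
    block: Arc<SignedBeaconBlock>,
}

impl RpcBlock {
    // Stand-in for the wrapper constructor the old call site used.
    fn new_without_blobs(block: Arc<SignedBeaconBlock>) -> Self {
        Self { block }
    }
}

// Before: the block was wrapped in an `RpcBlock` with no blobs just to satisfy
// the processor's signature.
fn send_block_for_processing_old(block: Arc<SignedBeaconBlock>) -> u64 {
    let rpc_block = RpcBlock::new_without_blobs(block);
    rpc_block.block.slot
}

// After: the processor accepts the `Arc<SignedBeaconBlock>` directly, so the
// wrapper disappears from the call site.
fn send_block_for_processing_new(block: Arc<SignedBeaconBlock>) -> u64 {
    block.slot
}

fn main() {
    let block = Arc::new(SignedBeaconBlock { slot: 42 });
    assert_eq!(
        send_block_for_processing_old(block.clone()),
        send_block_for_processing_new(block)
    );
}
```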