diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ab2097e001..20af7b4630 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -13,7 +13,7 @@ use crate::block_verification::{ signature_verify_chain_segment, verify_header_signature, }; use crate::block_verification_types::{ - AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, + AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RangeSyncBlock, }; pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; @@ -137,7 +137,7 @@ use types::*; pub type ForkChoiceError = fork_choice::Error; /// Alias to appease clippy. -type HashBlockTuple = (Hash256, RpcBlock); +type HashBlockTuple = (Hash256, RangeSyncBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; @@ -2746,7 +2746,7 @@ impl BeaconChain { /// This method is potentially long-running and should not run on the core executor. pub fn filter_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, ) -> Result>, Box> { // This function will never import any blocks. let imported_blocks = vec![]; @@ -2855,7 +2855,7 @@ impl BeaconChain { /// `Self::process_block`. 
pub async fn process_chain_segment( self: &Arc, - chain_segment: Vec>, + chain_segment: Vec>, notify_execution_layer: NotifyExecutionLayer, ) -> ChainSegmentResult { for block in chain_segment.iter() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1be9bd4181..06ec26185f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -50,7 +50,7 @@ use crate::beacon_snapshot::PreProcessingSnapshot; use crate::blob_verification::GossipBlobError; -use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; +use crate::block_verification_types::{AsBlock, BlockImportData, LookupBlock, RangeSyncBlock}; use crate::data_availability_checker::{ AvailabilityCheckError, AvailableBlock, AvailableBlockData, MaybeAvailableBlock, }; @@ -585,7 +585,7 @@ pub(crate) fn process_block_slash_info( - mut chain_segment: Vec<(Hash256, RpcBlock)>, + mut chain_segment: Vec<(Hash256, RangeSyncBlock)>, chain: &BeaconChain, ) -> Result>, BlockError> { if chain_segment.is_empty() { @@ -616,24 +616,14 @@ pub fn signature_verify_chain_segment( let consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); - match block { - RpcBlock::FullyAvailable(available_block) => { - available_blocks.push(available_block.clone()); - signature_verified_blocks.push(SignatureVerifiedBlock { - block: MaybeAvailableBlock::Available(available_block), - block_root, - parent: None, - consensus_context, - }); - } - RpcBlock::BlockOnly { .. } => { - // RangeSync and BackfillSync already ensure that the chain segment is fully available - // so this shouldn't be possible in practice. 
- return Err(BlockError::InternalError( - "Chain segment is not fully available".to_string(), - )); - } - } + let available_block = block.into_available_block(); + available_blocks.push(available_block.clone()); + signature_verified_blocks.push(SignatureVerifiedBlock { + block: MaybeAvailableBlock::Available(available_block), + block_root, + parent: None, + consensus_context, + }); } chain @@ -1300,11 +1290,11 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } } -impl IntoExecutionPendingBlock for RpcBlock { +impl IntoExecutionPendingBlock for RangeSyncBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. #[instrument( - name = "rpc_block_into_execution_pending_block_slashable", + name = "range_sync_block_into_execution_pending_block_slashable", level = "debug" skip_all, )] @@ -1318,24 +1308,51 @@ impl IntoExecutionPendingBlock for RpcBlock let block_root = check_block_relevancy(self.as_block(), block_root, chain) .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; - let maybe_available_block = match &self { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(available_block) - .map_err(|e| { - BlockSlashInfo::SignatureNotChecked( - self.signed_block_header(), - BlockError::AvailabilityCheck(e), - ) - })?; - MaybeAvailableBlock::Available(available_block.clone()) - } - // No need to perform KZG verification unless we have a fully available block - RpcBlock::BlockOnly { block, block_root } => MaybeAvailableBlock::AvailabilityPending { - block_root: *block_root, - block: block.clone(), - }, + let available_block = self.into_available_block(); + chain + .data_availability_checker + .verify_kzg_for_available_block(&available_block) + .map_err(|e| { + BlockSlashInfo::SignatureNotChecked( + 
available_block.as_block().signed_block_header(), + BlockError::AvailabilityCheck(e), + ) + })?; + let maybe_available_block = MaybeAvailableBlock::Available(available_block); + SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? + .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) + } + + fn block(&self) -> &SignedBeaconBlock { + self.as_block() + } + + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } +} + +impl IntoExecutionPendingBlock for LookupBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. + #[instrument( + name = "lookup_block_into_execution_pending_block_slashable", + level = "debug" + skip_all, + )] + fn into_execution_pending_block_slashable( + self, + block_root: Hash256, + chain: &Arc>, + notify_execution_layer: NotifyExecutionLayer, + ) -> Result, BlockSlashInfo> { + // Perform an early check to prevent wasting time on irrelevant blocks. + let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; + + let maybe_available_block = MaybeAvailableBlock::AvailabilityPending { + block_root, + block: self.block_cloned(), }; SignatureVerifiedBlock::check_slashable(maybe_available_block, block_root, chain)? diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index f98cd40d08..be73ef15d7 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -13,76 +13,70 @@ use types::{ SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; -/// A block that has been received over RPC. It has 2 internal variants: -/// -/// 1. `FullyAvailable`: A fully available block. 
This can either be a pre-deneb block, a -/// post-Deneb block with blobs, a post-Fulu block with the columns the node is required to custody, -/// or a post-Deneb block that doesn't require blobs/columns. Hence, it is fully self contained w.r.t -/// verification. i.e. this block has all the required data to get verified and imported into fork choice. -/// -/// 2. `BlockOnly`: This is a post-deneb block that requires blobs to be considered fully available. -#[derive(Clone, Educe)] -#[educe(Hash(bound(E: EthSpec)))] -pub enum RpcBlock { - FullyAvailable(AvailableBlock), - BlockOnly { - block: Arc>, - block_root: Hash256, - }, +/// A wrapper around a `SignedBeaconBlock`. This variant is constructed +/// when lookup sync only fetches a single block. It does not contain +/// any blobs or data columns. +pub struct LookupBlock { + block: Arc>, + block_root: Hash256, } -impl Debug for RpcBlock { +impl LookupBlock { + pub fn new(block: Arc>) -> Self { + let block_root = block.canonical_root(); + Self { block, block_root } + } + + pub fn block(&self) -> &SignedBeaconBlock { + &self.block + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + pub fn block_cloned(&self) -> Arc> { + self.block.clone() + } +} + +/// A fully available block that has been constructed by range sync. +/// The block contains all the data required to import into fork choice. +/// This includes any and all blobs/columns required, including zero if +/// none are required. This can happen if the block is pre-deneb or if +/// it's simply past the DA boundary. 
+#[derive(Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] +pub struct RangeSyncBlock { + block: AvailableBlock, +} + +impl Debug for RangeSyncBlock { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "RpcBlock({:?})", self.block_root()) } } -impl RpcBlock { +impl RangeSyncBlock { pub fn block_root(&self) -> Hash256 { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_root(), - RpcBlock::BlockOnly { block_root, .. } => *block_root, - } + self.block.block_root() } pub fn as_block(&self) -> &SignedBeaconBlock { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block(), - RpcBlock::BlockOnly { block, .. } => block, - } + self.block.block() } pub fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { block, .. } => block.clone(), - } + self.block.block_cloned() } - pub fn block_data(&self) -> Option<&AvailableBlockData> { - match self { - RpcBlock::FullyAvailable(available_block) => Some(available_block.data()), - RpcBlock::BlockOnly { .. } => None, - } + pub fn block_data(&self) -> &AvailableBlockData { + self.block.data() } } -impl RpcBlock { - /// Constructs an `RpcBlock` from a block and optional availability data. - /// - /// This function creates an RpcBlock which can be in one of two states: - /// - `FullyAvailable`: When `block_data` is provided, the block contains all required - /// data for verification. - /// - `BlockOnly`: When `block_data` is `None`, the block may still need additional - /// data to be considered fully available (used during block lookups or when blobs - /// will arrive separately). - /// - /// # Validation - /// - /// When `block_data` is provided, this function validates that: - /// - Block data is not provided when not required. - /// - Required blobs are present and match the expected count. 
- /// - Required custody columns are included based on the nodes custody requirements. +impl RangeSyncBlock { + /// Constructs an `RangeSyncBlock` from a block and availability data. /// /// # Errors /// @@ -92,62 +86,41 @@ impl RpcBlock { /// - `MissingCustodyColumns`: Block requires custody columns but they are incomplete. pub fn new( block: Arc>, - block_data: Option>, + block_data: AvailableBlockData, da_checker: &DataAvailabilityChecker, spec: Arc, ) -> Result where T: BeaconChainTypes, { - match block_data { - Some(block_data) => Ok(RpcBlock::FullyAvailable(AvailableBlock::new( - block, block_data, da_checker, spec, - )?)), - None => Ok(RpcBlock::BlockOnly { - block_root: block.canonical_root(), - block, - }), - } + let available_block = AvailableBlock::new(block, block_data, da_checker, spec)?; + Ok(Self { + block: available_block, + }) } #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Hash256, - Arc>, - Option>, - ) { - match self { - RpcBlock::FullyAvailable(available_block) => { - let (block_root, block, block_data) = available_block.deconstruct(); - (block_root, block, Some(block_data)) - } - RpcBlock::BlockOnly { block, block_root } => (block_root, block, None), - } + pub fn deconstruct(self) -> (Hash256, Arc>, AvailableBlockData) { + self.block.deconstruct() } pub fn n_blobs(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, - AvailableBlockData::Blobs(blobs) => blobs.len(), - } - } else { - 0 + match self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::DataColumns(_) => 0, + AvailableBlockData::Blobs(blobs) => blobs.len(), } } pub fn n_data_columns(&self) -> usize { - if let Some(block_data) = self.block_data() { - match block_data { - AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, - AvailableBlockData::DataColumns(columns) => columns.len(), - } - } else { - 0 + match 
self.block_data() { + AvailableBlockData::NoData | AvailableBlockData::Blobs(_) => 0, + AvailableBlockData::DataColumns(columns) => columns.len(), } } + + pub fn into_available_block(self) -> AvailableBlock { + self.block + } } /// A block that has gone through all pre-deneb block processing checks including block processing @@ -412,7 +385,7 @@ impl AsBlock for AvailableBlock { } } -impl AsBlock for RpcBlock { +impl AsBlock for RangeSyncBlock { fn slot(&self) -> Slot { self.as_block().slot() } @@ -432,24 +405,42 @@ impl AsBlock for RpcBlock { self.as_block().message() } fn as_block(&self) -> &SignedBeaconBlock { - match self { - Self::BlockOnly { - block, - block_root: _, - } => block, - Self::FullyAvailable(available_block) => available_block.block(), - } + self.block.as_block() } fn block_cloned(&self) -> Arc> { - match self { - RpcBlock::FullyAvailable(available_block) => available_block.block_cloned(), - RpcBlock::BlockOnly { - block, - block_root: _, - } => block.clone(), - } + self.block.block_cloned() } fn canonical_root(&self) -> Hash256 { - self.as_block().canonical_root() + self.block.block_root() + } +} + +impl AsBlock for LookupBlock { + fn slot(&self) -> Slot { + self.block().slot() + } + fn epoch(&self) -> Epoch { + self.block().epoch() + } + fn parent_root(&self) -> Hash256 { + self.block().parent_root() + } + fn state_root(&self) -> Hash256 { + self.block().state_root() + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.block().signed_block_header() + } + fn message(&self) -> BeaconBlockRef<'_, E> { + self.block().message() + } + fn as_block(&self) -> &SignedBeaconBlock { + self.block() + } + fn block_cloned(&self) -> Arc> { + self.block_cloned() + } + fn canonical_root(&self) -> Hash256 { + self.block_root } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index e266e02f7f..4372efa809 100644 --- 
a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -891,7 +891,7 @@ impl MaybeAvailableBlock { mod test { use super::*; use crate::CustodyContext; - use crate::block_verification_types::RpcBlock; + use crate::block_verification_types::RangeSyncBlock; use crate::custody_context::NodeCustodyType; use crate::data_column_verification::CustodyDataColumn; use crate::test_utils::{ @@ -1085,7 +1085,7 @@ mod test { /// Regression test for KZG verification truncation bug (https://github.com/sigp/lighthouse/pull/7927) #[test] - fn verify_kzg_for_rpc_blocks_should_not_truncate_data_columns_fulu() { + fn verify_kzg_for_range_sync_blocks_should_not_truncate_data_columns_fulu() { let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let da_checker = new_da_checker(spec.clone()); @@ -1128,17 +1128,14 @@ mod test { let block_data = AvailableBlockData::new_with_data_columns(custody_columns); let da_checker = Arc::new(new_da_checker(spec.clone())); - RpcBlock::new(Arc::new(block), Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(Arc::new(block), block_data, &da_checker, spec.clone()) .expect("should create RPC block with custody columns") }) .collect::>(); let available_blocks = blocks_with_columns - .iter() - .filter_map(|block| match block { - RpcBlock::FullyAvailable(available_block) => Some(available_block.clone()), - RpcBlock::BlockOnly { .. 
} => None, - }) + .into_iter() + .map(|block| block.into_available_block()) .collect::>(); // WHEN verifying all blocks together (totalling 256 data columns) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 4bc5bb21d3..c53c29438e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,5 +1,5 @@ use crate::blob_verification::GossipVerifiedBlob; -use crate::block_verification_types::{AsBlock, AvailableBlockData, RpcBlock}; +use crate::block_verification_types::{AsBlock, AvailableBlockData, LookupBlock, RangeSyncBlock}; use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::graffiti_calculator::GraffitiSettings; @@ -823,20 +823,20 @@ where mock_builder_server } - pub fn get_head_block(&self) -> RpcBlock { + pub fn get_head_block(&self) -> RangeSyncBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - self.build_rpc_block_from_store_blobs(Some(block_root), block) + self.build_range_sync_block_from_store_blobs(Some(block_root), block) } - pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { + pub fn get_full_block(&self, block_root: &Hash256) -> RangeSyncBlock { let block = self .chain .get_blinded_block(block_root) .unwrap() .unwrap_or_else(|| panic!("block root does not exist in harness {block_root:?}")); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) + self.build_range_sync_block_from_store_blobs(Some(*block_root), Arc::new(full_block)) } pub fn get_all_validators(&self) -> Vec { @@ -1340,15 +1340,12 @@ where let signed_block = self.sign_beacon_block(block, state); let block_root = signed_block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { - block_root, - block: Arc::new(signed_block), - }; + let lookup_block = 
LookupBlock::new(Arc::new(signed_block)); self.chain.slot_clock.set_slot(slot.as_u64()); self.chain .process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::No, BlockImportSource::Lookup, || Ok(()), @@ -2607,20 +2604,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); + let block_hash: SignedBeaconBlockHash = if !is_available { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? - .try_into() - .expect("block blobs are available"); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2640,19 +2650,33 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); let is_available = !has_blob_commitments || blob_items.is_some(); - let rpc_block = self.build_rpc_block_from_blobs(block, blob_items, is_available)?; - let block_hash: SignedBeaconBlockHash = self - .chain - .process_block( - block_root, - rpc_block, - NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, - || Ok(()), - ) - .await? 
- .try_into() - .expect("block blobs are available"); + let block_hash: SignedBeaconBlockHash = if is_available { + let range_sync_block = self.build_range_sync_block_from_blobs(block, blob_items)?; + self.chain + .process_block( + block_root, + range_sync_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + } else { + self.chain + .process_block( + block_root, + LookupBlock::new(block), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ) + .await? + .try_into() + .expect("block blobs are available") + }; + self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } @@ -2735,13 +2759,13 @@ where state_root } - /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. - pub fn build_rpc_block_from_store_blobs( + pub fn build_range_sync_block_from_store_blobs( &self, block_root: Option, block: Arc>, - ) -> RpcBlock { + ) -> RangeSyncBlock { let block_root = block_root.unwrap_or_else(|| get_block_root(&block)); let has_blobs = block .message() @@ -2749,9 +2773,9 @@ where .blob_kzg_commitments() .is_ok_and(|c| !c.is_empty()); if !has_blobs { - return RpcBlock::new( + return RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2768,9 +2792,9 @@ where .unwrap(); let custody_columns = columns.into_iter().collect::>(); let block_data = AvailableBlockData::new_with_data_columns(custody_columns); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2783,9 +2807,9 @@ where AvailableBlockData::NoData }; - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, 
&self.chain.data_availability_checker, self.chain.spec.clone(), ) @@ -2793,18 +2817,17 @@ where } } - /// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`. - pub fn build_rpc_block_from_blobs( + /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and `BlobsList`. + pub fn build_range_sync_block_from_blobs( &self, block: Arc>>, blob_items: Option<(KzgProofs, BlobsList)>, - is_available: bool, - ) -> Result, BlockError> { + ) -> Result, BlockError> { Ok(if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) { let epoch = block.slot().epoch(E::slots_per_epoch()); let sampling_columns = self.chain.sampling_columns_for_epoch(epoch); - if blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) { + if blob_items.is_some_and(|(kzg_proofs, _)| !kzg_proofs.is_empty()) { // Note: this method ignores the actual custody columns and just take the first // `sampling_column_count` for testing purpose only, because the chain does not // currently have any knowledge of the columns being custodied. @@ -2812,33 +2835,17 @@ where .into_iter() .filter(|d| sampling_columns.contains(d.index())) .collect::>(); - if is_available { - let block_data = AvailableBlockData::new_with_data_columns(columns); - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } - } else if is_available { - RpcBlock::new( + let block_data = AvailableBlockData::new_with_data_columns(columns); + RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + block_data, &self.chain.data_availability_checker, self.chain.spec.clone(), )? } else { - RpcBlock::new( + RangeSyncBlock::new( block, - None, + AvailableBlockData::NoData, &self.chain.data_availability_checker, self.chain.spec.clone(), )? 
@@ -2850,27 +2857,18 @@ where }) .transpose() .unwrap(); - if is_available { - let block_data = if let Some(blobs) = blobs { - AvailableBlockData::new_with_blobs(blobs) - } else { - AvailableBlockData::NoData - }; - - RpcBlock::new( - block, - Some(block_data), - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? + let block_data = if let Some(blobs) = blobs { + AvailableBlockData::new_with_blobs(blobs) } else { - RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - )? - } + AvailableBlockData::NoData + }; + + RangeSyncBlock::new( + block, + block_data, + &self.chain.data_availability_checker, + self.chain.spec.clone(), + )? }) } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index a1922f32a4..bca60d27cd 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -1,7 +1,6 @@ #![cfg(not(debug_assertions))] use beacon_chain::attestation_simulator::produce_unaggregated_attestation; -use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; @@ -223,19 +222,9 @@ async fn produces_attestations() { assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch"); assert_eq!(data.target.root, target_root, "bad target root"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. 
} => panic!("block should be available"), - }; + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(block.clone())); + let available_block = range_sync_block.into_available_block(); let early_attestation = { let proto_block = chain @@ -292,20 +281,12 @@ async fn early_attester_cache_old_request() { .get_block(&head.beacon_block_root) .unwrap(); - let rpc_block = harness - .build_rpc_block_from_store_blobs(Some(head.beacon_block_root), head.beacon_block.clone()); - - let available_block = match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .unwrap(); - available_block - } - RpcBlock::BlockOnly { .. } => panic!("block should be available"), - }; + let available_block = harness + .build_range_sync_block_from_store_blobs( + Some(head.beacon_block_root), + head.beacon_block.clone(), + ) + .into_available_block(); harness .chain diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index ee61177b2a..0ee9a7dba6 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -5,7 +5,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -76,14 +76,11 @@ async fn rpc_blobs_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. 
harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index e385e0dc48..8981b20a55 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,6 +1,6 @@ #![cfg(not(debug_assertions))] // TODO(gloas) we probably need similar test for payload envelope verification -use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, LookupBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::{AvailabilityCheckError, AvailableBlockData}; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ @@ -13,7 +13,7 @@ use beacon_chain::{ }; use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, - InvalidSignature, NotifyExecutionLayer, signature_verify_chain_segment, + InvalidSignature, NotifyExecutionLayer, }; use bls::{AggregateSignature, Keypair, Signature}; use fixed_bytes::FixedBytesExtended; @@ -136,7 +136,7 @@ fn chain_segment_blocks( chain_segment: &[BeaconSnapshot], chain_segment_sidecars: &[Option>], chain: Arc>, -) -> Vec> +) -> Vec> where T: BeaconChainTypes, { @@ -145,25 +145,25 @@ where .zip(chain_segment_sidecars.iter()) .map(|(snapshot, data_sidecars)| { let block = snapshot.beacon_block.clone(); - build_rpc_block(block, data_sidecars, chain.clone()) + build_range_sync_block(block, data_sidecars, chain.clone()) }) .collect() } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock +) -> 
RangeSyncBlock where T: BeaconChainTypes, { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -176,17 +176,17 @@ where .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) .unwrap() } - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) @@ -301,7 +301,7 @@ fn update_data_column_signed_header( async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -339,7 +339,7 @@ async fn chain_segment_full_segment() { async fn chain_segment_varying_chunk_size() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - let blocks: Vec> = + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -384,7 +384,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a block removed. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -405,7 +405,7 @@ async fn chain_segment_non_linear_parent_roots() { /* * Test with a modified parent root. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); @@ -413,9 +413,9 @@ async fn chain_segment_non_linear_parent_roots() { let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.parent_root_mut() = Hash256::zero(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -447,15 +447,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is lower than the parent. */ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -477,15 +477,15 @@ async fn chain_segment_non_linear_slots() { * Test where a child is equal to the parent. 
*/ - let mut blocks: Vec> = + let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); - blocks[3] = RpcBlock::new( + blocks[3] = RangeSyncBlock::new( Arc::new(SignedBeaconBlock::from_block(block, signature)), - blocks[3].block_data().cloned(), + blocks[3].block_data().clone(), &harness.chain.data_availability_checker, harness.chain.spec.clone(), ) @@ -512,11 +512,11 @@ async fn assert_invalid_signature( snapshots: &[BeaconSnapshot], item: &str, ) { - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); @@ -543,7 +543,7 @@ async fn assert_invalid_signature( .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); // We don't care if this fails, we just call this to ensure that all prior blocks have been @@ -558,7 +558,7 @@ async fn assert_invalid_signature( .chain .process_block( snapshots[block_index].beacon_block.canonical_root(), - build_rpc_block( + build_range_sync_block( snapshots[block_index].beacon_block.clone(), &chain_segment_blobs[block_index], harness.chain.clone(), @@ -620,7 +620,7 @@ async fn invalid_signature_gossip_block() { .take(block_index) .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); harness @@ -630,18 +630,12 @@ async fn 
invalid_signature_gossip_block() { .into_block_error() .expect("should import all blocks prior to the one being tested"); let signed_block = SignedBeaconBlock::from_block(block, junk_signature()); - let rpc_block = RpcBlock::new( - Arc::new(signed_block), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(Arc::new(signed_block)); let process_res = harness .chain .process_block( - rpc_block.block_root(), - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -675,11 +669,11 @@ async fn invalid_signature_block_proposal() { block.clone(), junk_signature(), )); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect::>(); // Ensure the block will be rejected if imported in a chain segment. @@ -994,11 +988,11 @@ async fn invalid_signature_deposit() { Arc::new(SignedBeaconBlock::from_block(block, signature)); update_parent_roots(&mut snapshots, &mut chain_segment_blobs); update_proposal_signatures(&mut snapshots, &harness); - let blocks: Vec> = snapshots + let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) .map(|(snapshot, blobs)| { - build_rpc_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) + build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) .collect(); assert!( @@ -1641,9 +1635,9 @@ async fn add_base_block_to_altair_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. 
- let base_rpc_block = RpcBlock::new( + let base_range_sync_block = RangeSyncBlock::new( Arc::new(base_block.clone()), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) @@ -1652,8 +1646,8 @@ async fn add_base_block_to_altair_chain() { harness .chain .process_block( - base_rpc_block.block_root(), - base_rpc_block, + base_range_sync_block.block_root(), + base_range_sync_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1672,9 +1666,9 @@ async fn add_base_block_to_altair_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(base_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1792,19 +1786,13 @@ async fn add_altair_block_to_base_chain() { )); // Ensure that it would be impossible to import via `BeaconChain::process_block`. - let altair_rpc_block = RpcBlock::new( - Arc::new(altair_block.clone()), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let altair_lookup_block = LookupBlock::new(Arc::new(altair_block.clone())); assert!(matches!( harness .chain .process_block( - altair_rpc_block.block_root(), - altair_rpc_block, + altair_lookup_block.block_root(), + altair_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1823,9 +1811,9 @@ async fn add_altair_block_to_base_chain() { .chain .process_chain_segment( vec![ - RpcBlock::new( + RangeSyncBlock::new( Arc::new(altair_block), - None, + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone() ) @@ -1891,18 +1879,18 @@ async fn import_duplicate_block_unrealized_justification() { // Create two verified variants of the block, representing the same block being processed in // parallel. 
let notify_execution_layer = NotifyExecutionLayer::Yes; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block.clone(), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &harness.chain.data_availability_checker, harness.spec.clone(), ) .unwrap(); - let verified_block1 = rpc_block + let verified_block1 = range_sync_block .clone() .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); - let verified_block2 = rpc_block + let verified_block2 = range_sync_block .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); @@ -1972,48 +1960,9 @@ async fn import_execution_pending_block( } } -// Test that `signature_verify_chain_segment` errors with a chain segment of mixed `FullyAvailable` -// and `BlockOnly` RpcBlocks. This situation should never happen in production. -#[tokio::test] -async fn signature_verify_mixed_rpc_block_variants() { - let (snapshots, data_sidecars) = get_chain_segment().await; - let snapshots: Vec<_> = snapshots.into_iter().take(10).collect(); - let data_sidecars: Vec<_> = data_sidecars.into_iter().take(10).collect(); - - let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - - let mut chain_segment = Vec::new(); - - for (i, (snapshot, blobs)) in snapshots.iter().zip(data_sidecars.iter()).enumerate() { - let block = snapshot.beacon_block.clone(); - let block_root = snapshot.beacon_block_root; - - // Alternate between FullyAvailable and BlockOnly - let rpc_block = if i % 2 == 0 { - // FullyAvailable - with blobs/columns if needed - build_rpc_block(block, blobs, harness.chain.clone()) - } else { - // BlockOnly - no data - RpcBlock::new( - block, - None, - &harness.chain.data_availability_checker, - harness.chain.spec.clone(), - ) - .unwrap() - }; - - chain_segment.push((block_root, rpc_block)); - } - - // This should error because `signature_verify_chain_segment` expects a list - // of `RpcBlock::FullyAvailable`. 
- assert!(signature_verify_chain_segment(chain_segment.clone(), &harness.chain).is_err()); -} - // Test that RpcBlock::new() rejects blocks when blob count doesn't match expected. #[tokio::test] -async fn rpc_block_construction_fails_with_wrong_blob_count() { +async fn range_sync_block_construction_fails_with_wrong_blob_count() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).deneb_enabled() @@ -2064,9 +2013,9 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { let block_data = AvailableBlockData::new_with_blobs(wrong_blobs); // Try to create RpcBlock with wrong blob count - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2086,7 +2035,7 @@ async fn rpc_block_construction_fails_with_wrong_blob_count() { // Test that RpcBlock::new() rejects blocks when custody columns are incomplete. #[tokio::test] -async fn rpc_block_rejects_missing_custody_columns() { +async fn range_sync_block_rejects_missing_custody_columns() { let spec = test_spec::(); if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { @@ -2139,9 +2088,9 @@ async fn rpc_block_rejects_missing_custody_columns() { let block_data = AvailableBlockData::new_with_data_columns(incomplete_columns); // Try to create RpcBlock with incomplete custody columns - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(block_data), + block_data, &harness.chain.data_availability_checker, harness.chain.spec.clone(), ); @@ -2227,9 +2176,9 @@ async fn rpc_block_allows_construction_past_da_boundary() { // Try to create RpcBlock with NoData for a block past DA boundary // This should succeed since columns are not expected for blocks past DA boundary - let result = RpcBlock::new( + let result = RangeSyncBlock::new( Arc::new(block), - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, 
&harness.chain.data_availability_checker, harness.chain.spec.clone(), ); diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 9941c957e2..6114bd7f45 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -7,7 +7,7 @@ use beacon_chain::test_utils::{ }; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, - block_verification_types::AsBlock, + block_verification_types::{AsBlock, LookupBlock}, }; use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; @@ -80,16 +80,13 @@ async fn rpc_columns_with_invalid_header_signature() { // Process the block without blobs so that it doesn't become available. harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .unwrap(); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -169,16 +166,13 @@ async fn verify_header_signature_fork_block_bug() { // The block will be accepted but won't become the head because it's not fully available. // This keeps the head at the pre-fork state (Electra). 
harness.advance_slot(); - let rpc_block = harness - .build_rpc_block_from_blobs(signed_block.clone(), None, false) - .expect("Should build RPC block"); let availability = harness .chain .process_block( block_root, - rpc_block, + LookupBlock::new(signed_block.clone()), NotifyExecutionLayer::Yes, - BlockImportSource::RangeSync, + BlockImportSource::Lookup, || Ok(()), ) .await diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index bcc50990ec..3ed8f59838 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -1,7 +1,7 @@ #![cfg(not(debug_assertions))] #![allow(clippy::result_large_err)] -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::{ BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, NotifyExecutionLayer, StateSkipConfig, @@ -686,19 +686,13 @@ async fn invalidates_all_descendants() { assert_eq!(fork_parent_state.slot(), fork_parent_slot); let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -796,19 +790,13 @@ async fn switches_heads() { let ((fork_block, _), _fork_post_state) = rig.harness.make_block(fork_parent_state, fork_slot).await; let fork_parent_root = fork_block.parent_root(); - let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - 
&rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); let fork_block_root = rig .harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -1086,15 +1074,9 @@ async fn invalid_parent() { )); // Ensure the block built atop an invalid payload is invalid for import. - let rpc_block = RpcBlock::new( - block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let lookup_block = LookupBlock::new(block.clone()); assert!(matches!( - rig.harness.chain.process_block(rpc_block.block_root(), rpc_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, + rig.harness.chain.process_block(lookup_block.block_root(), lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), ).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) @@ -1348,18 +1330,12 @@ async fn recover_from_invalid_head_by_importing_blocks() { } = InvalidHeadSetup::new().await; // Import the fork block, it should become the head. 
- let fork_rpc_block = RpcBlock::new( - fork_block.clone(), - None, - &rig.harness.chain.data_availability_checker, - rig.harness.chain.spec.clone(), - ) - .unwrap(); + let fork_lookup_block = LookupBlock::new(fork_block.clone()); rig.harness .chain .process_block( - fork_rpc_block.block_root(), - fork_rpc_block, + fork_lookup_block.block_root(), + fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index a70ad89ca9..89c28cca37 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2,7 +2,7 @@ #![allow(clippy::result_large_err)] use beacon_chain::attestation_verification::Error as AttnError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; @@ -3144,7 +3144,10 @@ async fn weak_subjectivity_sync_test( beacon_chain .process_block( full_block_root, - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)), + harness.build_range_sync_block_from_store_blobs( + Some(block_root), + Arc::new(full_block), + ), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3214,20 +3217,16 @@ async fn weak_subjectivity_sync_test( .expect("should get block") .expect("should get block"); - let rpc_block = - harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)); + let range_sync_block = harness + .build_range_sync_block_from_store_blobs(Some(block_root), Arc::new(full_block)); - match rpc_block { - RpcBlock::FullyAvailable(available_block) => { - harness - .chain - .data_availability_checker - .verify_kzg_for_available_block(&available_block) - .expect("should verify kzg"); - 
available_blocks.push(available_block); - } - RpcBlock::BlockOnly { .. } => panic!("Should be an available block"), - } + let fully_available_block = range_sync_block.into_available_block(); + harness + .chain + .data_availability_checker + .verify_kzg_for_available_block(&fully_available_block) + .expect("should verify kzg"); + available_blocks.push(fully_available_block); } // Corrupt the signature on the 1st block to ensure that the backfill processor is checking @@ -3798,19 +3797,13 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert_eq!(split.block_root, valid_fork_block.parent_root()); assert_ne!(split.state_root, unadvanced_split_state_root); - let invalid_fork_rpc_block = RpcBlock::new( - invalid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let invalid_fork_lookup_block = LookupBlock::new(invalid_fork_block.clone()); // Applying the invalid block should fail. let err = harness .chain .process_block( - invalid_fork_rpc_block.block_root(), - invalid_fork_rpc_block, + invalid_fork_lookup_block.block_root(), + invalid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -3820,18 +3813,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. })); // Applying the valid block should succeed, but it should not become head. 
- let valid_fork_rpc_block = RpcBlock::new( - valid_fork_block.clone(), - None, - &harness.chain.data_availability_checker, - harness.spec.clone(), - ) - .unwrap(); + let valid_fork_lookup_block = LookupBlock::new(valid_fork_block.clone()); harness .chain .process_block( - valid_fork_rpc_block.block_root(), - valid_fork_rpc_block, + valid_fork_lookup_block.block_root(), + valid_fork_lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index bbf92a4dda..43dfbeb836 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -2,7 +2,7 @@ use crate::metrics; use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::{AsBlock, LookupBlock}; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ @@ -311,19 +311,11 @@ pub async fn publish_block>( slot = %block.slot(), "Block previously seen" ); - let Ok(rpc_block) = RpcBlock::new( - block.clone(), - None, - &chain.data_availability_checker, - chain.spec.clone(), - ) else { - return Err(warp_utils::reject::custom_bad_request( - "Unable to construct rpc block".to_string(), - )); - }; + // try to reprocess as a lookup (single) block and let sync take care of missing components + let lookup_block = LookupBlock::new(block.clone()); let import_result = Box::pin(chain.process_block( block_root, - rpc_block, + lookup_block, NotifyExecutionLayer::Yes, BlockImportSource::HttpApi, publish_fn, diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 357d6c08fd..e40eacce08 100644 --- 
a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,7 +1,8 @@ use crate::sync::manager::BlockProcessType; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::blob_verification::{GossipBlobError, observe_gossip_blob}; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, observe_gossip_data_column}; use beacon_chain::fetch_blobs::{ EngineGetBlobsOutput, FetchEngineBlobError, fetch_and_process_engine_blobs, @@ -517,14 +518,14 @@ impl NetworkBeaconProcessor { /// Create a new `Work` event for some block, where the result from computation (if any) is /// sent to the other side of `result_tx`. - pub fn send_rpc_beacon_block( + pub fn send_lookup_beacon_block( self: &Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> Result<(), Error> { - let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -610,7 +611,7 @@ impl NetworkBeaconProcessor { pub fn send_chain_segment( self: &Arc, process_id: ChainSegmentProcessId, - blocks: Vec>, + blocks: Vec>, ) -> Result<(), Error> { debug!(blocks = blocks.len(), id = ?process_id, "Batch sending for process"); let processor = self.clone(); diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 629a42c688..f7fbce8e56 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -6,7 +6,8 @@ use crate::sync::{ ChainId, manager::{BlockProcessType, SyncMessage}, }; -use 
beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::{ @@ -51,16 +52,16 @@ impl NetworkBeaconProcessor { /// /// This separate function was required to prevent a cycle during compiler /// type checking. - pub fn generate_rpc_beacon_block_process_fn( + pub fn generate_lookup_beacon_block_process_fn( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> AsyncFn { let process_fn = async move { let duplicate_cache = self.duplicate_cache.clone(); - self.process_rpc_block( + self.process_lookup_block( block_root, block, seen_timestamp, @@ -73,15 +74,15 @@ impl NetworkBeaconProcessor { } /// Returns the `process_fn` and `ignore_fn` required when requeuing an RPC block. - pub fn generate_rpc_beacon_block_fns( + pub fn generate_lookup_beacon_block_fns( self: Arc, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, ) -> (AsyncFn, BlockingFn) { // An async closure which will import the block. 
- let process_fn = self.clone().generate_rpc_beacon_block_process_fn( + let process_fn = self.clone().generate_lookup_beacon_block_process_fn( block_root, block, seen_timestamp, @@ -107,10 +108,10 @@ impl NetworkBeaconProcessor { skip_all, fields(?block_root), )] - pub async fn process_rpc_block( + pub async fn process_lookup_block( self: Arc>, block_root: Hash256, - block: RpcBlock, + block: LookupBlock, seen_timestamp: Duration, process_type: BlockProcessType, duplicate_cache: DuplicateCache, @@ -118,14 +119,14 @@ impl NetworkBeaconProcessor { // Check if the block is already being imported through another source let Some(handle) = duplicate_cache.check_and_insert(block_root) else { debug!( - action = "sending rpc block to reprocessing queue", + action = "sending lookup block to reprocessing queue", %block_root, ?process_type, "Gossip block is being processed" ); // Send message to work reprocess queue to retry the block - let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns( + let (process_fn, ignore_fn) = self.clone().generate_lookup_beacon_block_fns( block_root, block, seen_timestamp, @@ -160,7 +161,7 @@ impl NetworkBeaconProcessor { slot = %block.slot(), commitments_formatted, ?process_type, - "Processing RPC block" + "Processing Lookup block" ); let signed_beacon_block = block.block_cloned(); @@ -530,7 +531,7 @@ impl NetworkBeaconProcessor { pub async fn process_chain_segment( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::RangeBatchId(chain_id, epoch) = process_id else { // This is a request from range sync, this should _never_ happen @@ -611,7 +612,7 @@ impl NetworkBeaconProcessor { pub fn process_chain_segment_backfill( &self, process_id: ChainSegmentProcessId, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) { let ChainSegmentProcessId::BackSyncBatchId(epoch) = process_id else { // this a request from RangeSync, this should _never_ happen @@ -682,7 
+683,7 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] async fn process_blocks<'a>( &self, - downloaded_blocks: impl Iterator>, + downloaded_blocks: impl Iterator>, notify_execution_layer: NotifyExecutionLayer, ) -> (usize, Result<(), ChainSegmentFailed>) { let blocks: Vec<_> = downloaded_blocks.cloned().collect(); @@ -716,23 +717,13 @@ impl NetworkBeaconProcessor { #[instrument(skip_all)] fn process_backfill_blocks( &self, - downloaded_blocks: Vec>, + downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { let total_blocks = downloaded_blocks.len(); - let mut available_blocks = vec![]; - - for downloaded_block in downloaded_blocks { - match downloaded_block { - RpcBlock::FullyAvailable(available_block) => available_blocks.push(available_block), - RpcBlock::BlockOnly { .. } => return ( - 0, - Err(ChainSegmentFailed { - peer_action: None, - message: "Invalid downloaded_blocks segment. All downloaded blocks must be fully available".to_string() - }) - ), - } - } + let available_blocks = downloaded_blocks + .into_iter() + .map(|block| block.into_available_block()) + .collect::>(); match self .chain diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 4b0ca0d46c..5fa8c729cb 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -8,7 +8,7 @@ use crate::{ service::NetworkMessage, sync::{SyncMessage, manager::BlockProcessType}, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip_fulu; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; @@ -437,36 +437,24 @@ impl TestRig { } } - pub fn enqueue_rpc_block(&self) { + pub fn enqueue_lookup_block(&self) { let block_root = 
self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 0 }, ) .unwrap(); } - pub fn enqueue_single_lookup_rpc_block(&self) { + pub fn enqueue_single_lookup_block(&self) { let block_root = self.next_block.canonical_root(); self.network_beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - RpcBlock::new( - self.next_block.clone(), - None, - &self._harness.chain.data_availability_checker, - self._harness.spec.clone(), - ) - .unwrap(), + LookupBlock::new(self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -1305,7 +1293,7 @@ async fn attestation_to_unknown_block_processed(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1391,7 +1379,7 @@ async fn aggregate_attestation_to_unknown_block(import_method: BlockImportMethod } } BlockImportMethod::Rpc => { - rig.enqueue_rpc_block(); + rig.enqueue_lookup_block(); events.push(WorkType::RpcBlock); if num_blobs > 0 { rig.enqueue_single_lookup_rpc_blobs(); @@ -1585,7 +1573,7 @@ async fn test_rpc_block_reprocessing() { let next_block_root = rig.next_block.canonical_root(); // Insert the next block into the duplicate cache manually let handle = rig.duplicate_cache.check_and_insert(next_block_root); - rig.enqueue_single_lookup_rpc_block(); + rig.enqueue_single_lookup_block(); rig.assert_event_journal_completes(&[WorkType::RpcBlock]) .await; diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs 
index 801c9eca4d..0f80138d24 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -19,7 +19,7 @@ use crate::sync::manager::BatchProcessResult; use crate::sync::network_context::{ RangeRequestId, RpcRequestSendError, RpcResponseError, SyncNetworkContext, }; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::service::api_types::Id; use lighthouse_network::types::{BackFillState, NetworkGlobals}; @@ -55,7 +55,7 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 10; /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty. const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 10; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type BackFillBatchInfo = BatchInfo, RpcBlocks>; @@ -390,7 +390,7 @@ impl BackFillSync { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> Result { // check if we have this batch let Some(batch) = self.batches.get_mut(&batch_id) else { diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index e87ffd119e..10af1bf503 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -1,4 +1,4 @@ -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use educe::Educe; use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; @@ -449,7 +449,7 @@ impl BatchInfo { } // BatchInfo implementations for RangeSync -impl BatchInfo>> { +impl BatchInfo>> { /// Returns a BlocksByRange request associated with the batch. 
pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ByRangeRequestType) { ( diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index a287771854..98cf3e0a1f 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,6 +1,6 @@ use beacon_chain::{ BeaconChainTypes, - block_verification_types::{AvailableBlockData, RpcBlock}, + block_verification_types::{AvailableBlockData, RangeSyncBlock}, data_availability_checker::DataAvailabilityChecker, data_column_verification::CustodyDataColumn, get_block_root, @@ -200,7 +200,7 @@ impl RangeBlockComponentsRequest { &mut self, da_checker: Arc>, spec: Arc, - ) -> Option>, CouplingError>> + ) -> Option>, CouplingError>> where T: BeaconChainTypes, { @@ -288,7 +288,7 @@ impl RangeBlockComponentsRequest { blobs: Vec>>, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -335,7 +335,7 @@ impl RangeBlockComponentsRequest { })?; let block_data = AvailableBlockData::new_with_blobs(blobs); responses.push( - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::BlobPeerFailure(format!("{e:?}")))?, ) } @@ -360,7 +360,7 @@ impl RangeBlockComponentsRequest { attempt: usize, da_checker: Arc>, spec: Arc, - ) -> Result>, CouplingError> + ) -> Result>, CouplingError> where T: BeaconChainTypes, { @@ -388,12 +388,12 @@ impl RangeBlockComponentsRequest { // Now iterate all blocks ensuring that the block roots of each block and data column match, // plus we have columns for our custody requirements - let mut rpc_blocks = Vec::with_capacity(blocks.len()); + let mut range_sync_blocks = Vec::with_capacity(blocks.len()); let exceeded_retries = attempt >= MAX_COLUMN_RETRIES; for block in blocks { let block_root = 
get_block_root(&block); - rpc_blocks.push(if block.num_expected_blobs() > 0 { + range_sync_blocks.push(if block.num_expected_blobs() > 0 { let Some(mut data_columns_by_index) = data_columns_by_block.remove(&block_root) else { let responsible_peers = column_to_peer.iter().map(|c| (*c.0, *c.1)).collect(); @@ -441,11 +441,11 @@ impl RangeBlockComponentsRequest { let block_data = AvailableBlockData::new_with_data_columns(custody_columns.iter().map(|c| c.as_data_column().clone()).collect::>()); - RpcBlock::new(block, Some(block_data), &da_checker, spec.clone()) + RangeSyncBlock::new(block, block_data, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? } else { // Block has no data, expects zero columns - RpcBlock::new(block, Some(AvailableBlockData::NoData), &da_checker, spec.clone()) + RangeSyncBlock::new(block, AvailableBlockData::NoData, &da_checker, spec.clone()) .map_err(|e| CouplingError::InternalError(format!("{:?}", e)))? }); } @@ -458,7 +458,7 @@ impl RangeBlockComponentsRequest { debug!(?remaining_roots, "Not all columns consumed for block"); } - Ok(rpc_blocks) + Ok(range_sync_blocks) } } @@ -947,7 +947,7 @@ mod tests { } let result: Result< - Vec>, + Vec>, crate::sync::block_sidecar_coupling::CouplingError, > = info.responses(da_checker.clone(), spec.clone()).unwrap(); assert!(result.is_err()); @@ -981,10 +981,10 @@ mod tests { // WHEN: Attempting to get responses again let result = info.responses(da_checker, spec).unwrap(); - // THEN: Should succeed with complete RPC blocks + // THEN: Should succeed with complete RangeSync blocks assert!(result.is_ok()); - let rpc_blocks = result.unwrap(); - assert_eq!(rpc_blocks.len(), 2); + let range_sync_blocks = result.unwrap(); + assert_eq!(range_sync_blocks.len(), 2); } #[test] diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 7e2c0d9a94..ff630bb470 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ 
b/beacon_node/network/src/sync/network_context.rs @@ -17,7 +17,8 @@ use crate::sync::block_lookups::SingleLookupId; use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::requests::BlobsByRootSingleBlockRequest; use crate::sync::range_data_column_batch_request::RangeDataColumnBatchRequest; -use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; +use beacon_chain::block_verification_types::LookupBlock; +use beacon_chain::block_verification_types::{AsBlock, RangeSyncBlock}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessStatus, EngineState}; use custody::CustodyRequestResult; use fnv::FnvHashMap; @@ -735,7 +736,7 @@ impl SyncNetworkContext { &mut self, id: ComponentsByRangeRequestId, range_block_component: RangeBlockComponent, - ) -> Option>, RpcResponseError>> { + ) -> Option>, RpcResponseError>> { let Entry::Occupied(mut entry) = self.components_by_range_requests.entry(id) else { metrics::inc_counter_vec(&metrics::SYNC_UNKNOWN_NETWORK_REQUESTS, &["range_blocks"]); return None; @@ -1588,21 +1589,15 @@ impl SyncNetworkContext { .beacon_processor_if_enabled() .ok_or(SendErrorProcessor::ProcessorNotAvailable)?; - let block = RpcBlock::new( - block, - None, - &self.chain.data_availability_checker, - self.chain.spec.clone(), - ) - .map_err(|_| SendErrorProcessor::SendError)?; + let lookup_block = LookupBlock::new(block); - debug!(block = ?block_root, block_slot = %block.slot(), id, "Sending block for processing"); + debug!(block = ?block_root, block_slot = %lookup_block.slot(), id, "Sending block for processing"); // Lookup sync event safety: If `beacon_processor.send_rpc_beacon_block` returns Ok() sync // must receive a single `SyncMessage::BlockComponentProcessed` with this process type beacon_processor - .send_rpc_beacon_block( + .send_lookup_beacon_block( block_root, - block, + lookup_block, seen_timestamp, BlockProcessType::SingleBlock { id }, ) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs 
b/beacon_node/network/src/sync/range_sync/chain.rs index e3ff638121..d533d8ed0d 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -10,7 +10,7 @@ use crate::sync::block_sidecar_coupling::CouplingError; use crate::sync::network_context::{RangeRequestId, RpcRequestSendError, RpcResponseError}; use crate::sync::{BatchProcessResult, network_context::SyncNetworkContext}; use beacon_chain::BeaconChainTypes; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use lighthouse_network::service::api_types::Id; use lighthouse_network::{PeerAction, PeerId}; use logging::crit; @@ -40,7 +40,7 @@ const BATCH_BUFFER_SIZE: u8 = 5; /// and continued is now in an inconsistent state. pub type ProcessingResult = Result; -type RpcBlocks = Vec>; +type RpcBlocks = Vec>; type RangeSyncBatchInfo = BatchInfo, RpcBlocks>; type RangeSyncBatches = BTreeMap>; @@ -273,7 +273,7 @@ impl SyncingChain { batch_id: BatchId, peer_id: &PeerId, request_id: Id, - blocks: Vec>, + blocks: Vec>, ) -> ProcessingResult { let _guard = self.span.clone().entered(); // check if we have this batch diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 9fd72ac98a..6509ac3cb3 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,7 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::BatchProcessResult; use crate::sync::batch::BatchId; use crate::sync::network_context::{RpcResponseError, SyncNetworkContext}; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::service::api_types::Id; @@ -213,7 +213,7 @@ where chain_id: ChainId, batch_id: BatchId, request_id: Id, - 
blocks: Vec>, + blocks: Vec>, ) { // check if this chunk removes the chain match self.chains.call_by_id(chain_id, |chain| { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 769a11d976..cd872df887 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -7,6 +7,7 @@ use crate::sync::{ manager::{BlockProcessType, BlockProcessingResult, SyncManager}, }; use beacon_chain::blob_verification::KzgVerifiedBlob; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ AvailabilityProcessingStatus, BlockError, NotifyExecutionLayer, @@ -464,7 +465,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.blobs()) + .blobs() .unwrap_or_else(|| panic!("Block {id:?} has no blobs")) .iter() .find(|blob| blob.index == id.index) @@ -528,7 +529,7 @@ impl TestRig { panic!("Test consumer requested unknown block: {id:?}") }) .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .unwrap_or_else(|| panic!("Block id {id:?} has no columns")); id.columns .iter() @@ -594,7 +595,7 @@ impl TestRig { // - Some blocks may not have blobs as the blob count is random let blobs = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.blobs())) + .filter_map(|block| block.block_data().blobs()) .flat_map(|blobs| blobs.into_iter()) .collect::>(); self.send_rpc_blobs_response(req_id, peer_id, &blobs); @@ -610,7 +611,7 @@ impl TestRig { // - Some blocks may not have columns as the blob count is random let columns = (req.start_slot..req.start_slot + req.count) .filter_map(|slot| self.network_blocks_by_slot.get(&Slot::new(slot))) - .filter_map(|block| block.block_data().and_then(|d| d.data_columns())) + .filter_map(|block| 
block.block_data().data_columns()) .flat_map(|columns| { columns .into_iter() @@ -786,10 +787,10 @@ impl TestRig { } fn corrupt_last_block_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let mut block = (*rpc_block.block_cloned()).clone(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let range_sync_block = self.get_last_block().clone(); + let mut block = (*range_sync_block.block_cloned()).clone(); + let blobs = range_sync_block.block_data().blobs(); + let columns = range_sync_block.block_data().data_columns(); *block.signature_mut() = self.valid_signature(); self.re_insert_block(Arc::new(block), blobs, columns); } @@ -801,15 +802,15 @@ impl TestRig { } fn corrupt_last_blob_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).signed_block_header.signature = self.valid_signature(); let max_blobs = @@ -822,15 +823,15 @@ impl TestRig { } fn corrupt_last_blob_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let mut blobs = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let mut blobs = range_sync_block .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs") .into_iter() .collect::>(); - let columns = rpc_block.block_data().and_then(|d| d.data_columns()); + let columns = 
range_sync_block.block_data().data_columns(); let first = blobs.first_mut().expect("empty blobs"); Arc::make_mut(first).kzg_proof = kzg::KzgProof::empty(); let max_blobs = @@ -843,12 +844,12 @@ impl TestRig { } fn corrupt_last_column_proposer_signature(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); Arc::make_mut(first) @@ -859,12 +860,12 @@ impl TestRig { } fn corrupt_last_column_kzg_proof(&mut self) { - let rpc_block = self.get_last_block().clone(); - let block = rpc_block.block_cloned(); - let blobs = rpc_block.block_data().and_then(|d| d.blobs()); - let mut columns = rpc_block + let range_sync_block = self.get_last_block().clone(); + let block = range_sync_block.block_cloned(); + let blobs = range_sync_block.block_data().blobs(); + let mut columns = range_sync_block .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("no columns"); let first = columns.first_mut().expect("empty columns"); let column = Arc::make_mut(first); @@ -873,7 +874,7 @@ impl TestRig { self.re_insert_block(block, blobs, Some(columns)); } - fn get_last_block(&self) -> &RpcBlock { + fn get_last_block(&self) -> &RangeSyncBlock { let (_, last_block) = self .network_blocks_by_root .iter() @@ -893,13 +894,13 @@ impl TestRig { let block_root = block.canonical_root(); let block_slot = block.slot(); let block_data = if let Some(columns) = columns { - Some(AvailableBlockData::new_with_data_columns(columns)) + AvailableBlockData::new_with_data_columns(columns) } else if let Some(blobs) = blobs { - 
Some(AvailableBlockData::new_with_blobs(blobs)) + AvailableBlockData::new_with_blobs(blobs) } else { - Some(AvailableBlockData::NoData) + AvailableBlockData::NoData }; - let rpc_block = RpcBlock::new( + let range_sync_block = RangeSyncBlock::new( block, block_data, &self.harness.chain.data_availability_checker, @@ -907,8 +908,9 @@ impl TestRig { ) .unwrap(); self.network_blocks_by_slot - .insert(block_slot, rpc_block.clone()); - self.network_blocks_by_root.insert(block_root, rpc_block); + .insert(block_slot, range_sync_block.clone()); + self.network_blocks_by_root + .insert(block_root, range_sync_block); } /// Trigger a lookup with the last created block @@ -947,7 +949,7 @@ impl TestRig { /// Import a block directly into the chain without going through lookup sync async fn import_block_by_root(&mut self, block_root: Hash256) { - let rpc_block = self + let range_sync_block = self .network_blocks_by_root .get(&block_root) .unwrap_or_else(|| panic!("No block for root {block_root}")) @@ -957,9 +959,9 @@ impl TestRig { .chain .process_block( block_root, - rpc_block, + range_sync_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::RangeSync, || Ok(()), ) .await @@ -979,7 +981,7 @@ impl TestRig { let blobs = self .get_last_block() .block_data() - .and_then(|d| d.blobs()) + .blobs() .expect("no blobs"); let blob = blobs.first().expect("empty blobs"); self.trigger_unknown_parent_blob(peer_id, blob.clone()); @@ -990,7 +992,7 @@ impl TestRig { let columns = self .get_last_block() .block_data() - .and_then(|d| d.data_columns()) + .data_columns() .expect("No data columns"); let column = columns.first().expect("empty columns"); self.trigger_unknown_parent_column(peer_id, column.clone()); @@ -1475,15 +1477,14 @@ impl TestRig { ) -> AvailabilityProcessingStatus { // Simulate importing block from another source. Don't use GossipVerified as it checks with // the clock, which does not match the timestamp in the payload. 
- let block_root = block.canonical_root(); - let rpc_block = RpcBlock::BlockOnly { block_root, block }; + let lookup_block = LookupBlock::new(block); self.harness .chain .process_block( - block_root, - rpc_block, + lookup_block.block_root(), + lookup_block, NotifyExecutionLayer::Yes, - BlockImportSource::Gossip, + BlockImportSource::Lookup, || Ok(()), ) .await @@ -2196,10 +2197,7 @@ async fn blobs_in_da_checker_skip_download() { }; r.build_chain(1).await; let block = r.get_last_block().clone(); - let blobs = block - .block_data() - .and_then(|d| d.blobs()) - .expect("block with no blobs"); + let blobs = block.block_data().blobs().expect("block with no blobs"); for blob in &blobs { r.insert_blob_to_da_checker(blob.clone()); } diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index f00cf5841d..6e948e4726 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -3,7 +3,7 @@ use crate::sync::SyncMessage; use crate::sync::block_lookups::BlockLookupsMetrics; use crate::sync::manager::SyncManager; use crate::sync::tests::lookups::SimulateConfig; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::RangeSyncBlock; use beacon_chain::builder::Witness; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; @@ -77,8 +77,8 @@ struct TestRig { rng: ChaCha20Rng, fork_name: ForkName, /// Blocks that will be used in the test but may not be known to `harness` yet. 
- network_blocks_by_root: HashMap>, - network_blocks_by_slot: HashMap>, + network_blocks_by_root: HashMap>, + network_blocks_by_slot: HashMap>, penalties: Vec, /// All seen lookups through the test run seen_lookups: HashMap, diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 67395ccd25..c19ee8eb6d 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -10,7 +10,7 @@ use beacon_chain::block_verification_types::AvailableBlockData; use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RpcBlock}; +use beacon_chain::{EngineState, NotifyExecutionLayer, block_verification_types::RangeSyncBlock}; use beacon_processor::WorkType; use lighthouse_network::rpc::RequestType; use lighthouse_network::rpc::methods::{ @@ -430,7 +430,7 @@ impl TestRig { .chain .process_block( block_root, - build_rpc_block(block.into(), &data_sidecars, self.harness.chain.clone()), + build_range_sync_block(block.into(), &data_sidecars, self.harness.chain.clone()), NotifyExecutionLayer::Yes, BlockImportSource::RangeSync, || Ok(()), @@ -443,17 +443,17 @@ impl TestRig { } } -fn build_rpc_block( +fn build_range_sync_block( block: Arc>, data_sidecars: &Option>, chain: Arc>, -) -> RpcBlock { +) -> RangeSyncBlock { match data_sidecars { Some(DataSidecars::Blobs(blobs)) => { let block_data = AvailableBlockData::new_with_blobs(blobs.clone()); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, chain.spec.clone(), ) @@ -466,18 +466,18 @@ fn build_rpc_block( .map(|c| c.as_data_column().clone()) .collect::>(), ); - RpcBlock::new( + RangeSyncBlock::new( block, - Some(block_data), + block_data, &chain.data_availability_checker, 
chain.spec.clone(), ) .unwrap() } // Block has no data, expects zero columns - None => RpcBlock::new( + None => RangeSyncBlock::new( block, - Some(AvailableBlockData::NoData), + AvailableBlockData::NoData, &chain.data_availability_checker, chain.spec.clone(), ) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index ca77dc8d79..07a7d4c6b6 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -3,7 +3,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; use beacon_chain::blob_verification::GossipBlobError; -use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::block_verification_types::LookupBlock; use beacon_chain::chain_config::{ DEFAULT_RE_ORG_HEAD_THRESHOLD, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, DisallowedReOrgOffsets, @@ -561,21 +561,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? 
.map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = data_column_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { @@ -659,21 +651,13 @@ impl Tester { let block = Arc::new(block); let result: Result, _> = self - .block_on_dangerous( - self.harness.chain.process_block( - block_root, - RpcBlock::new( - block.clone(), - None, - &self.harness.chain.data_availability_checker, - self.harness.chain.spec.clone(), - ) - .map_err(|e| Error::InternalError(format!("{:?}", e)))?, - NotifyExecutionLayer::Yes, - BlockImportSource::Lookup, - || Ok(()), - ), - )? + .block_on_dangerous(self.harness.chain.process_block( + block_root, + LookupBlock::new(block.clone()), + NotifyExecutionLayer::Yes, + BlockImportSource::Lookup, + || Ok(()), + ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid {