mirror of
https://github.com/sigp/lighthouse.git
synced 2026-04-17 21:08:32 +00:00
Add DataColumnSidecar gossip topic and message handling (#6147)
* Add `DataColumnSidecar` gossip topic and verification (#5050 and #5783). * Remove gossip verification changes (#5783). * Merge branch 'unstable' into data-column-gossip # Conflicts: # beacon_node/beacon_chain/src/data_column_verification.rs # beacon_node/beacon_chain/src/lib.rs * Add gossip cache timeout for data columns. Rename data column metrics for consistency. * Remove usage of `unimplemented!` and address review comments. * Remove unused `GossipDataColumnError` variants and address review comments. * Merge branch 'unstable' into data-column-gossip * Update Cargo.lock * Arc `ChainSpec` in discovery to avoid performance regression when needing to clone it repeatedly.
This commit is contained in:
@@ -23,6 +23,7 @@ use crate::chain_config::ChainConfig;
|
||||
use crate::data_availability_checker::{
|
||||
Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker,
|
||||
};
|
||||
use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn};
|
||||
use crate::early_attester_cache::EarlyAttesterCache;
|
||||
use crate::errors::{BeaconChainError as Error, BlockProductionError};
|
||||
use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
|
||||
@@ -2118,6 +2119,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn verify_data_column_sidecar_for_gossip(
|
||||
self: &Arc<Self>,
|
||||
data_column_sidecar: Arc<DataColumnSidecar<T::EthSpec>>,
|
||||
subnet_id: u64,
|
||||
) -> Result<GossipVerifiedDataColumn<T>, GossipDataColumnError> {
|
||||
metrics::inc_counter(&metrics::DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS);
|
||||
let _timer = metrics::start_timer(&metrics::DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES);
|
||||
GossipVerifiedDataColumn::new(data_column_sidecar, subnet_id, self).map(|v| {
|
||||
metrics::inc_counter(&metrics::DATA_COLUMN_SIDECAR_PROCESSING_SUCCESSES);
|
||||
v
|
||||
})
|
||||
}
|
||||
|
||||
pub fn verify_blob_sidecar_for_gossip(
|
||||
self: &Arc<Self>,
|
||||
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
|
||||
@@ -2964,6 +2978,39 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.remove_notified(&block_root, r)
|
||||
}
|
||||
|
||||
/// Cache the data columns in the processing cache, process it, then evict it from the cache if it was
|
||||
/// imported or errors.
|
||||
pub async fn process_gossip_data_columns(
|
||||
self: &Arc<Self>,
|
||||
data_columns: Vec<GossipVerifiedDataColumn<T>>,
|
||||
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
|
||||
let Ok(block_root) = data_columns
|
||||
.iter()
|
||||
.map(|c| c.block_root())
|
||||
.unique()
|
||||
.exactly_one()
|
||||
else {
|
||||
return Err(BlockError::InternalError(
|
||||
"Columns should be from the same block".to_string(),
|
||||
));
|
||||
};
|
||||
|
||||
// If this block has already been imported to forkchoice it must have been available, so
|
||||
// we don't need to process its samples again.
|
||||
if self
|
||||
.canonical_head
|
||||
.fork_choice_read_lock()
|
||||
.contains_block(&block_root)
|
||||
{
|
||||
return Err(BlockError::BlockIsAlreadyKnown(block_root));
|
||||
}
|
||||
|
||||
let r = self
|
||||
.check_gossip_data_columns_availability_and_import(data_columns)
|
||||
.await;
|
||||
self.remove_notified_custody_columns(&block_root, r)
|
||||
}
|
||||
|
||||
/// Cache the blobs in the processing cache, process it, then evict it from the cache if it was
|
||||
/// imported or errors.
|
||||
pub async fn process_rpc_blobs(
|
||||
@@ -3013,6 +3060,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
r
|
||||
}
|
||||
|
||||
/// Remove any block components from the *processing cache* if we no longer require them. If the
|
||||
/// block was imported full or erred, we no longer require them.
|
||||
fn remove_notified_custody_columns(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
r: Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>>,
|
||||
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
|
||||
let has_missing_components =
|
||||
matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _)));
|
||||
if !has_missing_components {
|
||||
self.reqresp_pre_import_cache.write().remove(block_root);
|
||||
}
|
||||
r
|
||||
}
|
||||
|
||||
/// Wraps `process_block` in logic to cache the block's commitments in the processing cache
|
||||
/// and evict if the block was imported or errored.
|
||||
pub async fn process_block_with_early_caching<B: IntoExecutionPendingBlock<T>>(
|
||||
@@ -3257,6 +3319,31 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.process_availability(slot, availability).await
|
||||
}
|
||||
|
||||
/// Checks if the provided data column can make any cached blocks available, and imports immediately
|
||||
/// if so, otherwise caches the data column in the data availability checker.
|
||||
async fn check_gossip_data_columns_availability_and_import(
|
||||
self: &Arc<Self>,
|
||||
data_columns: Vec<GossipVerifiedDataColumn<T>>,
|
||||
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
|
||||
if let Some(slasher) = self.slasher.as_ref() {
|
||||
for data_colum in &data_columns {
|
||||
slasher.accept_block_header(data_colum.signed_block_header());
|
||||
}
|
||||
}
|
||||
|
||||
let Ok(slot) = data_columns.iter().map(|c| c.slot()).unique().exactly_one() else {
|
||||
return Err(BlockError::InternalError(
|
||||
"Columns for the same block should have matching slot".to_string(),
|
||||
));
|
||||
};
|
||||
|
||||
let availability = self
|
||||
.data_availability_checker
|
||||
.put_gossip_data_columns(data_columns)?;
|
||||
|
||||
self.process_availability(slot, availability).await
|
||||
}
|
||||
|
||||
/// Checks if the provided blobs can make any cached blocks available, and imports immediately
|
||||
/// if so, otherwise caches the blob in the data availability checker.
|
||||
async fn check_rpc_blob_availability_and_import(
|
||||
|
||||
Reference in New Issue
Block a user