mirror of
https://github.com/sigp/lighthouse.git
synced 2026-03-14 18:32:42 +00:00
Improving blob propagation post-PeerDAS with Decentralized Blob Building (#6268)
* Get blobs from EL. Co-authored-by: Michael Sproul <michael@sigmaprime.io> * Avoid cloning blobs after fetching blobs. * Address review comments and refactor code. * Fix lint. * Move blob computation metric to the right spot. * Merge branch 'unstable' into das-fetch-blobs * Merge branch 'unstable' into das-fetch-blobs # Conflicts: # beacon_node/beacon_chain/src/beacon_chain.rs # beacon_node/beacon_chain/src/block_verification.rs # beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs * Merge branch 'unstable' into das-fetch-blobs # Conflicts: # beacon_node/beacon_chain/src/beacon_chain.rs * Gradual publication of data columns for supernodes. * Recompute head after importing block with blobs from the EL. * Fix lint * Merge branch 'unstable' into das-fetch-blobs * Use blocking task instead of async when computing cells. * Merge branch 'das-fetch-blobs' of github.com:jimmygchen/lighthouse into das-fetch-blobs * Merge remote-tracking branch 'origin/unstable' into das-fetch-blobs * Fix semantic conflicts * Downgrade error log. * Merge branch 'unstable' into das-fetch-blobs # Conflicts: # beacon_node/beacon_chain/src/data_availability_checker.rs # beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs # beacon_node/execution_layer/src/engine_api.rs # beacon_node/execution_layer/src/engine_api/json_structures.rs # beacon_node/network/src/network_beacon_processor/gossip_methods.rs # beacon_node/network/src/network_beacon_processor/mod.rs # beacon_node/network/src/network_beacon_processor/sync_methods.rs * Merge branch 'unstable' into das-fetch-blobs * Publish block without waiting for blob and column proof computation. * Address review comments and refactor. * Merge branch 'unstable' into das-fetch-blobs * Fix test and docs. * Comment cleanups. * Merge branch 'unstable' into das-fetch-blobs * Address review comments and cleanup * Address review comments and cleanup * Refactor to de-duplicate gradual publication logic. 
* Add more logging. * Merge remote-tracking branch 'origin/unstable' into das-fetch-blobs # Conflicts: # Cargo.lock * Fix incorrect comparison on `num_fetched_blobs`. * Implement gradual blob publication. * Merge branch 'unstable' into das-fetch-blobs * Inline `publish_fn`. * Merge branch 'das-fetch-blobs' of github.com:jimmygchen/lighthouse into das-fetch-blobs * Gossip verify blobs before publishing * Avoid queries for 0 blobs and error for duplicates * Gossip verified engine blob before processing them, and use observe cache to detect duplicates before publishing. * Merge branch 'das-fetch-blobs' of github.com:jimmygchen/lighthouse into das-fetch-blobs # Conflicts: # beacon_node/network/src/network_beacon_processor/mod.rs * Merge branch 'unstable' into das-fetch-blobs * Fix invalid commitment inclusion proofs in blob sidecars created from EL blobs. * Only publish EL blobs triggered from gossip block, and not RPC block. * Downgrade gossip blob log to `debug`. * Merge branch 'unstable' into das-fetch-blobs * Merge branch 'unstable' into das-fetch-blobs * Grammar
This commit is contained in:
@@ -18,7 +18,7 @@ use task_executor::TaskExecutor;
|
||||
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
|
||||
use types::{
|
||||
BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList,
|
||||
Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, Slot,
|
||||
Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock,
|
||||
};
|
||||
|
||||
mod error;
|
||||
@@ -146,6 +146,10 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
self.availability_cache.sampling_column_count()
|
||||
}
|
||||
|
||||
pub(crate) fn is_supernode(&self) -> bool {
|
||||
self.get_sampling_column_count() == self.spec.number_of_columns
|
||||
}
|
||||
|
||||
/// Checks if the block root is currently in the availability cache awaiting import because
|
||||
/// of missing components.
|
||||
pub fn get_execution_valid_block(
|
||||
@@ -201,7 +205,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
pub fn put_rpc_blobs(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
epoch: Epoch,
|
||||
blobs: FixedBlobSidecarList<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
let seen_timestamp = self
|
||||
@@ -212,15 +215,12 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
// Note: currently not reporting which specific blob is invalid because we fetch all blobs
|
||||
// from the same peer for both lookup and range sync.
|
||||
|
||||
let verified_blobs = KzgVerifiedBlobList::new(
|
||||
Vec::from(blobs).into_iter().flatten(),
|
||||
&self.kzg,
|
||||
seen_timestamp,
|
||||
)
|
||||
.map_err(AvailabilityCheckError::InvalidBlobs)?;
|
||||
let verified_blobs =
|
||||
KzgVerifiedBlobList::new(blobs.iter().flatten().cloned(), &self.kzg, seen_timestamp)
|
||||
.map_err(AvailabilityCheckError::InvalidBlobs)?;
|
||||
|
||||
self.availability_cache
|
||||
.put_kzg_verified_blobs(block_root, epoch, verified_blobs, &self.log)
|
||||
.put_kzg_verified_blobs(block_root, verified_blobs, &self.log)
|
||||
}
|
||||
|
||||
/// Put a list of custody columns received via RPC into the availability cache. This performs KZG
|
||||
@@ -229,7 +229,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
pub fn put_rpc_custody_columns(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
epoch: Epoch,
|
||||
custody_columns: DataColumnSidecarList<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
// TODO(das): report which column is invalid for proper peer scoring
|
||||
@@ -248,12 +247,32 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
|
||||
self.availability_cache.put_kzg_verified_data_columns(
|
||||
block_root,
|
||||
epoch,
|
||||
verified_custody_columns,
|
||||
&self.log,
|
||||
)
|
||||
}
|
||||
|
||||
/// Put a list of blobs received from the EL pool into the availability cache.
|
||||
///
|
||||
/// This DOES NOT perform KZG verification because the KZG proofs should have been constructed
|
||||
/// immediately prior to calling this function so they are assumed to be valid.
|
||||
pub fn put_engine_blobs(
|
||||
&self,
|
||||
block_root: Hash256,
|
||||
blobs: FixedBlobSidecarList<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
let seen_timestamp = self
|
||||
.slot_clock
|
||||
.now_duration()
|
||||
.ok_or(AvailabilityCheckError::SlotClockError)?;
|
||||
|
||||
let verified_blobs =
|
||||
KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp);
|
||||
|
||||
self.availability_cache
|
||||
.put_kzg_verified_blobs(block_root, verified_blobs, &self.log)
|
||||
}
|
||||
|
||||
/// Check if we've cached other blobs for this block. If it completes a set and we also
|
||||
/// have a block cached, return the `Availability` variant triggering block import.
|
||||
/// Otherwise cache the blob sidecar.
|
||||
@@ -265,7 +284,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
self.availability_cache.put_kzg_verified_blobs(
|
||||
gossip_blob.block_root(),
|
||||
gossip_blob.epoch(),
|
||||
vec![gossip_blob.into_inner()],
|
||||
&self.log,
|
||||
)
|
||||
@@ -279,12 +297,9 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn put_gossip_data_columns(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
gossip_data_columns: Vec<GossipVerifiedDataColumn<T>>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
|
||||
|
||||
let custody_columns = gossip_data_columns
|
||||
.into_iter()
|
||||
.map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner()))
|
||||
@@ -292,7 +307,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
|
||||
self.availability_cache.put_kzg_verified_data_columns(
|
||||
block_root,
|
||||
epoch,
|
||||
custody_columns,
|
||||
&self.log,
|
||||
)
|
||||
@@ -595,12 +609,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
);
|
||||
|
||||
self.availability_cache
|
||||
.put_kzg_verified_data_columns(
|
||||
*block_root,
|
||||
slot.epoch(T::EthSpec::slots_per_epoch()),
|
||||
data_columns_to_publish.clone(),
|
||||
&self.log,
|
||||
)
|
||||
.put_kzg_verified_data_columns(*block_root, data_columns_to_publish.clone(), &self.log)
|
||||
.map(|availability| {
|
||||
DataColumnReconstructionResult::Success((
|
||||
availability,
|
||||
|
||||
Reference in New Issue
Block a user