Implement PeerDAS Fulu fork activation (#6795)

Addresses #6706


  This PR activates PeerDAS at the Fulu fork epoch instead of `EIP_7594_FORK_EPOCH`. This means we no longer support testing PeerDAS with Deneb / Electra, as it's now part of a hard fork.
This commit is contained in:
Jimmy Chen
2025-01-30 18:01:34 +11:00
committed by GitHub
parent 7d54a43243
commit 70194dfc6a
54 changed files with 1126 additions and 640 deletions

View File

@@ -1,8 +1,9 @@
use crate::blob_verification::GossipVerifiedBlob;
use crate::block_verification_types::{AsBlock, RpcBlock};
use crate::kzg_utils::blobs_to_data_column_sidecars;
use crate::data_column_verification::CustodyDataColumn;
use crate::kzg_utils::build_data_column_sidecars;
use crate::observed_operations::ObservationOutcome;
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::BeaconBlockResponseWrapper;
pub use crate::{
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
migrate::MigratorConfig,
@@ -16,6 +17,7 @@ use crate::{
BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler,
StateSkipConfig,
};
use crate::{get_block_root, BeaconBlockResponseWrapper};
use bls::get_withdrawal_credentials;
use eth2::types::SignedBlockContentsTuple;
use execution_layer::test_utils::generate_genesis_header;
@@ -74,6 +76,11 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// Environment variable to read if `ci_logger` feature is enabled.
pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR";
// Pre-computed data column sidecar using a single static blob from:
// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz`
// Decoded as a template by `generate_data_column_sidecars_from_block` so tests avoid
// recomputing KZG cells and proofs for every block.
const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] =
include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz");
// Default target aggregators to set during testing, this ensures an aggregator at each slot.
//
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use
@@ -105,7 +112,7 @@ static KZG_NO_PRECOMP: LazyLock<Arc<Kzg>> = LazyLock::new(|| {
});
pub fn get_kzg(spec: &ChainSpec) -> Arc<Kzg> {
if spec.eip7594_fork_epoch.is_some() {
if spec.fulu_fork_epoch.is_some() {
KZG_PEERDAS.clone()
} else if spec.deneb_fork_epoch.is_some() {
KZG.clone()
@@ -224,6 +231,7 @@ pub struct Builder<T: BeaconChainTypes> {
mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
testing_slot_clock: Option<TestingSlotClock>,
validator_monitor_config: Option<ValidatorMonitorConfig>,
import_all_data_columns: bool,
runtime: TestRuntime,
log: Logger,
}
@@ -366,6 +374,7 @@ where
mock_execution_layer: None,
testing_slot_clock: None,
validator_monitor_config: None,
import_all_data_columns: false,
runtime,
log,
}
@@ -458,6 +467,11 @@ where
self
}
pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self {
self.import_all_data_columns = import_all_data_columns;
self
}
pub fn execution_layer_from_url(mut self, url: &str) -> Self {
assert!(
self.execution_layer.is_none(),
@@ -575,6 +589,7 @@ where
.expect("should build dummy backend")
.shutdown_sender(shutdown_tx)
.chain_config(chain_config)
.import_all_data_columns(self.import_all_data_columns)
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
log.clone(),
5,
@@ -762,15 +777,13 @@ where
pub fn get_head_block(&self) -> RpcBlock<E> {
let block = self.chain.head_beacon_block();
let block_root = block.canonical_root();
let blobs = self.chain.get_blobs(&block_root).unwrap().blobs();
RpcBlock::new(Some(block_root), block, blobs).unwrap()
self.build_rpc_block_from_store_blobs(Some(block_root), block)
}
pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock<E> {
let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
let blobs = self.chain.get_blobs(block_root).unwrap().blobs();
RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap()
self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block))
}
pub fn get_all_validators(&self) -> Vec<usize> {
@@ -2271,22 +2284,19 @@ where
self.set_current_slot(slot);
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec))
.transpose()
.unwrap();
let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
)
.await?
.try_into()
.unwrap();
.expect("block blobs are available");
self.chain.recompute_head_at_current_slot().await;
Ok(block_hash)
}
@@ -2297,16 +2307,13 @@ where
) -> Result<SignedBeaconBlockHash, BlockError> {
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec))
.transpose()
.unwrap();
let block_root = block.canonical_root();
let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
@@ -2318,6 +2325,75 @@ where
Ok(block_hash)
}
/// Builds an `RpcBlock` from a `SignedBeaconBlock`, fetching any blobs or data
/// columns the block commits to from the database.
///
/// If `block_root` is `None` it is recomputed from the block. Blocks without KZG
/// commitments are returned without blobs; from Fulu (PeerDAS) onwards the store
/// holds data columns rather than blobs, so those are fetched instead.
pub fn build_rpc_block_from_store_blobs(
    &self,
    block_root: Option<Hash256>,
    block: Arc<SignedBeaconBlock<E>>,
) -> RpcBlock<E> {
    let root = block_root.unwrap_or_else(|| get_block_root(&block));

    // A pre-Deneb block (Err) or an empty commitment list means no sidecar data.
    let commitments_present = match block.message().body().blob_kzg_commitments() {
        Ok(commitments) => !commitments.is_empty(),
        Err(_) => false,
    };
    if !commitments_present {
        return RpcBlock::new_without_blobs(Some(root), block);
    }

    if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
        // Blobs are stored as data columns from Fulu (PeerDAS).
        let stored_columns = self.chain.get_data_columns(&root).unwrap().unwrap();
        let custody_columns = stored_columns
            .into_iter()
            .map(CustodyDataColumn::from_asserted_custody)
            .collect::<Vec<_>>();
        RpcBlock::new_with_custody_columns(Some(root), block, custody_columns, &self.spec)
            .unwrap()
    } else {
        let stored_blobs = self.chain.get_blobs(&root).unwrap().blobs();
        RpcBlock::new(Some(root), block, stored_blobs).unwrap()
    }
}
/// Builds an `RpcBlock` from a `SignedBeaconBlock` and an optional `BlobsList`
/// with its KZG proofs.
///
/// Pre-Fulu the blobs are wrapped into `BlobSidecar`s; from Fulu (PeerDAS) they
/// are converted into custody data columns instead.
fn build_rpc_block_from_blobs(
    &self,
    block_root: Hash256,
    block: Arc<SignedBeaconBlock<E, FullPayload<E>>>,
    blob_items: Option<(KzgProofs<E>, BlobsList<E>)>,
) -> Result<RpcBlock<E>, BlockError> {
    if !self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
        // Pre-Fulu: build blob sidecars from the proofs + blobs, if any.
        let sidecars = match blob_items {
            Some((proofs, blobs)) => {
                Some(BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec).unwrap())
            }
            None => None,
        };
        return Ok(RpcBlock::new(Some(block_root), block, sidecars)?);
    }

    let sampling_column_count = self
        .chain
        .data_availability_checker
        .get_sampling_column_count();

    if !blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) {
        return Ok(RpcBlock::new_without_blobs(Some(block_root), block));
    }

    // Note: this method ignores the actual custody columns and just takes the first
    // `sampling_column_count` for testing purposes only, because the chain does not
    // currently have any knowledge of the columns being custodied.
    let columns = generate_data_column_sidecars_from_block(&block, &self.spec)
        .into_iter()
        .take(sampling_column_count)
        .map(CustodyDataColumn::from_asserted_custody)
        .collect::<Vec<_>>();
    Ok(RpcBlock::new_with_custody_columns(
        Some(block_root),
        block,
        columns,
        &self.spec,
    )?)
}
pub fn process_attestations(&self, attestations: HarnessAttestations<E>) {
let num_validators = self.validator_keypairs.len();
let mut unaggregated = Vec::with_capacity(num_validators);
@@ -2991,6 +3067,56 @@ where
Ok(())
}
/// Simulate some of the blobs / data columns being seen on gossip.
/// Converts the blobs to data columns if the slot is Fulu or later.
///
/// When PeerDAS is enabled for the block's epoch, data column sidecars are
/// generated from the block and only those whose index appears in
/// `custody_columns_opt` (defaulting to the first `sampling_column_count`
/// indices) are gossip-verified and processed. Otherwise each `(proof, blob)`
/// pair is wrapped in a `BlobSidecar` and processed as a gossip blob.
///
/// Panics (via `unwrap`/`expect`) if verification or processing fails — this is
/// test-harness code, so failures should abort the test.
pub async fn process_gossip_blobs_or_columns<'a>(
&self,
block: &SignedBeaconBlock<E>,
blobs: impl Iterator<Item = &'a Blob<E>>,
proofs: impl Iterator<Item = &'a KzgProof>,
custody_columns_opt: Option<HashSet<ColumnIndex>>,
) {
let is_peerdas_enabled = self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch());
if is_peerdas_enabled {
// Default custody set: the first `sampling_column_count` column indices.
let custody_columns = custody_columns_opt.unwrap_or_else(|| {
let sampling_column_count = self
.chain
.data_availability_checker
.get_sampling_column_count() as u64;
(0..sampling_column_count).collect()
});
// Generate all column sidecars for the block, keep only the custodied
// indices, and gossip-verify each one before processing.
let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec)
.into_iter()
.filter(|c| custody_columns.contains(&c.index))
.map(|sidecar| {
let column_index = sidecar.index;
self.chain
.verify_data_column_sidecar_for_gossip(sidecar, column_index)
})
.collect::<Result<Vec<_>, _>>()
.unwrap();
// A block with no blobs (or an empty custody set) yields no columns to import.
if !verified_columns.is_empty() {
self.chain
.process_gossip_data_columns(verified_columns, || Ok(()))
.await
.unwrap();
}
} else {
// Pre-Fulu path: pair each proof with its blob; the enumeration index `i`
// is the blob's sidecar index within the block.
for (i, (kzg_proof, blob)) in proofs.into_iter().zip(blobs).enumerate() {
let sidecar =
Arc::new(BlobSidecar::new(i, blob.clone(), block, *kzg_proof).unwrap());
let gossip_blob = GossipVerifiedBlob::new(sidecar, i as u64, &self.chain)
.expect("should obtain gossip verified blob");
self.chain
.process_gossip_blob(gossip_blob)
.await
.expect("should import valid gossip verified blob");
}
}
}
}
// Junk `Debug` impl to satisfy certain trait bounds during testing.
@@ -3176,10 +3302,59 @@ pub fn generate_rand_block_and_data_columns<E: EthSpec>(
SignedBeaconBlock<E, FullPayload<E>>,
DataColumnSidecarList<E>,
) {
let kzg = get_kzg(spec);
let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec);
let blob_refs = blobs.iter().map(|b| &b.blob).collect::<Vec<_>>();
let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap();
let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec);
let data_columns = generate_data_column_sidecars_from_block(&block, spec);
(block, data_columns)
}
/// Generate data column sidecars from pre-computed cells and proofs.
///
/// Rather than running KZG cell computation for every test block, this decodes a
/// pre-computed single-blob sidecar fixture (`TEST_DATA_COLUMN_SIDECARS_SSZ`),
/// extracts its cell and proof per column, and repeats them for each of the
/// block's blob commitments. Returns an empty list for a block with no
/// commitments.
///
/// Panics if the block is pre-Deneb (no `blob_kzg_commitments` field), if the
/// fixture fails to decode, or if sidecar construction fails — acceptable in
/// test-only code.
fn generate_data_column_sidecars_from_block<E: EthSpec>(
block: &SignedBeaconBlock<E>,
spec: &ChainSpec,
) -> DataColumnSidecarList<E> {
let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap();
if kzg_commitments.is_empty() {
return vec![];
}
let kzg_commitments_inclusion_proof = block
.message()
.body()
.kzg_commitments_merkle_proof()
.unwrap();
let signed_block_header = block.signed_block_header();
// load the precomputed column sidecar to avoid computing them for every block in the tests.
let template_data_columns = RuntimeVariableList::<DataColumnSidecar<E>>::from_ssz_bytes(
TEST_DATA_COLUMN_SIDECARS_SSZ,
spec.number_of_columns as usize,
)
.unwrap();
// Split each template column into its (cell, proof) pair; collecting pairs into
// `(Vec<_>, Vec<_>)` unzips them into parallel vectors.
let (cells, proofs) = template_data_columns
.into_iter()
.map(|sidecar| {
let DataColumnSidecar {
column, kzg_proofs, ..
} = sidecar;
// There's only one cell per column for a single blob
let cell_bytes: Vec<u8> = column.into_iter().next().unwrap().into();
let kzg_cell = cell_bytes.try_into().unwrap();
let kzg_proof = kzg_proofs.into_iter().next().unwrap();
(kzg_cell, kzg_proof)
})
.collect::<(Vec<_>, Vec<_>)>();
// Repeat the cells and proofs for every blob
let blob_cells_and_proofs_vec =
vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()];
build_data_column_sidecars(
kzg_commitments.clone(),
kzg_commitments_inclusion_proof,
signed_block_header,
blob_cells_and_proofs_vec,
spec,
)
.unwrap()
}