Implement PeerDAS Fulu fork activation (#6795)

Addresses #6706


  This PR activates PeerDAS at the Fulu fork epoch instead of `EIP_7594_FORK_EPOCH`. This means we no longer support testing PeerDAS with Deneb / Electra, as it is now part of a hard fork.
This commit is contained in:
Jimmy Chen
2025-01-30 18:01:34 +11:00
committed by GitHub
parent 7d54a43243
commit 70194dfc6a
54 changed files with 1126 additions and 640 deletions

View File

@@ -1,7 +1,6 @@
#![cfg(not(debug_assertions))]
use beacon_chain::attestation_verification::Error as AttnError;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::builder::BeaconChainBuilder;
use beacon_chain::data_availability_checker::AvailableBlock;
use beacon_chain::schema_change::migrate_schema;
@@ -82,13 +81,26 @@ fn get_harness(
reconstruct_historic_states: true,
..ChainConfig::default()
};
get_harness_generic(store, validator_count, chain_config)
get_harness_generic(store, validator_count, chain_config, false)
}
/// Build a test harness that imports all data columns (i.e. runs as a
/// supernode), while retaining historic states like the other harnesses.
fn get_harness_import_all_data_columns(
    store: Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>,
    validator_count: usize,
) -> TestHarness {
    // Most tests expect to retain historic states, so keep that default here
    // and only flip the data-column import flag.
    get_harness_generic(
        store,
        validator_count,
        ChainConfig {
            reconstruct_historic_states: true,
            ..ChainConfig::default()
        },
        true,
    )
}
fn get_harness_generic(
store: Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>,
validator_count: usize,
chain_config: ChainConfig,
import_all_data_columns: bool,
) -> TestHarness {
let harness = TestHarness::builder(MinimalEthSpec)
.spec(store.get_chain_spec().clone())
@@ -97,6 +109,7 @@ fn get_harness_generic(
.fresh_disk_store(store)
.mock_execution_layer()
.chain_config(chain_config)
.import_all_data_columns(import_all_data_columns)
.build();
harness.advance_slot();
harness
@@ -2286,7 +2299,12 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
let temp1 = tempdir().unwrap();
let full_store = get_store(&temp1);
let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT);
// TODO(das): Run a supernode so the node has full blobs stored.
// This may not be required in the future if we end up implementing downloading checkpoint
// blobs from p2p peers:
// https://github.com/sigp/lighthouse/issues/6837
let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT);
let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
@@ -2319,10 +2337,8 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
.unwrap();
let wss_blobs_opt = harness
.chain
.store
.get_blobs(&wss_block_root)
.unwrap()
.blobs();
.get_or_reconstruct_blobs(&wss_block_root)
.unwrap();
let wss_state = full_store
.get_state(&wss_state_root, Some(checkpoint_slot))
.unwrap()
@@ -2395,14 +2411,16 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
.await
.unwrap()
.unwrap();
// This test may break in the future if we no longer store the full checkpoint data columns.
let store_wss_blobs_opt = beacon_chain
.store
.get_blobs(&wss_block_root)
.unwrap()
.blobs();
.get_or_reconstruct_blobs(&wss_block_root)
.unwrap();
assert_eq!(store_wss_block, wss_block);
assert_eq!(store_wss_blobs_opt, wss_blobs_opt);
// TODO(fulu): Remove this condition once #6760 (PeerDAS checkpoint sync) is merged.
if !beacon_chain.spec.is_peer_das_scheduled() {
assert_eq!(store_wss_blobs_opt, wss_blobs_opt);
}
// Apply blocks forward to reach head.
let chain_dump = harness.chain.chain_dump().unwrap();
@@ -2418,7 +2436,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
.await
.unwrap()
.unwrap();
let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs();
let slot = full_block.slot();
let state_root = full_block.state_root();
@@ -2426,7 +2444,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
beacon_chain
.process_block(
full_block.canonical_root(),
RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(),
harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)),
NotifyExecutionLayer::Yes,
BlockImportSource::Lookup,
|| Ok(()),
@@ -2480,13 +2498,12 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
.await
.expect("should get block")
.expect("should get block");
let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs();
if let MaybeAvailableBlock::Available(block) = harness
.chain
.data_availability_checker
.verify_kzg_for_rpc_block(
RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(),
harness.build_rpc_block_from_store_blobs(Some(block_root), Arc::new(full_block)),
)
.expect("should verify kzg")
{
@@ -2587,7 +2604,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
reconstruct_historic_states: false,
..ChainConfig::default()
};
let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config);
let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config, false);
let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
@@ -3075,6 +3092,10 @@ async fn deneb_prune_blobs_happy_case() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
if store.get_chain_spec().is_peer_das_scheduled() {
// TODO(fulu): add prune tests for Fulu / PeerDAS data columns.
return;
}
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
// No-op prior to Deneb.
return;
@@ -3122,6 +3143,10 @@ async fn deneb_prune_blobs_no_finalization() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
if store.get_chain_spec().is_peer_das_scheduled() {
// TODO(fulu): add prune tests for Fulu / PeerDAS data columns.
return;
}
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
// No-op prior to Deneb.
return;
@@ -3266,6 +3291,10 @@ async fn deneb_prune_blobs_margin_test(margin: u64) {
let db_path = tempdir().unwrap();
let store = get_store_generic(&db_path, config, test_spec::<E>());
if store.get_chain_spec().is_peer_das_scheduled() {
// TODO(fulu): add prune tests for Fulu / PeerDAS data columns.
return;
}
let Some(deneb_fork_epoch) = store.get_chain_spec().deneb_fork_epoch else {
// No-op prior to Deneb.
return;