Gloas spec v1.7.0-alpha.5 and beacon_chain tests (#8998)

Fix database pruning post-Gloas


- Fix DB pruning logic (and state summaries DAG)
- Get the `beacon_chain` tests running with `FORK_NAME=gloas` 🎉


Co-Authored-By: Michael Sproul <michael@sigmaprime.io>

Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>

Co-Authored-By: Eitan Seri-Levi <eserilev@gmail.com>

Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>

Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu>
This commit is contained in:
Michael Sproul
2026-04-21 16:29:15 +10:00
committed by GitHub
parent c028bac28d
commit cf3d5e285e
82 changed files with 1513 additions and 1391 deletions

View File

@@ -31,8 +31,8 @@ use types::{test_utils::generate_deterministic_keypair, *};
type E = MainnetEthSpec;
// Should ideally be divisible by 3.
const VALIDATOR_COUNT: usize = 24;
// Gloas requires >= 1 validator per slot for PTC committee computation, so >= 32 for MainnetEthSpec.
const VALIDATOR_COUNT: usize = 32;
const CHAIN_SEGMENT_LENGTH: usize = 64 * 5;
const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1];
@@ -77,10 +77,9 @@ async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<DataSidecars
.unwrap();
let block_epoch = full_block.epoch();
// TODO(gloas): probably need to update this test
segment.push(BeaconSnapshot {
beacon_block_root: snapshot.beacon_block_root,
execution_envelope: None,
execution_envelope: snapshot.execution_envelope,
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
});
@@ -196,6 +195,47 @@ where
}
}
/// Pre-store execution payload envelopes in the harness's store.
///
/// Post-Gloas, block N+1 needs block N's envelope to be available when it is
/// imported. This function stores all envelopes from the chain segment so that
/// `process_chain_segment` can import all blocks successfully.
// TODO(gloas): this is a bit of a hack that can be removed once `process_chain_segment` handles
// payload envelopes
fn store_envelopes_for_chain_segment(
    chain_segment: &[BeaconSnapshot<E>],
    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
) {
    for snapshot in chain_segment.iter() {
        // Snapshots without an envelope (pre-Gloas) have nothing to store.
        let Some(envelope) = snapshot.execution_envelope.as_ref() else {
            continue;
        };
        harness
            .chain
            .store
            .put_payload_envelope(&snapshot.beacon_block_root, envelope)
            .expect("should store envelope");
    }
}
/// Update fork choice with envelope payload status for all blocks in the chain segment.
///
/// Must be called after the blocks have been imported into fork choice.
fn update_fork_choice_with_envelopes(
    chain_segment: &[BeaconSnapshot<E>],
    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
) {
    // Only snapshots that actually carry an envelope are relevant here.
    for snapshot in chain_segment
        .iter()
        .filter(|snapshot| snapshot.execution_envelope.is_some())
    {
        // The result is intentionally discarded: the call may fail if the
        // block was invalid (it will have no fork choice node).
        let _ = harness
            .chain
            .canonical_head
            .fork_choice_write_lock()
            .on_valid_payload_envelope_received(snapshot.beacon_block_root);
    }
}
fn junk_signature() -> Signature {
let kp = generate_deterministic_keypair(VALIDATOR_COUNT);
let message = Hash256::from_slice(&[42; 32]);
@@ -303,6 +343,7 @@ fn update_data_column_signed_header<E: EthSpec>(
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
store_envelopes_for_chain_segment(&chain_segment, &harness);
let blocks: Vec<RangeSyncBlock<E>> =
chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone())
.into_iter()
@@ -328,6 +369,7 @@ async fn chain_segment_full_segment() {
.into_block_error()
.expect("should import chain segment");
update_fork_choice_with_envelopes(&chain_segment, &harness);
harness.chain.recompute_head_at_current_slot().await;
assert_eq!(
@@ -348,6 +390,7 @@ async fn chain_segment_varying_chunk_size() {
for chunk_size in &[1, 2, 31, 32, 33] {
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
store_envelopes_for_chain_segment(&chain_segment, &harness);
harness
.chain
@@ -363,6 +406,7 @@ async fn chain_segment_varying_chunk_size() {
.unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
}
update_fork_choice_with_envelopes(&chain_segment, &harness);
harness.chain.recompute_head_at_current_slot().await;
assert_eq!(
@@ -514,6 +558,7 @@ async fn assert_invalid_signature(
snapshots: &[BeaconSnapshot<E>],
item: &str,
) {
store_envelopes_for_chain_segment(chain_segment, harness);
let blocks: Vec<RangeSyncBlock<E>> = snapshots
.iter()
.zip(chain_segment_blobs.iter())
@@ -540,10 +585,22 @@ async fn assert_invalid_signature(
harness.chain.recompute_head_at_current_slot().await;
// Ensure the block will be rejected if imported on its own (without gossip checking).
let ancestor_blocks = chain_segment
// Only include blocks that haven't been imported yet (after the finalized slot) to avoid
// `WouldRevertFinalizedSlot` errors when part 1 already imported and finalized some blocks.
// Use the fork choice finalized checkpoint directly, as the cached head may not reflect
// finalization that occurred during process_chain_segment.
let finalized_slot = harness
.chain
.canonical_head
.fork_choice_read_lock()
.finalized_checkpoint()
.epoch
.start_slot(E::slots_per_epoch());
let ancestor_blocks: Vec<RangeSyncBlock<E>> = chain_segment
.iter()
.take(block_index)
.zip(chain_segment_blobs.iter())
.filter(|(snapshot, _)| snapshot.beacon_block.slot() > finalized_slot)
.map(|(snapshot, blobs)| {
build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone())
})
@@ -554,6 +611,7 @@ async fn assert_invalid_signature(
.chain
.process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
.await;
update_fork_choice_with_envelopes(chain_segment, harness);
harness.chain.recompute_head_at_current_slot().await;
let process_res = harness
@@ -594,6 +652,7 @@ async fn get_invalid_sigs_harness(
chain_segment: &[BeaconSnapshot<E>],
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
store_envelopes_for_chain_segment(chain_segment, &harness);
harness
.chain
.slot_clock
@@ -1091,6 +1150,21 @@ async fn block_gossip_verification() {
)
.await
.expect("should import valid gossip verified block");
// Post-Gloas, store the execution payload envelope so that subsequent blocks can look up
// the parent envelope.
if let Some(ref envelope) = snapshot.execution_envelope {
harness
.chain
.store
.put_payload_envelope(&snapshot.beacon_block_root, envelope)
.expect("should store envelope");
harness
.chain
.canonical_head
.fork_choice_write_lock()
.on_valid_payload_envelope_received(snapshot.beacon_block_root)
.expect("should update fork choice with envelope");
}
if let Some(data_sidecars) = blobs_opt {
verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await;
}
@@ -2040,7 +2114,10 @@ async fn range_sync_block_construction_fails_with_wrong_blob_count() {
async fn range_sync_block_rejects_missing_custody_columns() {
let spec = test_spec::<E>();
if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled() {
// Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled()
|| spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled()
{
return;
}
@@ -2118,7 +2195,10 @@ async fn range_sync_block_rejects_missing_custody_columns() {
async fn rpc_block_allows_construction_past_da_boundary() {
let spec = test_spec::<E>();
if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled() {
// Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled()
|| spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled()
{
return;
}