Mirror of https://github.com/sigp/lighthouse.git, synced 2026-04-29 18:53:32 +00:00
Gloas spec v1.7.0-alpha.5 and beacon_chain tests (#8998)

Fix database pruning post-Gloas:

- Fix DB pruning logic (and state summaries DAG)
- Get the `beacon_chain` tests running with `FORK_NAME=gloas` 🎉

Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>
Co-Authored-By: Eitan Seri-Levi <eserilev@gmail.com>
Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>
Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu>
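The recurring pattern across the test changes below: after a Gloas block is imported, its execution payload envelope must be persisted and marked valid in fork choice before the head is recomputed. A minimal sketch of that flow, assembled from the calls added in this diff (the harness and snapshot types are the ones these tests already use; the helper name is illustrative only):

    // Sketch only -- mirrors the calls added throughout this diff.
    async fn import_gloas_envelope<E: EthSpec>(
        harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
        snapshot: &BeaconSnapshot<E>,
    ) {
        if let Some(envelope) = &snapshot.execution_envelope {
            // Persist the envelope so the next block can look up its
            // parent's envelope during import.
            harness
                .chain
                .store
                .put_payload_envelope(&snapshot.beacon_block_root, envelope)
                .expect("should store envelope");
            // Tell fork choice the payload is available so the head can
            // carry PayloadStatus::Full.
            harness
                .chain
                .canonical_head
                .fork_choice_write_lock()
                .on_valid_payload_envelope_received(snapshot.beacon_block_root)
                .expect("should update fork choice with envelope");
        }
        // Recompute the head now that the payload status is known.
        harness.chain.recompute_head_at_current_slot().await;
    }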
@@ -10,7 +10,7 @@ use std::sync::{Arc, LazyLock};
 use tree_hash::TreeHash;
 use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot};
 
-pub const VALIDATOR_COUNT: usize = 16;
+pub const VALIDATOR_COUNT: usize = 32;
 
 /// A cached set of keys.
 static KEYPAIRS: LazyLock<Vec<Keypair>> =

@@ -1389,13 +1389,18 @@ async fn attestation_to_finalized_block() {
     let earlier_block_root = earlier_block.canonical_root();
     assert_ne!(earlier_block_root, finalized_checkpoint.root);
 
+    // For Gloas, `block.state_root()` returns the pending state root, but the cold DB
+    // may store the full state root. Use `get_cold_state_root` to get the actual stored key.
+    let cold_state_root = harness
+        .chain
+        .store
+        .get_cold_state_root(earlier_slot)
+        .expect("should not error getting cold state root")
+        .expect("cold state root should be present for finalized slot in archive store");
+
     let mut state = harness
         .chain
-        .get_state(
-            &earlier_block.state_root(),
-            Some(earlier_slot),
-            CACHE_STATE_IN_TESTS,
-        )
+        .get_state(&cold_state_root, Some(earlier_slot), CACHE_STATE_IN_TESTS)
         .expect("should not error getting state")
         .expect("should find state");

@@ -31,8 +31,8 @@ use types::{test_utils::generate_deterministic_keypair, *};
 
 type E = MainnetEthSpec;
 
-// Should ideally be divisible by 3.
-const VALIDATOR_COUNT: usize = 24;
+// Gloas requires >= 1 validator per slot for PTC committee computation, so >= 32 for MainnetEthSpec.
+const VALIDATOR_COUNT: usize = 32;
 const CHAIN_SEGMENT_LENGTH: usize = 64 * 5;
 const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1];

@@ -77,10 +77,9 @@ async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<DataSidecars
             .unwrap();
         let block_epoch = full_block.epoch();
 
-        // TODO(gloas): probably need to update this test
         segment.push(BeaconSnapshot {
             beacon_block_root: snapshot.beacon_block_root,
-            execution_envelope: None,
+            execution_envelope: snapshot.execution_envelope,
             beacon_block: Arc::new(full_block),
             beacon_state: snapshot.beacon_state,
         });

@@ -196,6 +195,47 @@ where
     }
 }
 
+/// Pre-store execution payload envelopes in the harness's store.
+///
+/// Post-Gloas, block N+1 needs block N's envelope to be available when it is
+/// imported. This function stores all envelopes from the chain segment so that
+/// `process_chain_segment` can import all blocks successfully.
+// TODO(gloas): this is a bit of a hack that can be removed once `process_chain_segment` handles
+// payload envelopes
+fn store_envelopes_for_chain_segment(
+    chain_segment: &[BeaconSnapshot<E>],
+    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
+) {
+    for snapshot in chain_segment {
+        if let Some(ref envelope) = snapshot.execution_envelope {
+            harness
+                .chain
+                .store
+                .put_payload_envelope(&snapshot.beacon_block_root, envelope)
+                .expect("should store envelope");
+        }
+    }
+}
+
+/// Update fork choice with envelope payload status for all blocks in the chain segment.
+///
+/// Must be called after the blocks have been imported into fork choice.
+fn update_fork_choice_with_envelopes(
+    chain_segment: &[BeaconSnapshot<E>],
+    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
+) {
+    for snapshot in chain_segment {
+        if snapshot.execution_envelope.is_some() {
+            // Call may fail if block was invalid (it will have no fork choice node).
+            let _ = harness
+                .chain
+                .canonical_head
+                .fork_choice_write_lock()
+                .on_valid_payload_envelope_received(snapshot.beacon_block_root);
+        }
+    }
+}
+
 fn junk_signature() -> Signature {
     let kp = generate_deterministic_keypair(VALIDATOR_COUNT);
     let message = Hash256::from_slice(&[42; 32]);

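For orientation, the intended call order for the two helpers above around `process_chain_segment`, sketched from the updated tests that follow (not a new API; all calls appear in this diff, and the final `.collect()` is an assumption about the elided iterator tail):

    let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
    let (chain_segment, chain_segment_blobs) = get_chain_segment().await;

    // Before import: block N+1 needs block N's envelope in the store.
    store_envelopes_for_chain_segment(&chain_segment, &harness);

    let blocks: Vec<RangeSyncBlock<E>> =
        chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone())
            .into_iter()
            .collect();
    harness
        .chain
        .process_chain_segment(blocks, NotifyExecutionLayer::Yes)
        .await
        .into_block_error()
        .expect("should import chain segment");

    // After import: mark envelopes valid in fork choice, then pick the head.
    update_fork_choice_with_envelopes(&chain_segment, &harness);
    harness.chain.recompute_head_at_current_slot().await;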
@@ -303,6 +343,7 @@ fn update_data_column_signed_header<E: EthSpec>(
 async fn chain_segment_full_segment() {
     let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
     let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
+    store_envelopes_for_chain_segment(&chain_segment, &harness);
     let blocks: Vec<RangeSyncBlock<E>> =
         chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone())
             .into_iter()

@@ -328,6 +369,7 @@ async fn chain_segment_full_segment() {
         .into_block_error()
         .expect("should import chain segment");
 
+    update_fork_choice_with_envelopes(&chain_segment, &harness);
    harness.chain.recompute_head_at_current_slot().await;
 
     assert_eq!(

@@ -348,6 +390,7 @@ async fn chain_segment_varying_chunk_size() {
 
     for chunk_size in &[1, 2, 31, 32, 33] {
         let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
+        store_envelopes_for_chain_segment(&chain_segment, &harness);
 
         harness
             .chain

@@ -363,6 +406,7 @@ async fn chain_segment_varying_chunk_size() {
                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
         }
 
+        update_fork_choice_with_envelopes(&chain_segment, &harness);
         harness.chain.recompute_head_at_current_slot().await;
 
         assert_eq!(

@@ -514,6 +558,7 @@ async fn assert_invalid_signature(
     snapshots: &[BeaconSnapshot<E>],
     item: &str,
 ) {
+    store_envelopes_for_chain_segment(chain_segment, harness);
     let blocks: Vec<RangeSyncBlock<E>> = snapshots
         .iter()
         .zip(chain_segment_blobs.iter())

@@ -540,10 +585,22 @@ async fn assert_invalid_signature(
     harness.chain.recompute_head_at_current_slot().await;
 
     // Ensure the block will be rejected if imported on its own (without gossip checking).
-    let ancestor_blocks = chain_segment
+    // Only include blocks that haven't been imported yet (after the finalized slot) to avoid
+    // `WouldRevertFinalizedSlot` errors when part 1 already imported and finalized some blocks.
+    // Use the fork choice finalized checkpoint directly, as the cached head may not reflect
+    // finalization that occurred during process_chain_segment.
+    let finalized_slot = harness
+        .chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .finalized_checkpoint()
+        .epoch
+        .start_slot(E::slots_per_epoch());
+    let ancestor_blocks: Vec<RangeSyncBlock<E>> = chain_segment
         .iter()
         .take(block_index)
         .zip(chain_segment_blobs.iter())
+        .filter(|(snapshot, _)| snapshot.beacon_block.slot() > finalized_slot)
         .map(|(snapshot, blobs)| {
             build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone())
         })

@@ -554,6 +611,7 @@ async fn assert_invalid_signature(
         .chain
         .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes)
         .await;
+    update_fork_choice_with_envelopes(chain_segment, harness);
     harness.chain.recompute_head_at_current_slot().await;
 
     let process_res = harness

@@ -594,6 +652,7 @@ async fn get_invalid_sigs_harness(
     chain_segment: &[BeaconSnapshot<E>],
 ) -> BeaconChainHarness<EphemeralHarnessType<E>> {
     let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
+    store_envelopes_for_chain_segment(chain_segment, &harness);
     harness
         .chain
         .slot_clock

@@ -1091,6 +1150,21 @@ async fn block_gossip_verification() {
             )
             .await
             .expect("should import valid gossip verified block");
+        // Post-Gloas, store the execution payload envelope so that subsequent blocks can look up
+        // the parent envelope.
+        if let Some(ref envelope) = snapshot.execution_envelope {
+            harness
+                .chain
+                .store
+                .put_payload_envelope(&snapshot.beacon_block_root, envelope)
+                .expect("should store envelope");
+            harness
+                .chain
+                .canonical_head
+                .fork_choice_write_lock()
+                .on_valid_payload_envelope_received(snapshot.beacon_block_root)
+                .expect("should update fork choice with envelope");
+        }
         if let Some(data_sidecars) = blobs_opt {
             verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await;
         }

@@ -2040,7 +2114,10 @@ async fn range_sync_block_construction_fails_with_wrong_blob_count() {
 async fn range_sync_block_rejects_missing_custody_columns() {
     let spec = test_spec::<E>();
 
-    if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled() {
+    // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled()
+        || spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled()
+    {
         return;
     }
 

@@ -2118,7 +2195,10 @@ async fn range_sync_block_rejects_missing_custody_columns() {
 async fn rpc_block_allows_construction_past_da_boundary() {
     let spec = test_spec::<E>();
 
-    if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled() {
+    // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if !spec.fork_name_at_slot::<E>(Slot::new(0)).fulu_enabled()
+        || spec.fork_name_at_slot::<E>(Slot::new(0)).gloas_enabled()
+    {
         return;
     }
 

@@ -16,8 +16,8 @@ use types::*;
 
 type E = MainnetEthSpec;
 
-// Should ideally be divisible by 3.
-const VALIDATOR_COUNT: usize = 24;
+// >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch).
+const VALIDATOR_COUNT: usize = 32;
 
 /// A cached set of keys.
 static KEYPAIRS: LazyLock<Vec<Keypair>> =

@@ -52,7 +52,8 @@ async fn rpc_columns_with_invalid_header_signature() {
     let spec = Arc::new(test_spec::<E>());
 
     // Only run this test if columns are enabled.
-    if !spec.is_fulu_scheduled() {
+    // TODO(gloas): Gloas blocks don't have blob_kzg_commitments — blobs are in the envelope.
+    if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() {
         return;
     }
 

@@ -170,7 +170,10 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
 
 #[tokio::test]
 async fn data_column_sidecar_event_on_process_rpc_columns() {
-    if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) {
+    // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if fork_name_from_env().is_none_or(|f| !f.fulu_enabled())
+        || fork_name_from_env().is_some_and(|f| f.gloas_enabled())
+    {
         return;
     };
 

@@ -371,7 +371,7 @@ impl InvalidPayloadRig {
 /// Simple test of the different import types.
 #[tokio::test]
 async fn valid_invalid_syncing() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new();

@@ -388,7 +388,7 @@ async fn valid_invalid_syncing() {
 /// `latest_valid_hash`.
 #[tokio::test]
 async fn invalid_payload_invalidates_parent() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -445,7 +445,7 @@ async fn immediate_forkchoice_update_invalid_test(
 
 #[tokio::test]
 async fn immediate_forkchoice_update_payload_invalid() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid {

@@ -456,7 +456,7 @@ async fn immediate_forkchoice_update_payload_invalid() {
 
 #[tokio::test]
 async fn immediate_forkchoice_update_payload_invalid_block_hash() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await

@@ -464,7 +464,7 @@ async fn immediate_forkchoice_update_payload_invalid_block_hash() {
 
 #[tokio::test]
 async fn immediate_forkchoice_update_payload_invalid_terminal_block() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     immediate_forkchoice_update_invalid_test(|_| Payload::Invalid {

@@ -476,7 +476,7 @@ async fn immediate_forkchoice_update_payload_invalid_terminal_block() {
 /// Ensure the client tries to exit when the justified checkpoint is invalidated.
 #[tokio::test]
 async fn justified_checkpoint_becomes_invalid() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -520,7 +520,7 @@ async fn justified_checkpoint_becomes_invalid() {
 /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block.
 #[tokio::test]
 async fn pre_finalized_latest_valid_hash() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let num_blocks = E::slots_per_epoch() * 4;

@@ -569,7 +569,7 @@ async fn pre_finalized_latest_valid_hash() {
 /// - Will not validate `latest_valid_root` and its ancestors.
 #[tokio::test]
 async fn latest_valid_hash_will_not_validate() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     const LATEST_VALID_SLOT: u64 = 3;

@@ -618,7 +618,7 @@ async fn latest_valid_hash_will_not_validate() {
 /// Check behaviour when the `latest_valid_hash` is a junk value.
 #[tokio::test]
 async fn latest_valid_hash_is_junk() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let num_blocks = E::slots_per_epoch() * 5;

@@ -661,7 +661,7 @@ async fn latest_valid_hash_is_junk() {
 /// Check that descendants of invalid blocks are also invalidated.
 #[tokio::test]
 async fn invalidates_all_descendants() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2;

@@ -764,7 +764,7 @@ async fn invalidates_all_descendants() {
 /// Check that the head will switch after the canonical branch is invalidated.
 #[tokio::test]
 async fn switches_heads() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2;

@@ -863,7 +863,7 @@ async fn switches_heads() {
 
 #[tokio::test]
 async fn invalid_during_processing() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new();

@@ -897,7 +897,7 @@ async fn invalid_during_processing() {
 
 #[tokio::test]
 async fn invalid_after_optimistic_sync() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -937,7 +937,7 @@ async fn invalid_after_optimistic_sync() {
 
 #[tokio::test]
 async fn manually_validate_child() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -957,7 +957,7 @@ async fn manually_validate_child() {
 
 #[tokio::test]
 async fn manually_validate_parent() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -977,7 +977,7 @@ async fn manually_validate_parent() {
 
 #[tokio::test]
 async fn payload_preparation() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new();

@@ -1034,13 +1034,14 @@ async fn payload_preparation() {
         fee_recipient,
         None,
         None,
+        None,
     );
     assert_eq!(rig.previous_payload_attributes(), payload_attributes);
 }
 
 #[tokio::test]
 async fn invalid_parent() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new();

@@ -1107,7 +1108,7 @@ async fn invalid_parent() {
 
 #[tokio::test]
 async fn attesting_to_optimistic_head() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new();

@@ -1320,7 +1321,7 @@ impl InvalidHeadSetup {
 
 #[tokio::test]
 async fn recover_from_invalid_head_by_importing_blocks() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let InvalidHeadSetup {

@@ -1362,7 +1363,7 @@ async fn recover_from_invalid_head_by_importing_blocks() {
 
 #[tokio::test]
 async fn recover_from_invalid_head_after_persist_and_reboot() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let InvalidHeadSetup {

@@ -1407,7 +1408,7 @@ async fn recover_from_invalid_head_after_persist_and_reboot() {
 
 #[tokio::test]
 async fn weights_after_resetting_optimistic_status() {
-    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) {
+    if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) {
         return;
     }
     let mut rig = InvalidPayloadRig::new().enable_attestations();

@@ -845,14 +845,13 @@ async fn check_all_base_rewards_for_subset(
         .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots)
         .unwrap();
 
-    // TODO(gloas): handle payloads?
     let mut pre_state = BlockReplayer::<E, BlockReplayError, IntoIter<_, 0>>::new(
         parent_state,
         &harness.spec,
     )
     .no_signature_verification()
     .minimal_block_root_verification()
-    .apply_blocks(vec![], vec![], Some(block.slot()))
+    .apply_blocks(vec![], Some(block.slot()))
     .unwrap()
     .into_state();

@@ -27,6 +27,7 @@ use beacon_chain::{
 };
 use bls::{Keypair, Signature, SignatureBytes};
 use fixed_bytes::FixedBytesExtended;
+use fork_choice::PayloadStatus;
 use logging::create_test_tracing_subscriber;
 use maplit::hashset;
 use rand::Rng;

@@ -53,7 +54,7 @@ use types::test_utils::{SeedableRng, XorShiftRng};
 use types::*;
 
 // Should ideally be divisible by 3.
-pub const LOW_VALIDATOR_COUNT: usize = 24;
+pub const LOW_VALIDATOR_COUNT: usize = 32;
 pub const HIGH_VALIDATOR_COUNT: usize = 64;
 
 // When set to true, cache any states fetched from the db.

@@ -184,6 +185,10 @@ async fn light_client_bootstrap_test() {
         // No-op prior to Altair.
         return;
     };
+    // TODO(EIP-7732): Light client not yet implemented for Gloas.
+    if spec.is_gloas_scheduled() {
+        return;
+    }
 
     let db_path = tempdir().unwrap();
     let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone());

@@ -239,6 +244,10 @@ async fn light_client_updates_test() {
         // No-op prior to Altair.
         return;
     };
+    // TODO(EIP-7732): Light client not yet implemented for Gloas.
+    if spec.is_gloas_scheduled() {
+        return;
+    }
 
     let num_final_blocks = E::slots_per_epoch() * 2;
     let db_path = tempdir().unwrap();

@@ -568,13 +577,12 @@ async fn epoch_boundary_state_attestation_processing() {
             .get_blinded_block(&block_root)
             .unwrap()
             .expect("block exists");
-        // Use get_state as the state may be finalized by this point
+        // Use get_state as the state may be finalized by this point.
+        let state_root = block.state_root();
         let mut epoch_boundary_state = store
-            .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS)
+            .get_state(&state_root, None, CACHE_STATE_IN_TESTS)
             .expect("no error")
-            .unwrap_or_else(|| {
-                panic!("epoch boundary state should exist {:?}", block.state_root())
-            });
+            .unwrap_or_else(|| panic!("epoch boundary state should exist {:?}", state_root));
         let ebs_state_root = epoch_boundary_state.update_tree_hash_cache().unwrap();
         let mut ebs_of_ebs = store
             .get_state(&ebs_state_root, None, CACHE_STATE_IN_TESTS)

@@ -673,8 +681,11 @@ async fn forwards_iter_block_and_state_roots_until() {
             let block_root = block_roots[slot.as_usize()];
             assert_eq!(block_root_iter.next().unwrap().unwrap(), (block_root, slot));
 
+            let (iter_state_root, iter_slot) = state_root_iter.next().unwrap().unwrap();
+            assert_eq!(iter_slot, slot);
+
             let state_root = state_roots[slot.as_usize()];
-            assert_eq!(state_root_iter.next().unwrap().unwrap(), (state_root, slot));
+            assert_eq!(iter_state_root, state_root);
         }
     };

@@ -708,13 +719,8 @@ async fn block_replayer_hooks() {
         .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators)
         .await;
 
-    let (blocks, envelopes) = store
-        .load_blocks_to_replay(
-            Slot::new(0),
-            max_slot,
-            end_block_root.into(),
-            StatePayloadStatus::Pending,
-        )
+    let blocks = store
+        .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into())
         .unwrap();
 
     let mut pre_slots = vec![];

@@ -749,7 +755,7 @@ async fn block_replayer_hooks() {
             post_block_slots.push(block.slot());
             Ok(())
         }))
-        .apply_blocks(blocks, envelopes, None)
+        .apply_blocks(blocks, None)
         .unwrap()
         .into_state();

@@ -2871,12 +2877,6 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() {
         .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev)
         .unwrap()
         .unwrap();
-    let wss_state_root = harness
-        .chain
-        .state_root_at_slot(checkpoint_slot)
-        .unwrap()
-        .unwrap();
-
     let wss_block = harness
         .chain
         .store

@@ -2884,8 +2884,21 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() {
         .unwrap()
         .unwrap();
 
-    // The test premise requires the anchor block to have a payload.
-    assert!(wss_block.message().execution_payload().is_ok());
+    let wss_state_root = harness
+        .chain
+        .state_root_at_slot(checkpoint_slot)
+        .unwrap()
+        .unwrap();
+
+    // The test premise requires the anchor block to have a payload (or a payload bid in Gloas).
+    assert!(
+        wss_block.message().execution_payload().is_ok()
+            || wss_block
+                .message()
+                .body()
+                .signed_execution_payload_bid()
+                .is_ok()
+    );
 
     let wss_blobs_opt = harness
         .chain

@@ -2967,15 +2980,19 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() {
         chain.head_snapshot().beacon_state.slot()
     );
 
-    let payload_exists = chain
-        .store
-        .execution_payload_exists(&wss_block_root)
-        .unwrap_or(false);
+    // In Gloas, the execution payload envelope is separate from the block and will be synced
+    // from the network. We don't check for its existence here.
+    if !wss_block.fork_name_unchecked().gloas_enabled() {
+        let payload_exists = chain
+            .store
+            .execution_payload_exists(&wss_block_root)
+            .unwrap_or(false);
 
-    assert!(
-        payload_exists,
-        "Split block payload must exist in the new node's store after checkpoint sync"
-    );
+        assert!(
+            payload_exists,
+            "Split block payload must exist in the new node's store after checkpoint sync"
+        );
+    }
 }
 
 async fn weak_subjectivity_sync_test(

@@ -3013,18 +3030,17 @@ async fn weak_subjectivity_sync_test(
         .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev)
         .unwrap()
         .unwrap();
-    let wss_state_root = harness
-        .chain
-        .state_root_at_slot(checkpoint_slot)
-        .unwrap()
-        .unwrap();
-
     let wss_block = harness
         .chain
         .store
         .get_full_block(&wss_block_root)
         .unwrap()
         .unwrap();
+    let wss_state_root = harness
+        .chain
+        .state_root_at_slot(checkpoint_slot)
+        .unwrap()
+        .unwrap();
     let wss_blobs_opt = harness
         .chain
         .get_or_reconstruct_blobs(&wss_block_root)

@@ -3101,6 +3117,20 @@ async fn weak_subjectivity_sync_test(
         .build()
         .expect("should build");
 
+    // Store the WSS envelope to simulate it arriving from network sync.
+    // In production, the envelope would be synced from the network after checkpoint sync.
+    if let Some(envelope) = harness
+        .chain
+        .store
+        .get_payload_envelope(&wss_block.canonical_root())
+        .unwrap_or(None)
+    {
+        beacon_chain
+            .store
+            .put_payload_envelope(&wss_block.canonical_root(), &envelope)
+            .unwrap();
+    }
+
     let beacon_chain = Arc::new(beacon_chain);
     let wss_block_root = wss_block.canonical_root();
     let store_wss_block = harness

@@ -3120,6 +3150,21 @@ async fn weak_subjectivity_sync_test(
         assert_eq!(store_wss_blobs_opt, wss_blobs_opt);
     }
 
+    // Store the WSS block's envelope in the new chain (required for Gloas forward sync).
+    // The first forward block needs the checkpoint block's envelope to determine the parent's
+    // Full state.
+    if let Some(envelope) = harness
+        .chain
+        .store
+        .get_payload_envelope(&wss_block_root)
+        .unwrap()
+    {
+        beacon_chain
+            .store
+            .put_payload_envelope(&wss_block_root, &envelope)
+            .unwrap();
+    }
+
     // Apply blocks forward to reach head.
     let chain_dump = harness.chain.chain_dump().unwrap();
     let new_blocks = chain_dump

@@ -3154,6 +3199,21 @@ async fn weak_subjectivity_sync_test(
             )
             .await
             .unwrap();
 
+        // Store the envelope and apply it to fork choice.
+        if let Some(envelope) = &snapshot.execution_envelope {
+            beacon_chain
+                .store
+                .put_payload_envelope(&block_root, envelope)
+                .unwrap();
+            // Update fork choice so head selection accounts for Full payload status.
+            beacon_chain
+                .canonical_head
+                .fork_choice_write_lock()
+                .on_valid_payload_envelope_received(block_root)
+                .unwrap();
+        }
+
+        beacon_chain.recompute_head_at_current_slot().await;
 
         // Check that the new block's state can be loaded correctly.

@@ -3305,6 +3365,17 @@ async fn weak_subjectivity_sync_test(
     }
     assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0);
 
+    // Store envelopes for all historic blocks (needed for dumping the chain from the new node).
+    for snapshot in chain_dump.iter() {
+        let block_root = snapshot.beacon_block_root;
+        if let Some(envelope) = &snapshot.execution_envelope {
+            beacon_chain
+                .store
+                .put_payload_envelope(&block_root, envelope)
+                .unwrap();
+        }
+    }
+
     // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly
     if wss_block_slot != wss_state_slot {
         let new_node_block_root_at_wss_block = beacon_chain

@@ -3374,13 +3445,12 @@ async fn weak_subjectivity_sync_test(
         assert_eq!(state.canonical_root().unwrap(), state_root);
     }
 
-    // Anchor slot is still set to the slot of the checkpoint block.
-    // Note: since hot tree states the anchor slot is set to the aligned ws state slot
-    // https://github.com/sigp/lighthouse/pull/6750
-    let wss_aligned_slot = if checkpoint_slot % E::slots_per_epoch() == 0 {
-        checkpoint_slot
+    // Anchor slot is set to the WSS state slot, which is always epoch-aligned (the state is
+    // advanced to an epoch boundary during checkpoint sync).
+    let wss_aligned_slot = if wss_state_slot % E::slots_per_epoch() == 0 {
+        wss_state_slot
     } else {
-        (checkpoint_slot.epoch(E::slots_per_epoch()) + Epoch::new(1))
+        (wss_state_slot.epoch(E::slots_per_epoch()) + Epoch::new(1))
             .start_slot(E::slots_per_epoch())
     };
     assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot);

@@ -3635,6 +3705,10 @@ async fn test_import_historical_data_columns_batch_no_block_found() {
     if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) {
         return;
     };
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
+        return;
+    }
 
     let spec = test_spec::<E>();
     let db_path = tempdir().unwrap();

@@ -3745,12 +3819,14 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
 
     let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
 
-    let split_slot = Slot::new(E::slots_per_epoch() * 4);
+    let finalized_epoch_start_slot = Slot::new(E::slots_per_epoch() * 4);
     let pre_skips = 1;
     let post_skips = 1;
 
-    // Build the chain up to the intended split slot, with 3 skips before the split.
-    let slots = (1..=split_slot.as_u64() - pre_skips)
+    let split_slot = finalized_epoch_start_slot;
+
+    // Build the chain up to the intended finalized epoch slot, with 1 skip before the split.
+    let slots = (1..=finalized_epoch_start_slot.as_u64() - pre_skips)
         .map(Slot::new)
         .collect::<Vec<_>>();

@@ -3769,20 +3845,26 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
     //
     // - one that is invalid because it conflicts with finalization (slot <= finalized_slot)
     // - one that is valid because its slot is not finalized (slot > finalized_slot)
+    //
+    // Note: block verification uses finalized_checkpoint.epoch.start_slot() (==
+    // finalized_epoch_start_slot) for the finalized slot check.
     let (unadvanced_split_state, unadvanced_split_state_root) =
         harness.get_current_state_and_root();
 
     let ((invalid_fork_block, _), _) = harness
-        .make_block(unadvanced_split_state.clone(), split_slot)
+        .make_block(unadvanced_split_state.clone(), finalized_epoch_start_slot)
         .await;
     let ((valid_fork_block, _), _) = harness
-        .make_block(unadvanced_split_state.clone(), split_slot + 1)
+        .make_block(
+            unadvanced_split_state.clone(),
+            finalized_epoch_start_slot + 1,
+        )
         .await;
 
     // Advance the chain so that the intended split slot is finalized.
     // Do not attest in the epoch boundary slot, to make attestation production later easier (no
     // equivocations).
-    let finalizing_slot = split_slot + 2 * E::slots_per_epoch();
+    let finalizing_slot = finalized_epoch_start_slot + 2 * E::slots_per_epoch();
     for _ in 0..pre_skips + post_skips {
         harness.advance_slot();
     }

@@ -3834,12 +3916,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
     let (split_state_root, mut advanced_split_state) = harness
         .chain
         .store
-        .get_advanced_hot_state(
-            split.block_root,
-            StatePayloadStatus::Pending,
-            split.slot,
-            split.state_root,
-        )
+        .get_advanced_hot_state(split.block_root, split.slot, split.state_root)
         .unwrap()
         .unwrap();
     complete_state_advance(

@@ -3973,6 +4050,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo
     let num_blocks_produced = E::slots_per_epoch() * 4;
     let db_path = tempdir().unwrap();
     let spec = test_spec::<E>();
+    let is_gloas = spec.is_gloas_scheduled();
 
     let chain_config = ChainConfig {
         archive,

@@ -3995,7 +4073,11 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo
     )
     .await;
 
-    let min_version = SchemaVersion(28);
+    let min_version = if is_gloas {
+        SchemaVersion(29)
+    } else {
+        SchemaVersion(28)
+    };
 
     // Save the slot clock so that the new harness doesn't revert in time.
     let slot_clock = harness.chain.slot_clock.clone();

@@ -4565,6 +4647,10 @@ async fn fulu_prune_data_columns_happy_case() {
         // No-op if PeerDAS not scheduled.
         return;
     }
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if store.get_chain_spec().is_gloas_scheduled() {
+        return;
+    }
     let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else {
         // No-op prior to Fulu.
         return;

@@ -4620,6 +4706,10 @@ async fn fulu_prune_data_columns_no_finalization() {
         // No-op if PeerDAS not scheduled.
         return;
     }
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if store.get_chain_spec().is_gloas_scheduled() {
+        return;
+    }
     let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else {
         // No-op prior to Fulu.
         return;

@@ -4839,6 +4929,10 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) {
         // No-op if PeerDAS not scheduled.
         return;
     }
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if store.get_chain_spec().is_gloas_scheduled() {
+        return;
+    }
     let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else {
         // No-op prior to Fulu.
         return;

@@ -5156,6 +5250,10 @@ async fn test_custody_column_filtering_regular_node() {
     if !test_spec::<E>().is_peer_das_scheduled() {
         return;
     }
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if test_spec::<E>().is_gloas_scheduled() {
+        return;
+    }
 
     let db_path = tempdir().unwrap();
     let store = get_store(&db_path);

@@ -5200,6 +5298,10 @@ async fn test_custody_column_filtering_supernode() {
     if !test_spec::<E>().is_peer_das_scheduled() {
         return;
     }
+    // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope).
+    if test_spec::<E>().is_gloas_scheduled() {
+        return;
+    }
 
     let db_path = tempdir().unwrap();
     let store = get_store(&db_path);

@@ -5515,7 +5617,7 @@ async fn test_gloas_block_and_envelope_storage_generic(
     let mut state = genesis_state;
 
     let mut block_roots = vec![];
-    let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)];
+    let mut stored_states = vec![(Slot::new(0), genesis_state_root)];
 
     for i in 1..=num_slots {
         let slot = Slot::new(i);

@@ -5527,10 +5629,10 @@ async fn test_gloas_block_and_envelope_storage_generic(
 
             let state_root = state.canonical_root().unwrap();
             store.put_state(&state_root, &state).unwrap();
-            stored_states.push((slot, state.payload_status(), state_root));
+            stored_states.push((slot, state_root));
         }
 
-        let (block_contents, envelope, mut pending_state) =
+        let (block_contents, envelope, mut post_block_state) =
             harness.make_block_with_envelope(state, slot).await;
         let block_root = block_contents.0.canonical_root();

@@ -5540,21 +5642,17 @@ async fn test_gloas_block_and_envelope_storage_generic(
             .await
             .unwrap();
 
-        let pending_state_root = pending_state.update_tree_hash_cache().unwrap();
-        stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root));
+        let state_root = post_block_state.update_tree_hash_cache().unwrap();
+        stored_states.push((slot, state_root));
 
         // Process the envelope.
         let envelope = envelope.expect("Gloas block should have envelope");
-        let mut full_state = pending_state.clone();
-        let envelope_state_root = envelope.message.state_root;
-        let full_state_root = harness
-            .process_envelope(block_root, envelope, &mut full_state)
+        harness
+            .process_envelope(block_root, envelope, &post_block_state, state_root)
            .await;
-        assert_eq!(full_state_root, envelope_state_root);
-        stored_states.push((slot, StatePayloadStatus::Full, full_state_root));
 
         block_roots.push(block_root);
-        state = full_state;
+        state = post_block_state;
     }
 
     // Verify block storage.

@@ -5577,20 +5675,15 @@ async fn test_gloas_block_and_envelope_storage_generic(
 
     // Verify state storage.
     // Iterate in reverse order to frustrate the cache.
-    for (slot, payload_status, state_root) in stored_states.into_iter().rev() {
+    for (slot, state_root) in stored_states.into_iter().rev() {
         println!("{slot}: {state_root:?}");
         let Some(mut loaded_state) = store
             .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS)
             .unwrap()
         else {
-            panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}");
+            panic!("missing state at slot {slot} with root {state_root:?}");
         };
         assert_eq!(loaded_state.slot(), slot);
-        assert_eq!(
-            loaded_state.payload_status(),
-            payload_status,
-            "slot = {slot}"
-        );
         assert_eq!(
             loaded_state.canonical_root().unwrap(),
             state_root,

@@ -5600,74 +5693,6 @@ async fn test_gloas_block_and_envelope_storage_generic(
     check_db_invariants(&harness);
 }
 
-/// Test that Pending and Full states have the correct payload status through round-trip
-/// storage and retrieval.
-#[tokio::test]
-async fn test_gloas_state_payload_status() {
-    if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
-        return;
-    }
-
-    let db_path = tempdir().unwrap();
-    let store = get_store(&db_path);
-    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
-
-    let num_blocks = 6u64;
-    let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root();
-    let mut state = genesis_state;
-
-    for i in 1..=num_blocks {
-        let slot = Slot::new(i);
-        harness.advance_slot();
-
-        let (block_contents, envelope, pending_state) =
-            harness.make_block_with_envelope(state, slot).await;
-        let block_root = block_contents.0.canonical_root();
-
-        harness
-            .process_block(slot, block_root, block_contents)
-            .await
-            .unwrap();
-
-        // Verify the pending state has correct payload status.
-        assert_eq!(
-            pending_state.payload_status(),
-            StatePayloadStatus::Pending,
-            "pending state at slot {} should be Pending",
-            i
-        );
-
-        // Process the envelope and verify the full state has correct payload status.
-        let envelope = envelope.expect("Gloas block should have envelope");
-        let mut full_state = pending_state;
-        let full_state_root = harness
-            .process_envelope(block_root, envelope, &mut full_state)
-            .await;
-
-        assert_eq!(
-            full_state.payload_status(),
-            StatePayloadStatus::Full,
-            "full state at slot {} should be Full",
-            i
-        );
-
-        // Round-trip: load the full state from DB and check status.
-        let loaded_full = store
-            .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS)
-            .unwrap()
-            .expect("full state should exist in DB");
-        assert_eq!(
-            loaded_full.payload_status(),
-            StatePayloadStatus::Full,
-            "loaded full state at slot {} should be Full after round-trip",
-            i
-        );
-
-        state = full_state;
-    }
-    check_db_invariants(&harness);
-}
-
 /// Test block replay with and without envelopes.
 #[tokio::test]
 async fn test_gloas_block_replay_with_envelopes() {

@@ -5704,11 +5729,11 @@ async fn test_gloas_block_replay_with_envelopes() {
         pending_states.insert(slot, (pending_state_root, pending_state.clone()));
 
         let envelope = envelope.expect("Gloas block should have envelope");
-        let mut full_state = pending_state;
-        let full_state_root = harness
-            .process_envelope(block_root, envelope, &mut full_state)
+        let full_state = pending_state;
+        harness
+            .process_envelope(block_root, envelope, &full_state, pending_state_root)
             .await;
-        full_states.insert(slot, (full_state_root, full_state.clone()));
+        full_states.insert(slot, (pending_state_root, full_state.clone()));
 
         last_block_root = block_root;
         state = full_state;

@@ -5716,94 +5741,29 @@ async fn test_gloas_block_replay_with_envelopes() {
 
     let end_slot = Slot::new(num_blocks);
 
-    // Load blocks for Pending replay (no envelopes for the last block).
-    let (blocks_pending, envelopes_pending) = store
-        .load_blocks_to_replay(
-            Slot::new(0),
-            end_slot,
-            last_block_root,
-            StatePayloadStatus::Pending,
-        )
-        .unwrap();
-    assert!(
-        !blocks_pending.is_empty(),
-        "should have blocks for pending replay"
-    );
-    // For Pending, no envelope for the first block (slot 0) or last block; envelopes for
-    // intermediate blocks whose payloads are canonical.
-    let expected_pending_envelopes = blocks_pending.len().saturating_sub(2);
-    assert_eq!(
-        envelopes_pending.len(),
-        expected_pending_envelopes,
-        "pending replay should have envelopes for all blocks except the last"
-    );
-    assert!(
-        blocks_pending
-            .iter()
-            .skip(1)
-            .take(envelopes_pending.len())
-            .map(|block| block.slot())
-            .eq(envelopes_pending
-                .iter()
-                .map(|envelope| envelope.message.slot)),
-        "block and envelope slots should match"
-    );
-
-    // Load blocks for Full replay (envelopes for all blocks including the last).
-    let (blocks_full, envelopes_full) = store
-        .load_blocks_to_replay(
-            Slot::new(0),
-            end_slot,
-            last_block_root,
-            StatePayloadStatus::Full,
-        )
-        .unwrap();
-    assert_eq!(
-        envelopes_full.len(),
-        expected_pending_envelopes + 1,
-        "full replay should have one more envelope than pending replay"
-    );
-
-    // Replay to Pending state and verify.
-    let mut replayed_pending =
-        BlockReplayer::<MinimalEthSpec>::new(genesis_state.clone(), store.get_chain_spec())
-            .no_signature_verification()
-            .minimal_block_root_verification()
-            .desired_state_payload_status(StatePayloadStatus::Pending)
-            .apply_blocks(blocks_pending, envelopes_pending, None)
-            .expect("should replay blocks to pending state")
-            .into_state();
-    replayed_pending.apply_pending_mutations().unwrap();
-
-    let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone();
-    expected_pending.apply_pending_mutations().unwrap();
-
-    replayed_pending.drop_all_caches().unwrap();
-    expected_pending.drop_all_caches().unwrap();
-    assert_eq!(
-        replayed_pending, expected_pending,
-        "replayed pending state should match stored pending state"
-    );
-
-    // Replay to Full state and verify.
-    let mut replayed_full =
-        BlockReplayer::<MinimalEthSpec>::new(genesis_state, store.get_chain_spec())
-            .no_signature_verification()
-            .minimal_block_root_verification()
-            .desired_state_payload_status(StatePayloadStatus::Full)
-            .apply_blocks(blocks_full, envelopes_full, None)
-            .expect("should replay blocks to full state")
-            .into_state();
-    replayed_full.apply_pending_mutations().unwrap();
-
-    let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone();
-    expected_full.apply_pending_mutations().unwrap();
-
-    replayed_full.drop_all_caches().unwrap();
-    expected_full.drop_all_caches().unwrap();
-    assert_eq!(
-        replayed_full, expected_full,
-        "replayed full state should match stored full state"
-    );
+    // Load blocks for replay.
+    let blocks = store
+        .load_blocks_to_replay(Slot::new(0), end_slot, last_block_root)
+        .unwrap();
+    assert!(!blocks.is_empty(), "should have blocks for replay");
+
+    // Replay blocks and verify against the expected state.
+    let mut replayed = BlockReplayer::<MinimalEthSpec>::new(genesis_state, store.get_chain_spec())
+        .no_signature_verification()
+        .minimal_block_root_verification()
+        .apply_blocks(blocks, None)
+        .expect("should replay blocks")
+        .into_state();
+    replayed.apply_pending_mutations().unwrap();
+
+    let (_, mut expected) = pending_states.get(&end_slot).unwrap().clone();
+    expected.apply_pending_mutations().unwrap();
+
+    replayed.drop_all_caches().unwrap();
+    expected.drop_all_caches().unwrap();
+    assert_eq!(
+        replayed, expected,
+        "replayed state should match stored state"
+    );
     check_db_invariants(&harness);
 }

@@ -5836,40 +5796,43 @@ async fn test_gloas_hot_state_hierarchy() {
         let slot = Slot::new(i);
         harness.advance_slot();
 
-        let (block_contents, envelope, pending_state) =
+        let (block_contents, envelope, mut pending_state) =
             harness.make_block_with_envelope(state.clone(), slot).await;
         let block_root = block_contents.0.canonical_root();
 
         // Attest to previous block before processing next.
         if i > 1 {
             let state_root = state.update_tree_hash_cache().unwrap();
             harness.attest_block(
                 &state,
                 state_root,
                 last_block_root.into(),
                 &block_contents.0,
                 &some_validators,
             );
         }
+        let signed_block = block_contents.0.clone();
 
         harness
             .process_block(slot, block_root, block_contents)
             .await
             .unwrap();
 
+        // Attest to the current block at its own slot (same-slot attestation).
+        // In Gloas, same-slot attestations have index=0 and route to Pending in
+        // fork choice, correctly propagating weight through the Full path.
+        // Use pending_state (at slot i) so the target root resolves correctly.
+        let pending_state_root = pending_state.update_tree_hash_cache().unwrap();
+        harness.attest_block(
+            &pending_state,
+            pending_state_root,
+            block_root.into(),
+            &signed_block,
+            &some_validators,
+        );
+
         let envelope = envelope.expect("Gloas block should have envelope");
-        let mut full_state = pending_state;
+        let full_state = pending_state;
         harness
-            .process_envelope(block_root, envelope, &mut full_state)
+            .process_envelope(block_root, envelope, &full_state, pending_state_root)
             .await;
 
         last_block_root = block_root;
         state = full_state;
     }
 
-    // Verify states can be loaded and have correct payload status.
-    let _head_state = harness.get_current_state();
-    let _head_slot = harness.head_slot();
+    // Head should be the block at slot 40 with full payload.
+    let head = harness.chain.canonical_head.cached_head();
+    assert_eq!(head.head_block_root(), last_block_root);
+    assert_eq!(head.head_payload_status(), PayloadStatus::Full);
 
     // States at all slots on the canonical chain should be retrievable.
     for slot_num in 1..=num_blocks {

@@ -5880,7 +5843,7 @@ async fn test_gloas_hot_state_hierarchy() {
         let mut loaded_state = store
             .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS)
             .unwrap()
-            .unwrap();
+            .unwrap_or_else(|| panic!("missing state at {slot}/{state_root:?}"));
         assert_eq!(loaded_state.canonical_root().unwrap(), state_root);
     }

@@ -115,7 +115,18 @@ fn massive_skips() {
 
     assert!(state.slot() > 1, "the state should skip at least one slot");
 
-    if state.fork_name_unchecked().fulu_enabled() {
+    if state.fork_name_unchecked().gloas_enabled() {
+        // Gloas uses compute_balance_weighted_selection for proposer selection, which
+        // returns InvalidIndicesCount (not InsufficientValidators) when the active
+        // validator set is empty.
+        assert_eq!(
+            error,
+            SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError(
+                BeaconStateError::InvalidIndicesCount
+            )),
+            "should return error indicating that validators have been slashed out"
+        )
+    } else if state.fork_name_unchecked().fulu_enabled() {
         // post-fulu this is done in per_epoch_processing
         assert_eq!(
             error,

@@ -1006,9 +1017,12 @@ async fn pseudo_finalize_test_generic(
     };
 
     // pseudo finalize
+    // Post-Gloas the finalized state must be Pending (the block's state_root), not Full
+    // (the envelope's state_root), because the payload of the finalized block is not finalized.
+    let finalized_state_root = head.beacon_block.message().state_root();
     harness
         .chain
-        .manually_finalize_state(head.beacon_state_root(), checkpoint)
+        .manually_finalize_state(finalized_state_root, checkpoint)
         .unwrap();
 
     let split = harness.chain.store.get_split_info();

@@ -117,7 +117,8 @@ async fn missed_blocks_across_epochs() {
 
 #[tokio::test]
 async fn missed_blocks_basic() {
-    let validator_count = 16;
+    // >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch).
+    let validator_count = 32;
 
     let slots_per_epoch = E::slots_per_epoch();