Simplify diff strategy and expand tests (they mostly pass!)

This commit is contained in:
Michael Sproul
2026-02-26 17:15:32 +11:00
parent edf77a5298
commit e44f37895d
5 changed files with 131 additions and 102 deletions

View File

@@ -5440,27 +5440,67 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
/// Test basic Gloas block + envelope storage and retrieval.
#[tokio::test]
async fn test_gloas_block_and_envelope_storage() {
async fn test_gloas_block_and_envelope_storage_no_skips() {
test_gloas_block_and_envelope_storage_generic(32, vec![], false).await
}
#[tokio::test]
async fn test_gloas_block_and_envelope_storage_some_skips() {
test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], false).await
}
#[tokio::test]
async fn test_gloas_block_and_envelope_storage_no_skips_w_cache() {
test_gloas_block_and_envelope_storage_generic(32, vec![], true).await
}
#[tokio::test]
async fn test_gloas_block_and_envelope_storage_some_skips_w_cache() {
test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], true).await
}
async fn test_gloas_block_and_envelope_storage_generic(
num_slots: u64,
skipped_slots: Vec<u64>,
use_state_cache: bool,
) {
if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
return;
}
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let store_config = if !use_state_cache {
StoreConfig {
state_cache_size: new_non_zero_usize(1),
..StoreConfig::default()
}
} else {
StoreConfig::default()
};
let spec = test_spec::<E>();
let store = get_store_generic(&db_path, store_config, spec);
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let num_blocks = 8u64;
let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root();
let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
let mut state = genesis_state;
let mut block_roots = vec![];
let mut pending_state_roots = vec![];
let mut full_state_roots = vec![];
let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)];
for i in 1..=num_blocks {
for i in 1..=num_slots {
let slot = Slot::new(i);
harness.advance_slot();
if skipped_slots.contains(&i) {
complete_state_advance(&mut state, None, slot, spec)
.expect("should be able to advance state to slot");
let state_root = state.canonical_root().unwrap();
store.put_state(&state_root, &state).unwrap();
stored_states.push((slot, state.payload_status(), state_root));
}
let (block_contents, envelope, mut pending_state) =
harness.make_block_with_envelope(state, slot).await;
let block_root = block_contents.0.canonical_root();
@@ -5472,7 +5512,7 @@ async fn test_gloas_block_and_envelope_storage() {
.unwrap();
let pending_state_root = pending_state.update_tree_hash_cache().unwrap();
pending_state_roots.push(pending_state_root);
stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root));
// Process the envelope.
let envelope = envelope.expect("Gloas block should have envelope");
@@ -5482,13 +5522,13 @@ async fn test_gloas_block_and_envelope_storage() {
.process_envelope(block_root, envelope, &mut full_state)
.await;
assert_eq!(full_state_root, envelope_state_root);
full_state_roots.push(full_state_root);
stored_states.push((slot, StatePayloadStatus::Full, full_state_root));
block_roots.push(block_root);
state = full_state;
}
// Verify storage.
// Verify block storage.
for (i, block_root) in block_roots.iter().enumerate() {
// Block can be loaded.
assert!(
@@ -5504,41 +5544,28 @@ async fn test_gloas_block_and_envelope_storage() {
"envelope at slot {} should be in DB",
i + 1
);
}
// Pending state can be loaded.
let pending_state_root = pending_state_roots[i];
let loaded_pending_state = store
.get_state(&pending_state_root, None, CACHE_STATE_IN_TESTS)
.unwrap();
assert!(
loaded_pending_state.is_some(),
"pending state at slot {} should be in DB",
i + 1
);
let loaded_pending_state = loaded_pending_state.unwrap();
// Verify state storage.
// Iterate in reverse order to frustrate the cache.
for (slot, payload_status, state_root) in stored_states.into_iter().rev() {
println!("{slot}: {state_root:?}");
let Some(mut loaded_state) = store
.get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS)
.unwrap()
else {
panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}");
};
assert_eq!(loaded_state.slot(), slot);
assert_eq!(
loaded_pending_state.payload_status_with_skipped_pending(),
StatePayloadStatus::Pending,
"loaded pending state at slot {} should have Pending status",
i + 1
loaded_state.payload_status(),
payload_status,
"slot = {slot}"
);
// Full state can be loaded.
let full_state_root = full_state_roots[i];
let loaded_full_state = store
.get_state(&full_state_root, None, CACHE_STATE_IN_TESTS)
.unwrap();
assert!(
loaded_full_state.is_some(),
"full state at slot {} should be in DB",
i + 1
);
let loaded_full_state = loaded_full_state.unwrap();
assert_eq!(
loaded_full_state.payload_status_with_skipped_pending(),
StatePayloadStatus::Full,
"loaded full state at slot {} should have Full status",
i + 1
loaded_state.canonical_root().unwrap(),
state_root,
"slot = {slot}"
);
}
}
@@ -5574,7 +5601,7 @@ async fn test_gloas_state_payload_status() {
// Verify the pending state has correct payload status.
assert_eq!(
pending_state.payload_status_with_skipped_pending(),
pending_state.payload_status(),
StatePayloadStatus::Pending,
"pending state at slot {} should be Pending",
i
@@ -5588,7 +5615,7 @@ async fn test_gloas_state_payload_status() {
.await;
assert_eq!(
full_state.payload_status_with_skipped_pending(),
full_state.payload_status(),
StatePayloadStatus::Full,
"full state at slot {} should be Full",
i
@@ -5600,7 +5627,7 @@ async fn test_gloas_state_payload_status() {
.unwrap()
.expect("full state should exist in DB");
assert_eq!(
loaded_full.payload_status_with_skipped_pending(),
loaded_full.payload_status(),
StatePayloadStatus::Full,
"loaded full state at slot {} should be Full after round-trip",
i

View File

@@ -662,18 +662,14 @@ impl HierarchyModuli {
&self,
slot: Slot,
start_slot: Slot,
payload_status: StatePayloadStatus,
_payload_status: StatePayloadStatus,
) -> Result<StorageStrategy, Error> {
// Store all Full states by replaying from their respective Pending state at the same slot.
// Make an exception for the genesis state, which "counts as" Full by virtue of having 0x0
// in both `latest_block_hash` and `latest_execution_payload_bid.block_hash`.
if let StatePayloadStatus::Full = payload_status
&& slot >= start_slot
&& slot != 0
{
return Ok(StorageStrategy::ReplayFrom(slot));
}
// FIXME(sproul): Reverted the idea of using different storage strategies for full and
// pending states, this has the consequence of storing double diffs and double snapshots
// at full slots. The complexity of managing skipped slots was the main impetus for
// reverting the payload-status sensitive design: a Full skipped slot has no same-slot
// Pending state to replay from, so has to be handled differently from Full non-skipped
// slots.
match slot.cmp(&start_slot) {
Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)),
Ordering::Equal => return Ok(StorageStrategy::Snapshot),

View File

@@ -1665,7 +1665,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state: &BeaconState<E>,
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
let payload_status = state.payload_status_with_skipped_pending();
let payload_status = state.payload_status();
match self.state_cache.lock().put_state(
*state_root,
@@ -1735,7 +1735,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self,
*state_root,
state,
self.hot_storage_strategy(state.slot(), state.payload_status_with_skipped_pending())?,
self.hot_storage_strategy(state.slot(), state.payload_status())?,
)?;
ops.push(hot_state_summary.as_kv_store_op(*state_root));
Ok(hot_state_summary)
@@ -1748,8 +1748,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
ops: &mut Vec<KeyValueStoreOp>,
) -> Result<(), Error> {
let slot = state.slot();
let storage_strategy =
self.hot_storage_strategy(slot, state.payload_status_with_skipped_pending())?;
let storage_strategy = self.hot_storage_strategy(slot, state.payload_status())?;
match storage_strategy {
StorageStrategy::ReplayFrom(_) => {
// Already have persisted the state summary, don't persist anything else
@@ -1885,19 +1884,29 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
return Ok(StatePayloadStatus::Pending);
}
// Load the hot state summary for the previous state. If it has the same slot as this
// summary then we know this summary is for a `Full` block (payload state).
// NOTE: We treat any and all skipped-slot states as `Pending` by this definition, which is
// perhaps a bit strange (they could have a payload most-recently applied).
// TODO(gloas): could maybe simplify this by checking diff_base_slot == slot?
// Load the hot state summary for the previous state.
//
// If it has the same slot as this summary then we know this summary is for a `Full` block
// (payload state), because they are always diffed against their same-slot `Pending` state.
//
// If the previous summary has a different slot AND the latest block is from `summary.slot`,
// then this state *must* be `Pending` (it is the summary for latest block itself).
//
// Otherwise, we are at a skipped slot and must traverse the graph of state summaries
// backwards until we reach a summary for the latest block. This recursion could be quite
// far in the case of a long skip. We could optimise this in future using the
// `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB
// migration.
let previous_state_summary = self
.load_hot_state_summary(&previous_state_root)?
.ok_or(Error::MissingHotStateSummary(previous_state_root))?;
if previous_state_summary.slot == summary.slot {
Ok(StatePayloadStatus::Full)
} else {
} else if summary.slot == summary.latest_block_slot {
Ok(StatePayloadStatus::Pending)
} else {
self.get_hot_state_summary_payload_status(&previous_state_summary)
}
}
@@ -2010,6 +2019,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
) = self.load_hot_state_summary(state_root)?
{
let payload_status = self.get_hot_state_summary_payload_status(&summary)?;
debug!(
%slot,
?state_root,
?payload_status,
"Loading hot state"
);
let mut state = match self.hot_storage_strategy(slot, payload_status)? {
strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => {
let buffer_timer = metrics::start_timer_vec(
@@ -2082,9 +2097,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
desired_payload_status: StatePayloadStatus,
update_cache: bool,
) -> Result<BeaconState<E>, Error> {
if base_state.slot() == slot
&& base_state.payload_status_with_skipped_pending() == desired_payload_status
{
if base_state.slot() == slot && base_state.payload_status() == desired_payload_status {
return Ok(base_state);
}
@@ -2124,10 +2137,19 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
Ok(())
};
debug!(
%slot,
blocks = ?blocks.iter().map(|block| block.slot()).collect::<Vec<_>>(),
envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::<Vec<_>>(),
payload_status = ?desired_payload_status,
"Replaying blocks and envelopes"
);
self.replay_blocks(
base_state,
blocks,
envelopes,
desired_payload_status,
slot,
no_state_root_iter(),
Some(Box::new(state_cache_hook)),
@@ -2440,10 +2462,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
self.forwards_state_roots_iterator_until(base_state.slot(), slot, || {
Err(Error::StateShouldNotBeRequired(slot))
})?;
// TODO(gloas): calculate correct payload status for cold states
let payload_status = StatePayloadStatus::Pending;
let state = self.replay_blocks(
base_state,
blocks,
envelopes,
payload_status,
slot,
Some(state_root_iter),
None,
@@ -2681,6 +2706,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
// Load the payload for the last block if desired.
// TODO(gloas): check that we don't load a duplicate in the case of a skipped slot
if let StatePayloadStatus::Full = desired_payload_status {
let envelope = self.get_payload_envelope(&end_block_root)?.ok_or(
HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root),
@@ -2700,6 +2726,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
state: BeaconState<E>,
blocks: Vec<SignedBlindedBeaconBlock<E>>,
envelopes: Vec<SignedExecutionPayloadEnvelope<E>>,
desired_payload_status: StatePayloadStatus,
target_slot: Slot,
state_root_iter: Option<impl Iterator<Item = Result<(Hash256, Slot), Error>>>,
pre_slot_hook: Option<PreSlotHook<E, Error>>,
@@ -2708,7 +2735,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let mut block_replayer = BlockReplayer::new(state, &self.spec)
.no_signature_verification()
.minimal_block_root_verification();
.minimal_block_root_verification()
.desired_state_payload_status(desired_payload_status);
let have_state_root_iterator = state_root_iter.is_some();
if let Some(state_root_iter) = state_root_iter {
@@ -4176,21 +4204,14 @@ impl HotStateSummary {
let latest_block_root = state.get_latest_block_root(state_root);
// Payload status of the state determines a lot about how it is stored.
let payload_status = state.payload_status_with_skipped_pending();
let payload_status = state.payload_status();
let get_state_root = |slot| {
if slot == state.slot() {
// In the case where this state is a `Full` state, use the `state_root` of its
// prior `Pending` state.
if let StatePayloadStatus::Full = payload_status {
// TODO(gloas): change this assert to debug_assert_eq
assert_eq!(state.latest_block_header().slot, state.slot());
Ok(state.latest_block_header().state_root)
} else {
Ok::<_, Error>(state_root)
}
// TODO(gloas): I think we can remove this case
Ok::<_, Error>(state_root)
} else {
Ok(get_ancestor_state_root(store, state, slot).map_err(|e| {
Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| {
Error::StateSummaryIteratorError {
error: e,
from_state_root: state_root,
@@ -4210,8 +4231,12 @@ impl HotStateSummary {
let previous_state_root = if state.slot() == 0 {
// Set to 0x0 for genesis state to prevent any sort of circular reference.
Hash256::zero()
} else if let StatePayloadStatus::Full = payload_status {
get_state_root(state.slot())?
} else if let StatePayloadStatus::Full = payload_status
&& state.slot() == state.latest_block_header().slot
{
// A Full state at a non-skipped slot builds off the Pending state of the same slot,
// i.e. the state with the same `state_root` as its `BeaconBlock`
state.latest_block_header().state_root
} else {
get_state_root(state.slot().safe_sub(1_u64)?)?
};

View File

@@ -320,7 +320,7 @@ where
.block_hash;
// Similar to `is_parent_block_full`, but reading the block hash from the
// not-yet-applied `block`. The 0x0 case covers genesis (no block replay reqd).
// not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd).
if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) {
let envelope = next_envelope_at_slot(self.state.slot())?;
// State root for the next slot processing is now the envelope's state root.

View File

@@ -1284,25 +1284,6 @@ impl<E: EthSpec> BeaconState<E> {
}
}
/// Determine the payload status of this state with all skipped slots considered pending.
///
/// Prior to Gloas this is always `Pending`.
///
/// Post-Gloas, the definition of the `StatePayloadStatus` is:
///
/// - `Full` if this state is the IMMEDIATE result of envelope processing (no skipped slots)
/// - `Pending` if this state is the result of block processing, or slot processing (skipped
/// slot).
pub fn payload_status_with_skipped_pending(&self) -> StatePayloadStatus {
if !self.fork_name_unchecked().gloas_enabled() {
StatePayloadStatus::Pending
} else if self.is_parent_block_full() && self.latest_block_header().slot == self.slot() {
StatePayloadStatus::Full
} else {
StatePayloadStatus::Pending
}
}
/// Return `true` if the validator who produced `slot_signature` is eligible to aggregate.
///
/// Spec v0.12.1