From a959c5f6401840f3afebe286470a68da86645f1d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Feb 2026 12:55:50 +1100 Subject: [PATCH 01/20] Add payload support to `BlockReplayer` --- beacon_node/beacon_chain/tests/rewards.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 3 +- .../http_api/src/attestation_performance.rs | 3 +- .../http_api/src/block_packing_efficiency.rs | 3 +- beacon_node/http_api/src/block_rewards.rs | 6 +- .../http_api/src/sync_committee_rewards.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 3 +- beacon_node/store/src/reconstruct.rs | 1 + .../state_processing/src/block_replayer.rs | 104 +++++++++++++++++- .../src/envelope_processing.rs | 2 - .../src/per_block_processing/tests.rs | 2 +- 11 files changed, 117 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index bc7c98041f..1889c1f625 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -845,13 +845,14 @@ async fn check_all_base_rewards_for_subset( .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) .unwrap(); + // TODO(gloas): handle payloads? let mut pre_state = BlockReplayer::>::new( parent_state, &harness.spec, ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .unwrap() .into_state(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6bea5f6013..365513bbb4 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -697,6 +697,7 @@ async fn block_replayer_hooks() { let mut pre_block_slots = vec![]; let mut post_block_slots = vec![]; + // TODO(gloas): handle payloads? 
let mut replay_state = BlockReplayer::::new(state, &chain.spec) .pre_slot_hook(Box::new(|_, state| { pre_slots.push(state.slot()); @@ -724,7 +725,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .unwrap() .into_state(); diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 6e285829d2..05ed36e68b 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -205,8 +205,9 @@ pub fn get_attestation_performance( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 3772470b28..725a0648a5 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -398,8 +398,9 @@ pub fn get_block_packing_efficiency( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 891f024bf9..8b355bf140 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -56,6 +56,7 @@ pub fn get_block_rewards( let mut reward_cache = Default::default(); let mut block_rewards = Vec::with_capacity(blocks.len()); + // TODO(gloas): handle payloads let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { state.build_all_committee_caches(&chain.spec)?; @@ -78,7 +79,7 @@ pub fn 
get_block_rewards( ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(unhandled_error)?; if block_replayer.state_root_miss() { @@ -138,11 +139,12 @@ pub fn compute_block_rewards( )) })?; + // TODO(gloas): handle payloads? let block_replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .map_err(unhandled_error::)?; if block_replayer.state_root_miss() { diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 9bc1f6ead4..8715fc2b1e 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -66,11 +66,12 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + // TODO(gloas): handle payloads? 
let replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .map_err(unhandled_error::)?; Ok(replayer.into_state()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6e165702a2..00415bbd2b 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2548,8 +2548,9 @@ impl, Cold: ItemStore> HotColdDB block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); } + // TODO(gloas): plumb through payloads here block_replayer - .apply_blocks(blocks, Some(target_slot)) + .apply_blocks(blocks, vec![], Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 7aca692ef9..e51543c3a2 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -67,6 +67,7 @@ where state.build_caches(&self.spec)?; + // TODO(gloas): handle payload envelope replay process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 56e667cdd3..63299cbf70 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,14 +1,19 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, + VerifyBlockRoot, VerifySignatures, + envelope_processing::{ + EnvelopeProcessingError, VerifyStateRoot, process_execution_payload_envelope, + }, + per_block_processing, + 
per_epoch_processing::EpochProcessingSummary, per_slot_processing, }; use itertools::Itertools; use std::iter::Peekable; use std::marker::PhantomData; use types::{ - BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, - Slot, + BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, + SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, }; pub type PreBlockHook<'a, E, Error> = Box< @@ -24,7 +29,7 @@ pub type PostSlotHook<'a, E, Error> = Box< >; pub type StateRootIterDefault = std::iter::Empty>; -/// Efficiently apply blocks to a state while configuring various parameters. +/// Efficiently apply blocks and payloads to a state while configuring various parameters. /// /// Usage follows a builder pattern. pub struct BlockReplayer< @@ -41,6 +46,11 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, + /// Iterator over state roots for all *block* states. + /// + /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon + /// blocks. For states corresponding to payloads, we read the state root from the payload + /// envelope. pub(crate) state_root_iter: Option>, state_root_miss: bool, _phantom: PhantomData, @@ -50,7 +60,13 @@ pub struct BlockReplayer< pub enum BlockReplayError { SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), + EnvelopeProcessing(EnvelopeProcessingError), BeaconState(BeaconStateError), + /// A payload envelope for this `slot` was required but not provided. 
+ MissingPayloadEnvelope { + slot: Slot, + block_hash: ExecutionBlockHash, + }, } impl From for BlockReplayError { @@ -65,6 +81,12 @@ impl From for BlockReplayError { } } +impl From for BlockReplayError { + fn from(e: EnvelopeProcessingError) -> Self { + Self::EnvelopeProcessing(e) + } +} + impl From for BlockReplayError { fn from(e: BeaconStateError) -> Self { Self::BeaconState(e) @@ -215,8 +237,11 @@ where pub fn apply_blocks( mut self, blocks: Vec>>, + payload_envelopes: Vec>, target_slot: Option, ) -> Result { + let mut envelopes_iter = payload_envelopes.into_iter(); + for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. if i == 0 && block.slot() <= self.state.slot() { @@ -224,7 +249,74 @@ where } while self.state.slot() < block.slot() { - let state_root = self.get_state_root(&blocks, i)?; + let block_state_root = self.get_state_root(&blocks, i)?; + + // Apply the payload for the *previous* block if the bid in the current block + // indicates that the parent is full. + // TODO(gloas): check this condition at the fork boundary. + let state_root = if self.state.slot() == self.state.latest_block_header().slot + && block.fork_name_unchecked().gloas_enabled() + { + let state_block_hash = self + .state + .latest_execution_payload_bid() + .map_err(BlockReplayError::from)? + .block_hash; + let parent_block_hash = block + .message() + .body() + .signed_execution_payload_bid() + .map_err(BlockReplayError::from)? + .message + .parent_block_hash; + + // Similar to `is_parent_block_full`, but reading the block hash from the + // not-yet-applied `block`. + if state_block_hash == parent_block_hash { + if let Some(envelope) = envelopes_iter.next() + && envelope.message.slot == self.state.slot() + { + // TODO(gloas): bulk signature verification could be relevant here? 
+ let verify_payload_signatures = + if let BlockSignatureStrategy::NoVerification = + self.block_sig_strategy + { + VerifySignatures::False + } else { + VerifySignatures::True + }; + // TODO(gloas): state root verif enabled during initial + // prototyping/testing + let verify_state_root = VerifyStateRoot::True; + process_execution_payload_envelope( + &mut self.state, + Some(block_state_root), + &envelope, + verify_payload_signatures, + verify_state_root, + self.spec, + ) + .map_err(BlockReplayError::from)?; + + // State root for next slot processing is now the envelope's state root. + envelope.message.state_root + } else { + return Err(BlockReplayError::MissingPayloadEnvelope { + slot: block.slot(), + block_hash: state_block_hash, + } + .into()); + } + } else { + // Empty payload at this slot, the state root is unchanged from when the + // beacon block was applied. + block_state_root + } + } else { + // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state + // is always the output from `self.get_state_root`. + block_state_root + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; @@ -268,6 +360,8 @@ where } } + // TODO(gloas): apply last payload, but how to know if it *should* be applied? 
+ if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { let state_root = self.get_state_root(&blocks, blocks.len())?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index c2cfeae5d3..1e3c54f1e1 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -241,8 +241,6 @@ pub fn process_execution_payload_envelope( // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; - - // TODO(gloas): gotta update these process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; process_consolidation_requests(state, &execution_requests.consolidations, spec)?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 739717b33f..cbcde715bc 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1140,7 +1140,7 @@ async fn block_replayer_peeking_state_roots() { let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) .state_root_iter(state_root_iter.into_iter()) .no_signature_verification() - .apply_blocks(vec![target_block], None) + .apply_blocks(vec![target_block], vec![], None) .unwrap(); assert_eq!( From afc6fb137cd90be64e6372bee27b34537cb3a90a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Feb 2026 15:43:19 +1100 Subject: [PATCH 02/20] Connect up DB replay_blocks/load_blocks --- beacon_node/beacon_chain/src/fork_revert.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 5 +- beacon_node/http_api/src/block_rewards.rs | 5 +- beacon_node/store/src/hot_cold_store.rs | 73 ++++++++++++++++--- .../state_processing/src/block_replayer.rs | 13 +--- 
.../types/src/block/signed_beacon_block.rs | 27 +++++++ 6 files changed, 99 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 4db79790d3..44424bbad9 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -159,7 +159,8 @@ pub fn reset_fork_choice_to_finalization, Cold: It // Replay blocks from finalized checkpoint back to head. // We do not replay attestations presently, relying on the absence of other blocks // to guarantee `head_block_root` as the head. - let blocks = store + // TODO(gloas): this code doesn't work anyway, could just delete all of it + let (blocks, _envelopes) = store .load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root) .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 365513bbb4..ef7179aadb 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -688,7 +688,7 @@ async fn block_replayer_hooks() { .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) .await; - let blocks = store + let (blocks, envelopes) = store .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) .unwrap(); @@ -697,7 +697,6 @@ async fn block_replayer_hooks() { let mut pre_block_slots = vec![]; let mut post_block_slots = vec![]; - // TODO(gloas): handle payloads? 
let mut replay_state = BlockReplayer::::new(state, &chain.spec) .pre_slot_hook(Box::new(|_, state| { pre_slots.push(state.slot()); @@ -725,7 +724,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, vec![], None) + .apply_blocks(blocks, envelopes, None) .unwrap() .into_state(); diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index 8b355bf140..cdb3d650ea 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -32,7 +32,7 @@ pub fn get_block_rewards( .map_err(unhandled_error)? .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; - let blocks = chain + let (blocks, envelopes) = chain .store .load_blocks_to_replay(start_slot, end_slot, end_block_root) .map_err(|e| unhandled_error(BeaconChainError::from(e)))?; @@ -56,7 +56,6 @@ pub fn get_block_rewards( let mut reward_cache = Default::default(); let mut block_rewards = Vec::with_capacity(blocks.len()); - // TODO(gloas): handle payloads let block_replayer = BlockReplayer::new(state, &chain.spec) .pre_block_hook(Box::new(|state, block| { state.build_all_committee_caches(&chain.spec)?; @@ -79,7 +78,7 @@ pub fn get_block_rewards( ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(blocks, vec![], None) + .apply_blocks(blocks, envelopes, None) .map_err(unhandled_error)?; if block_replayer.state_root_miss() { diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 00415bbd2b..8fbc0824d7 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -186,6 +186,7 @@ pub enum HotColdDBError { MissingHotHDiff(Hash256), MissingHDiff(Slot), MissingExecutionPayload(Hash256), + MissingExecutionPayloadEnvelope(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, MissingFrozenBlockSlot(Hash256), @@ 
-2020,7 +2021,8 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; + let (blocks, envelopes) = + self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary @@ -2053,6 +2055,7 @@ impl, Cold: ItemStore> HotColdDB self.replay_blocks( base_state, blocks, + envelopes, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2357,6 +2360,8 @@ impl, Cold: ItemStore> HotColdDB } let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; + // TODO(gloas): load payload envelopes + let envelopes = vec![]; // Include state root for base state as it is required by block processing to not // have to hash the state. @@ -2365,7 +2370,14 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; + let state = self.replay_blocks( + base_state, + blocks, + envelopes, + slot, + Some(state_root_iter), + None, + )?; debug!( target_slot = %slot, replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), @@ -2480,18 +2492,31 @@ impl, Cold: ItemStore> HotColdDB })? } - /// Load the blocks between `start_slot` and `end_slot` by backtracking from `end_block_hash`. + /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from + /// `end_block_root`. /// /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// equal to `start_slot`, to reach a state with slot equal to `end_slot`. + /// + /// Payloads are also returned in slot-ascending order, but only payloads forming part of + /// the chain are loaded (payloads for EMPTY slots are omitted). 
Prior to Gloas, an empty + /// vec of payloads will be returned. + // TODO(gloas): handle last payload + #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, start_slot: Slot, end_slot: Slot, - end_block_hash: Hash256, - ) -> Result>>, Error> { + end_block_root: Hash256, + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); - let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) + let mut blocks = ParentRootBlockIterator::new(self, end_block_root) .map(|result| result.map(|(_, block)| block)) // Include the block at the end slot (if any), it needs to be // replayed in order to construct the canonical state at `end_slot`. @@ -2518,7 +2543,35 @@ impl, Cold: ItemStore> HotColdDB }) .collect::, _>>()?; blocks.reverse(); - Ok(blocks) + + // If Gloas is not enabled for any slots in the range, just return `blocks`. + if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + + // Load envelopes. + let mut envelopes = vec![]; + + for (block, next_block) in blocks.iter().tuple_windows() { + if block.fork_name_unchecked().gloas_enabled() { + // Check next block to see if this block's payload is canonical on this chain. + let block_hash = block.payload_bid_block_hash()?; + if !next_block.is_parent_block_full(block_hash) { + // No payload at this slot (empty), nothing to load. + continue; + } + // Using `parent_root` avoids computation. + let block_root = next_block.parent_root(); + let envelope = self + .get_payload_envelope(&block_root)? + .ok_or(HotColdDBError::MissingExecutionPayloadEnvelope(block_root))?; + envelopes.push(envelope); + } + } + + Ok((blocks, envelopes)) } /// Replay `blocks` on top of `state` until `target_slot` is reached. 
@@ -2528,7 +2581,8 @@ impl, Cold: ItemStore> HotColdDB pub fn replay_blocks( &self, state: BeaconState, - blocks: Vec>>, + blocks: Vec>, + envelopes: Vec>, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2548,9 +2602,8 @@ impl, Cold: ItemStore> HotColdDB block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); } - // TODO(gloas): plumb through payloads here block_replayer - .apply_blocks(blocks, vec![], Some(target_slot)) + .apply_blocks(blocks, envelopes, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 63299cbf70..ff97cebe72 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -257,22 +257,15 @@ where let state_root = if self.state.slot() == self.state.latest_block_header().slot && block.fork_name_unchecked().gloas_enabled() { - let state_block_hash = self + let latest_bid_block_hash = self .state .latest_execution_payload_bid() .map_err(BlockReplayError::from)? .block_hash; - let parent_block_hash = block - .message() - .body() - .signed_execution_payload_bid() - .map_err(BlockReplayError::from)? - .message - .parent_block_hash; // Similar to `is_parent_block_full`, but reading the block hash from the // not-yet-applied `block`. 
- if state_block_hash == parent_block_hash { + if block.is_parent_block_full(latest_bid_block_hash) { if let Some(envelope) = envelopes_iter.next() && envelope.message.slot == self.state.slot() { @@ -303,7 +296,7 @@ where } else { return Err(BlockReplayError::MissingPayloadEnvelope { slot: block.slot(), - block_hash: state_block_hash, + block_hash: latest_bid_block_hash, } .into()); } diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index aeb3c18d95..b7b1d9d2a2 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -14,6 +14,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ + ExecutionBlockHash, block::{ BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, @@ -365,6 +366,32 @@ impl> SignedBeaconBlock format_kzg_commitments(commitments.as_ref()) } + + /// Convenience accessor for the block's bid's `block_hash`. + /// + /// This method returns an error prior to Gloas. + pub fn payload_bid_block_hash(&self) -> Result { + self.message() + .body() + .signed_execution_payload_bid() + .map(|bid| bid.message.block_hash) + } + + /// Check if the `parent_hash` in this block's `signed_payload_bid` matches `block_hash`. + /// + /// This function is useful post-Gloas for determining if the parent block is full, *without* + /// necessarily needing access to a beacon state. The passed in `parent_block_hash` MUST be the + /// `block_hash` from the parent beacon block's bid. If the parent beacon state is available + /// this can alternatively be fetched from `state.latest_payload_bid`. + /// + /// This function returns `false` for all blocks prior to Gloas. 
+ pub fn is_parent_block_full(&self, parent_block_hash: ExecutionBlockHash) -> bool { + let Ok(signed_payload_bid) = self.message().body().signed_execution_payload_bid() else { + // Prior to Gloas. + return false; + }; + signed_payload_bid.message.parent_block_hash == parent_block_hash + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. From a2e0068b85ca05dc932ce14d3e818594e55c1838 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Feb 2026 16:09:10 +1100 Subject: [PATCH 03/20] Payloads for cold blocks --- beacon_node/store/src/hot_cold_store.rs | 45 +++++++++++++++++++------ 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 8fbc0824d7..d858ef904e 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2359,9 +2359,7 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; - // TODO(gloas): load payload envelopes - let envelopes = vec![]; + let (blocks, envelopes) = self.load_cold_blocks(base_state.slot() + 1, slot)?; // Include state root for base state as it is required by block processing to not // have to hash the state. @@ -2470,26 +2468,44 @@ impl, Cold: ItemStore> HotColdDB } } - /// Load cold blocks between `start_slot` and `end_slot` inclusive. + /// Load cold blocks and payload envelopes between `start_slot` and `end_slot` inclusive. 
+ #[allow(clippy::type_complexity)] pub fn load_cold_blocks( &self, start_slot: Slot, end_slot: Slot, - ) -> Result>, Error> { + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); let block_root_iter = self.forwards_block_roots_iterator_until(start_slot, end_slot, || { Err(Error::StateShouldNotBeRequired(end_slot)) })?; - process_results(block_root_iter, |iter| { + let blocks = process_results(block_root_iter, |iter| { iter.map(|(block_root, _slot)| block_root) .dedup() .map(|block_root| { self.get_blinded_block(&block_root)? .ok_or(Error::MissingBlock(block_root)) }) - .collect() - })? + .collect::, Error>>() + }) + .flatten()?; + + // If Gloas is not enabled for any slots in the range, just return `blocks`. + if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + let envelopes = self.load_payload_envelopes_for_blocks(&blocks)?; + + Ok((blocks, envelopes)) } /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from @@ -2551,7 +2567,15 @@ impl, Cold: ItemStore> HotColdDB return Ok((blocks, vec![])); } - // Load envelopes. + let envelopes = self.load_payload_envelopes_for_blocks(&blocks)?; + + Ok((blocks, envelopes)) + } + + pub fn load_payload_envelopes_for_blocks( + &self, + blocks: &[SignedBlindedBeaconBlock], + ) -> Result>, Error> { let mut envelopes = vec![]; for (block, next_block) in blocks.iter().tuple_windows() { @@ -2570,8 +2594,7 @@ impl, Cold: ItemStore> HotColdDB envelopes.push(envelope); } } - - Ok((blocks, envelopes)) + Ok(envelopes) } /// Replay `blocks` on top of `state` until `target_slot` is reached. 
From b3d2e85e55509210809e32aa8e491e1a119dae7f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Feb 2026 17:28:46 +1100 Subject: [PATCH 04/20] Avoid Result::flatten (would require MSRV bump) --- beacon_node/store/src/hot_cold_store.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index d858ef904e..849099ecfb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2494,8 +2494,7 @@ impl, Cold: ItemStore> HotColdDB .ok_or(Error::MissingBlock(block_root)) }) .collect::, Error>>() - }) - .flatten()?; + })??; // If Gloas is not enabled for any slots in the range, just return `blocks`. if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() From a3f31835ab6bb1e2903e1f95c9a96668482486de Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 23 Feb 2026 21:14:27 +1100 Subject: [PATCH 05/20] Add StatePayloadStatus to BlockReplayer --- .../state_processing/src/block_replayer.rs | 130 +++++++++++------- consensus/types/src/execution/mod.rs | 2 + .../src/execution/state_payload_status.rs | 18 +++ 3 files changed, 104 insertions(+), 46 deletions(-) create mode 100644 consensus/types/src/execution/state_payload_status.rs diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ff97cebe72..22096293af 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -12,8 +12,8 @@ use itertools::Itertools; use std::iter::Peekable; use std::marker::PhantomData; use types::{ - BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, - SignedBeaconBlock, SignedExecutionPayloadEnvelope, Slot, + BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, + SignedExecutionPayloadEnvelope, Slot, execution::StatePayloadStatus, }; pub 
type PreBlockHook<'a, E, Error> = Box< @@ -53,6 +53,13 @@ pub struct BlockReplayer< /// envelope. pub(crate) state_root_iter: Option>, state_root_miss: bool, + /// The payload status of the state desired as the end result of block replay. + /// + /// This dictates whether a payload should be applied after applying the last block. + /// + /// Prior to Gloas, this should always be set to `StatePayloadStatus::Pending` to indicate + /// that no envelope needs to be applied. + desired_state_payload_status: StatePayloadStatus, _phantom: PhantomData, } @@ -65,7 +72,6 @@ pub enum BlockReplayError { /// A payload envelope for this `slot` was required but not provided. MissingPayloadEnvelope { slot: Slot, - block_hash: ExecutionBlockHash, }, } @@ -118,6 +124,7 @@ where post_slot_hook: None, state_root_iter: None, state_root_miss: false, + desired_state_payload_status: StatePayloadStatus::Pending, _phantom: PhantomData, } } @@ -183,6 +190,14 @@ where self } + /// Set the desired payload status of the state reached by replay. + /// + /// This determines whether to apply a payload after applying the last block. + pub fn desired_state_payload_status(mut self, payload_status: StatePayloadStatus) -> Self { + self.desired_state_payload_status = payload_status; + self + } + /// Compute the state root for `self.state` as efficiently as possible. /// /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be @@ -230,6 +245,38 @@ where Ok(state_root) } + /// Apply an execution payload envelope to `self.state`. + /// + /// The `block_state_root` MUST be the `state_root` of the most recently applied block. + /// + /// Returns the `state_root` of `self.state` after payload application. + fn apply_payload_envelope( + &mut self, + envelope: &SignedExecutionPayloadEnvelope, + block_state_root: Hash256, + ) -> Result { + // TODO(gloas): bulk signature verification could be relevant here? 
+ let verify_payload_signatures = + if let BlockSignatureStrategy::NoVerification = self.block_sig_strategy { + VerifySignatures::False + } else { + VerifySignatures::True + }; + // TODO(gloas): state root verif enabled during initial prototyping + let verify_state_root = VerifyStateRoot::True; + process_execution_payload_envelope( + &mut self.state, + Some(block_state_root), + envelope, + verify_payload_signatures, + verify_state_root, + self.spec, + ) + .map_err(BlockReplayError::from)?; + + Ok(envelope.message.state_root) + } + /// Apply `blocks` atop `self.state`, taking care of slot processing. /// /// If `target_slot` is provided then the state will be advanced through to `target_slot` @@ -242,6 +289,16 @@ where ) -> Result { let mut envelopes_iter = payload_envelopes.into_iter(); + let mut next_envelope_at_slot = |slot| { + if let Some(envelope) = envelopes_iter.next() + && envelope.message.slot == slot + { + Ok(envelope) + } else { + Err(BlockReplayError::MissingPayloadEnvelope { slot }) + } + }; + for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. if i == 0 && block.slot() <= self.state.slot() { @@ -249,13 +306,12 @@ where } while self.state.slot() < block.slot() { - let block_state_root = self.get_state_root(&blocks, i)?; + let mut state_root = self.get_state_root(&blocks, i)?; // Apply the payload for the *previous* block if the bid in the current block - // indicates that the parent is full. - // TODO(gloas): check this condition at the fork boundary. - let state_root = if self.state.slot() == self.state.latest_block_header().slot - && block.fork_name_unchecked().gloas_enabled() + // indicates that the parent is full (and it hasn't already been applied). 
+ state_root = if block.fork_name_unchecked().gloas_enabled() + && self.state.slot() == self.state.latest_block_header().slot { let latest_bid_block_hash = self .state @@ -266,49 +322,18 @@ where // Similar to `is_parent_block_full`, but reading the block hash from the // not-yet-applied `block`. if block.is_parent_block_full(latest_bid_block_hash) { - if let Some(envelope) = envelopes_iter.next() - && envelope.message.slot == self.state.slot() - { - // TODO(gloas): bulk signature verification could be relevant here? - let verify_payload_signatures = - if let BlockSignatureStrategy::NoVerification = - self.block_sig_strategy - { - VerifySignatures::False - } else { - VerifySignatures::True - }; - // TODO(gloas): state root verif enabled during initial - // prototyping/testing - let verify_state_root = VerifyStateRoot::True; - process_execution_payload_envelope( - &mut self.state, - Some(block_state_root), - &envelope, - verify_payload_signatures, - verify_state_root, - self.spec, - ) - .map_err(BlockReplayError::from)?; - - // State root for next slot processing is now the envelope's state root. - envelope.message.state_root - } else { - return Err(BlockReplayError::MissingPayloadEnvelope { - slot: block.slot(), - block_hash: latest_bid_block_hash, - } - .into()); - } + let envelope = next_envelope_at_slot(self.state.slot())?; + // State root for the next slot processing is now the envelope's state root. + self.apply_payload_envelope(&envelope, state_root)? } else { // Empty payload at this slot, the state root is unchanged from when the // beacon block was applied. - block_state_root + state_root } } else { // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state // is always the output from `self.get_state_root`. - block_state_root + state_root }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { @@ -353,11 +378,24 @@ where } } - // TODO(gloas): apply last payload, but how to know if it *should* be applied? 
+ // Apply the last payload if desired. + let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status + && let Some(last_block) = blocks.last() + { + let envelope = next_envelope_at_slot(self.state.slot())?; + Some(self.apply_payload_envelope(&envelope, last_block.state_root())?) + } else { + None + }; if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - let state_root = self.get_state_root(&blocks, blocks.len())?; + // Read state root from `opt_state_root` if a payload was just applied. + let state_root = if let Some(root) = opt_state_root.take() { + root + } else { + self.get_state_root(&blocks, blocks.len())? + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index a3d4ed8730..591be32b24 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -12,6 +12,7 @@ mod payload; mod signed_bls_to_execution_change; mod signed_execution_payload_bid; mod signed_execution_payload_envelope; +mod state_payload_status; pub use bls_to_execution_change::BlsToExecutionChange; pub use eth1_data::Eth1Data; @@ -41,3 +42,4 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; +pub use state_payload_status::StatePayloadStatus; diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs new file mode 100644 index 0000000000..053ed14ec4 --- /dev/null +++ b/consensus/types/src/execution/state_payload_status.rs @@ -0,0 +1,18 @@ +use serde::{Deserialize, Serialize}; + +/// Payload status as it applies to a `BeaconState` post-Gloas. 
+/// +/// A state can either be a post-state for a block (in which case we call it `Pending`) or a +/// payload envelope (`Full`). When handling states it is often necessary to know which of these +/// two variants is required. +/// +/// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether +/// the payload for the most-recently applied block was also applied. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum StatePayloadStatus { + /// For states produced by `process_block` executed on a `BeaconBlock`. + Pending, + /// For states produced by `process_execution_payload` on a `ExecutionPayloadEnvelope`. + Full, +} From adc049805789f67d0e1c2b93a90f8a7a2fe97040 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 15:23:54 +1100 Subject: [PATCH 06/20] Delete fork_revert feature --- beacon_node/beacon_chain/src/builder.rs | 43 +--- beacon_node/beacon_chain/src/fork_revert.rs | 204 ------------------ beacon_node/beacon_chain/src/lib.rs | 1 - beacon_node/beacon_chain/tests/store_tests.rs | 182 ---------------- 4 files changed, 5 insertions(+), 425 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/fork_revert.rs diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 4c82c93ba3..2c1dae9215 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -7,7 +7,6 @@ use crate::beacon_proposer_cache::BeaconProposerCache; use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::fork_choice_signal::ForkChoiceSignalTx; -use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; use crate::kzg_utils::{build_data_column_sidecars_fulu, build_data_column_sidecars_gloas}; use 
crate::light_client_server_cache::LightClientServerCache; @@ -778,49 +777,17 @@ where .get_head(current_slot, &self.spec) .map_err(|e| format!("Unable to get fork choice head: {:?}", e))?; - // Try to decode the head block according to the current fork, if that fails, try - // to backtrack to before the most recent fork. - let (head_block_root, head_block, head_reverted) = - match store.get_full_block(&initial_head_block_root) { - Ok(Some(block)) => (initial_head_block_root, block, false), - Ok(None) => return Err("Head block not found in store".into()), - Err(StoreError::SszDecodeError(_)) => { - error!( - message = "This node has likely missed a hard fork. \ - It will try to revert the invalid blocks and keep running, \ - but any stray blocks and states will not be deleted. \ - Long-term you should consider re-syncing this node.", - "Error decoding head block" - ); - let (block_root, block) = revert_to_fork_boundary( - current_slot, - initial_head_block_root, - store.clone(), - &self.spec, - )?; - - (block_root, block, true) - } - Err(e) => return Err(descriptive_db_error("head block", &e)), - }; + let head_block_root = initial_head_block_root; + let head_block = store + .get_full_block(&initial_head_block_root) + .map_err(|e| descriptive_db_error("head block", &e))? + .ok_or("Head block not found in store")?; let (_head_state_root, head_state) = store .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; - // If the head reverted then we need to reset fork choice using the new head's finalized - // checkpoint. 
- if head_reverted { - fork_choice = reset_fork_choice_to_finalization( - head_block_root, - &head_state, - store.clone(), - Some(current_slot), - &self.spec, - )?; - } - let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; let mut head_snapshot = BeaconSnapshot { diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs deleted file mode 100644 index 4db79790d3..0000000000 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ /dev/null @@ -1,204 +0,0 @@ -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; -use itertools::process_results; -use state_processing::state_advance::complete_state_advance; -use state_processing::{ - ConsensusContext, VerifyBlockRoot, per_block_processing, - per_block_processing::BlockSignatureStrategy, -}; -use std::sync::Arc; -use std::time::Duration; -use store::{HotColdDB, ItemStore, iter::ParentRootBlockIterator}; -use tracing::{info, warn}; -use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; - -const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \ - consider deleting it by running with the --purge-db flag."; - -/// Revert the head to the last block before the most recent hard fork. -/// -/// This function is destructive and should only be used if there is no viable alternative. It will -/// cause the reverted blocks and states to be completely forgotten, lying dormant in the database -/// forever. -/// -/// Return the `(head_block_root, head_block)` that should be used post-reversion. 
-pub fn revert_to_fork_boundary, Cold: ItemStore>( - current_slot: Slot, - head_block_root: Hash256, - store: Arc>, - spec: &ChainSpec, -) -> Result<(Hash256, SignedBeaconBlock), String> { - let current_fork = spec.fork_name_at_slot::(current_slot); - let fork_epoch = spec - .fork_epoch(current_fork) - .ok_or_else(|| format!("Current fork '{}' never activates", current_fork))?; - - if current_fork == ForkName::Base { - return Err(format!( - "Cannot revert to before phase0 hard fork. {}", - CORRUPT_DB_MESSAGE - )); - } - - warn!( - target_fork = %current_fork, - %fork_epoch, - "Reverting invalid head block" - ); - let block_iter = ParentRootBlockIterator::fork_tolerant(&store, head_block_root); - - let (block_root, blinded_block) = process_results(block_iter, |mut iter| { - iter.find_map(|(block_root, block)| { - if block.slot() < fork_epoch.start_slot(E::slots_per_epoch()) { - Some((block_root, block)) - } else { - info!( - ?block_root, - slot = %block.slot(), - "Reverting block" - ); - None - } - }) - }) - .map_err(|e| { - format!( - "Error fetching blocks to revert: {:?}. {}", - e, CORRUPT_DB_MESSAGE - ) - })? - .ok_or_else(|| format!("No pre-fork blocks found. {}", CORRUPT_DB_MESSAGE))?; - - let block = store - .make_full_block(&block_root, blinded_block) - .map_err(|e| format!("Unable to add payload to new head block: {:?}", e))?; - - Ok((block_root, block)) -} - -/// Reset fork choice to the finalized checkpoint of the supplied head state. -/// -/// The supplied `head_block_root` should correspond to the most recently applied block on -/// `head_state`. -/// -/// This function avoids quirks of fork choice initialization by replaying all of the blocks from -/// the checkpoint to the head. -/// -/// See this issue for details: https://github.com/ethereum/consensus-specs/issues/2566 -/// -/// It will fail if the finalized state or any of the blocks to replay are unavailable. 
-/// -/// WARNING: this function is destructive and causes fork choice to permanently forget all -/// chains other than the chain leading to `head_block_root`. It should only be used in extreme -/// circumstances when there is no better alternative. -pub fn reset_fork_choice_to_finalization, Cold: ItemStore>( - head_block_root: Hash256, - head_state: &BeaconState, - store: Arc>, - current_slot: Option, - spec: &ChainSpec, -) -> Result, E>, String> { - // Fetch finalized block. - let finalized_checkpoint = head_state.finalized_checkpoint(); - let finalized_block_root = finalized_checkpoint.root; - let finalized_block = store - .get_full_block(&finalized_block_root) - .map_err(|e| format!("Error loading finalized block: {:?}", e))? - .ok_or_else(|| { - format!( - "Finalized block missing for revert: {:?}", - finalized_block_root - ) - })?; - - // Advance finalized state to finalized epoch (to handle skipped slots). - let finalized_state_root = finalized_block.state_root(); - // The enshrined finalized state should be in the state cache. - let mut finalized_state = store - .get_state(&finalized_state_root, Some(finalized_block.slot()), true) - .map_err(|e| format!("Error loading finalized state: {:?}", e))? 
- .ok_or_else(|| { - format!( - "Finalized block state missing from database: {:?}", - finalized_state_root - ) - })?; - let finalized_slot = finalized_checkpoint.epoch.start_slot(E::slots_per_epoch()); - complete_state_advance( - &mut finalized_state, - Some(finalized_state_root), - finalized_slot, - spec, - ) - .map_err(|e| { - format!( - "Error advancing finalized state to finalized epoch: {:?}", - e - ) - })?; - let finalized_snapshot = BeaconSnapshot { - beacon_block_root: finalized_block_root, - beacon_block: Arc::new(finalized_block), - beacon_state: finalized_state, - }; - - let fc_store = - BeaconForkChoiceStore::get_forkchoice_store(store.clone(), finalized_snapshot.clone()) - .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?; - - let mut fork_choice = ForkChoice::from_anchor( - fc_store, - finalized_block_root, - &finalized_snapshot.beacon_block, - &finalized_snapshot.beacon_state, - current_slot, - spec, - ) - .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; - - // Replay blocks from finalized checkpoint back to head. - // We do not replay attestations presently, relying on the absence of other blocks - // to guarantee `head_block_root` as the head. 
- let blocks = store - .load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root) - .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; - - let mut state = finalized_snapshot.beacon_state; - for block in blocks { - complete_state_advance(&mut state, None, block.slot(), spec) - .map_err(|e| format!("State advance failed: {:?}", e))?; - - let mut ctxt = ConsensusContext::new(block.slot()) - .set_proposer_index(block.message().proposer_index()); - per_block_processing( - &mut state, - &block, - BlockSignatureStrategy::NoVerification, - VerifyBlockRoot::True, - &mut ctxt, - spec, - ) - .map_err(|e| format!("Error replaying block: {:?}", e))?; - - // Setting this to unverified is the safest solution, since we don't have a way to - // retro-actively determine if they were valid or not. - // - // This scenario is so rare that it seems OK to double-verify some blocks. - let payload_verification_status = PayloadVerificationStatus::Optimistic; - - fork_choice - .on_block( - block.slot(), - block.message(), - block.canonical_root(), - // Reward proposer boost. We are reinforcing the canonical chain. 
- Duration::from_secs(0), - &state, - payload_verification_status, - spec, - ) - .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; - } - - Ok(fork_choice) -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3b03395a66..e1a190ffb3 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -26,7 +26,6 @@ pub mod events; pub mod execution_payload; pub mod fetch_blobs; pub mod fork_choice_signal; -pub mod fork_revert; pub mod graffiti_calculator; pub mod historical_blocks; pub mod historical_data_columns; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 6bea5f6013..ff20e999bb 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3924,188 +3924,6 @@ async fn finalizes_after_resuming_from_db() { ); } -#[allow(clippy::large_stack_frames)] -#[tokio::test] -async fn revert_minority_fork_on_resume() { - let validator_count = 16; - let slots_per_epoch = MinimalEthSpec::slots_per_epoch(); - - let fork_epoch = Epoch::new(4); - let fork_slot = fork_epoch.start_slot(slots_per_epoch); - let initial_blocks = slots_per_epoch * fork_epoch.as_u64() - 1; - let post_fork_blocks = slots_per_epoch * 3; - - let mut spec1 = MinimalEthSpec::default_spec(); - spec1.altair_fork_epoch = None; - let mut spec2 = MinimalEthSpec::default_spec(); - spec2.altair_fork_epoch = Some(fork_epoch); - - let all_validators = (0..validator_count).collect::>(); - - // Chain with no fork epoch configured. - let db_path1 = tempdir().unwrap(); - let store1 = get_store_generic(&db_path1, StoreConfig::default(), spec1.clone()); - let harness1 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec1.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .fresh_disk_store(store1) - .mock_execution_layer() - .build(); - - // Chain with fork epoch configured. 
- let db_path2 = tempdir().unwrap(); - let store2 = get_store_generic(&db_path2, StoreConfig::default(), spec2.clone()); - let harness2 = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec2.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .fresh_disk_store(store2) - .mock_execution_layer() - .build(); - - // Apply the same blocks to both chains initially. - let mut state = harness1.get_current_state(); - let mut block_root = harness1.chain.genesis_block_root; - for slot in (1..=initial_blocks).map(Slot::new) { - let state_root = state.update_tree_hash_cache().unwrap(); - - let attestations = harness1.make_attestations( - &all_validators, - &state, - state_root, - block_root.into(), - slot, - ); - harness1.set_current_slot(slot); - harness2.set_current_slot(slot); - harness1.process_attestations(attestations.clone(), &state); - harness2.process_attestations(attestations, &state); - - let ((block, blobs), new_state) = harness1.make_block(state, slot).await; - - harness1 - .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) - .await - .unwrap(); - harness2 - .process_block(slot, block.canonical_root(), (block.clone(), blobs.clone())) - .await - .unwrap(); - - state = new_state; - block_root = block.canonical_root(); - } - - assert_eq!(harness1.head_slot(), fork_slot - 1); - assert_eq!(harness2.head_slot(), fork_slot - 1); - - // Fork the two chains. - let mut state1 = state.clone(); - let mut state2 = state.clone(); - - let mut majority_blocks = vec![]; - - for i in 0..post_fork_blocks { - let slot = fork_slot + i; - - // Attestations on majority chain. - let state_root = state.update_tree_hash_cache().unwrap(); - - let attestations = harness2.make_attestations( - &all_validators, - &state2, - state_root, - block_root.into(), - slot, - ); - harness2.set_current_slot(slot); - harness2.process_attestations(attestations, &state2); - - // Minority chain block (no attesters). 
- let ((block1, blobs1), new_state1) = harness1.make_block(state1, slot).await; - harness1 - .process_block(slot, block1.canonical_root(), (block1, blobs1)) - .await - .unwrap(); - state1 = new_state1; - - // Majority chain block (all attesters). - let ((block2, blobs2), new_state2) = harness2.make_block(state2, slot).await; - harness2 - .process_block(slot, block2.canonical_root(), (block2.clone(), blobs2)) - .await - .unwrap(); - - state2 = new_state2; - block_root = block2.canonical_root(); - - majority_blocks.push(block2); - } - - let end_slot = fork_slot + post_fork_blocks - 1; - assert_eq!(harness1.head_slot(), end_slot); - assert_eq!(harness2.head_slot(), end_slot); - - // Resume from disk with the hard-fork activated: this should revert the post-fork blocks. - // We have to do some hackery with the `slot_clock` so that the correct slot is set when - // the beacon chain builder loads the head block. - drop(harness1); - let resume_store = get_store_generic(&db_path1, StoreConfig::default(), spec2.clone()); - - let resumed_harness = TestHarness::builder(MinimalEthSpec) - .spec(spec2.clone().into()) - .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .resumed_disk_store(resume_store) - .override_store_mutator(Box::new(move |mut builder| { - builder = builder - .resume_from_db() - .unwrap() - .testing_slot_clock(spec2.get_slot_duration()) - .unwrap(); - builder - .get_slot_clock() - .unwrap() - .set_slot(end_slot.as_u64()); - builder - })) - .mock_execution_layer() - .build(); - - // Head should now be just before the fork. - resumed_harness.chain.recompute_head_at_current_slot().await; - assert_eq!(resumed_harness.head_slot(), fork_slot - 1); - - // Fork choice should only know the canonical head. When we reverted the head we also should - // have called `reset_fork_choice_to_finalization` which rebuilds fork choice from scratch - // without the reverted block. 
- assert_eq!( - resumed_harness.chain.heads(), - vec![(resumed_harness.head_block_root(), fork_slot - 1)] - ); - - // Apply blocks from the majority chain and trigger finalization. - let initial_split_slot = resumed_harness.chain.store.get_split_slot(); - for block in &majority_blocks { - resumed_harness - .process_block_result((block.clone(), None)) - .await - .unwrap(); - - // The canonical head should be the block from the majority chain. - resumed_harness.chain.recompute_head_at_current_slot().await; - assert_eq!(resumed_harness.head_slot(), block.slot()); - assert_eq!(resumed_harness.head_block_root(), block.canonical_root()); - } - let advanced_split_slot = resumed_harness.chain.store.get_split_slot(); - - // Check that the migration ran successfully. - assert!(advanced_split_slot > initial_split_slot); - - // Check that there is only a single head now matching harness2 (the minority chain is gone). - let heads = resumed_harness.chain.heads(); - assert_eq!(heads, harness2.chain.heads()); - assert_eq!(heads.len(), 1); -} - // This test checks whether the schema downgrade from the latest version to some minimum supported // version is correct. 
This is the easiest schema test to write without historic versions of // Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually From 295aaf982c8e7c4a8b76482085a7c75514f20b0f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 15:33:43 +1100 Subject: [PATCH 07/20] Thread more payload status --- beacon_node/beacon_chain/src/fork_revert.rs | 125 ++---------------- .../src/schema_change/migration_schema_v24.rs | 2 + beacon_node/beacon_chain/tests/store_tests.rs | 7 +- beacon_node/http_api/src/block_rewards.rs | 8 +- beacon_node/store/src/hot_cold_store.rs | 94 +++++++++++-- consensus/types/src/state/beacon_state.rs | 20 ++- 6 files changed, 125 insertions(+), 131 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 44424bbad9..986924bfc8 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -1,13 +1,7 @@ -use crate::{BeaconForkChoiceStore, BeaconSnapshot}; -use fork_choice::{ForkChoice, PayloadVerificationStatus}; +use crate::BeaconForkChoiceStore; +use fork_choice::ForkChoice; use itertools::process_results; -use state_processing::state_advance::complete_state_advance; -use state_processing::{ - ConsensusContext, VerifyBlockRoot, per_block_processing, - per_block_processing::BlockSignatureStrategy, -}; use std::sync::Arc; -use std::time::Duration; use store::{HotColdDB, ItemStore, iter::ParentRootBlockIterator}; use tracing::{info, warn}; use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot}; @@ -92,114 +86,11 @@ pub fn revert_to_fork_boundary, Cold: ItemStore /// chains other than the chain leading to `head_block_root`. It should only be used in extreme /// circumstances when there is no better alternative. 
pub fn reset_fork_choice_to_finalization, Cold: ItemStore>( - head_block_root: Hash256, - head_state: &BeaconState, - store: Arc>, - current_slot: Option, - spec: &ChainSpec, + _head_block_root: Hash256, + _head_state: &BeaconState, + _store: Arc>, + _current_slot: Option, + _spec: &ChainSpec, ) -> Result, E>, String> { - // Fetch finalized block. - let finalized_checkpoint = head_state.finalized_checkpoint(); - let finalized_block_root = finalized_checkpoint.root; - let finalized_block = store - .get_full_block(&finalized_block_root) - .map_err(|e| format!("Error loading finalized block: {:?}", e))? - .ok_or_else(|| { - format!( - "Finalized block missing for revert: {:?}", - finalized_block_root - ) - })?; - - // Advance finalized state to finalized epoch (to handle skipped slots). - let finalized_state_root = finalized_block.state_root(); - // The enshrined finalized state should be in the state cache. - let mut finalized_state = store - .get_state(&finalized_state_root, Some(finalized_block.slot()), true) - .map_err(|e| format!("Error loading finalized state: {:?}", e))? 
- .ok_or_else(|| { - format!( - "Finalized block state missing from database: {:?}", - finalized_state_root - ) - })?; - let finalized_slot = finalized_checkpoint.epoch.start_slot(E::slots_per_epoch()); - complete_state_advance( - &mut finalized_state, - Some(finalized_state_root), - finalized_slot, - spec, - ) - .map_err(|e| { - format!( - "Error advancing finalized state to finalized epoch: {:?}", - e - ) - })?; - let finalized_snapshot = BeaconSnapshot { - beacon_block_root: finalized_block_root, - beacon_block: Arc::new(finalized_block), - beacon_state: finalized_state, - }; - - let fc_store = - BeaconForkChoiceStore::get_forkchoice_store(store.clone(), finalized_snapshot.clone()) - .map_err(|e| format!("Unable to reset fork choice store for revert: {e:?}"))?; - - let mut fork_choice = ForkChoice::from_anchor( - fc_store, - finalized_block_root, - &finalized_snapshot.beacon_block, - &finalized_snapshot.beacon_state, - current_slot, - spec, - ) - .map_err(|e| format!("Unable to reset fork choice for revert: {:?}", e))?; - - // Replay blocks from finalized checkpoint back to head. - // We do not replay attestations presently, relying on the absence of other blocks - // to guarantee `head_block_root` as the head. 
- // TODO(gloas): this code doesn't work anyway, could just delete all of it - let (blocks, _envelopes) = store - .load_blocks_to_replay(finalized_slot + 1, head_state.slot(), head_block_root) - .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; - - let mut state = finalized_snapshot.beacon_state; - for block in blocks { - complete_state_advance(&mut state, None, block.slot(), spec) - .map_err(|e| format!("State advance failed: {:?}", e))?; - - let mut ctxt = ConsensusContext::new(block.slot()) - .set_proposer_index(block.message().proposer_index()); - per_block_processing( - &mut state, - &block, - BlockSignatureStrategy::NoVerification, - VerifyBlockRoot::True, - &mut ctxt, - spec, - ) - .map_err(|e| format!("Error replaying block: {:?}", e))?; - - // Setting this to unverified is the safest solution, since we don't have a way to - // retro-actively determine if they were valid or not. - // - // This scenario is so rare that it seems OK to double-verify some blocks. - let payload_verification_status = PayloadVerificationStatus::Optimistic; - - fork_choice - .on_block( - block.slot(), - block.message(), - block.canonical_root(), - // Reward proposer boost. We are reinforcing the canonical chain. 
- Duration::from_secs(0), - &state, - payload_verification_status, - spec, - ) - .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; - } - - Ok(fork_choice) + Err("broken".into()) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs index 1e1823a836..c8dfe1ac9b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -16,6 +16,7 @@ use store::{ use tracing::{debug, info, warn}; use types::{ BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, + execution::StatePayloadStatus, }; /// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. @@ -58,6 +59,7 @@ pub fn get_state_v22( base_state, summary.slot, summary.latest_block_root, + StatePayloadStatus::Pending, update_cache, ) .map(Some) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ef7179aadb..5b0f0aa388 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -689,7 +689,12 @@ async fn block_replayer_hooks() { .await; let (blocks, envelopes) = store - .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) + .load_blocks_to_replay( + Slot::new(0), + max_slot, + end_block_root.into(), + StatePayloadStatus::Pending, + ) .unwrap(); let mut pre_slots = vec![]; diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index cdb3d650ea..d0c6d8c958 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -6,6 +6,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use tracing::{debug, warn}; use types::block::BlindedBeaconBlock; +use types::execution::StatePayloadStatus; use types::new_non_zero_usize; use 
warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error}; @@ -34,7 +35,12 @@ pub fn get_block_rewards( let (blocks, envelopes) = chain .store - .load_blocks_to_replay(start_slot, end_slot, end_block_root) + .load_blocks_to_replay( + start_slot, + end_slot, + end_block_root, + StatePayloadStatus::Pending, + ) .map_err(|e| unhandled_error(BeaconChainError::from(e)))?; let state_root = chain diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 849099ecfb..b22be57fe1 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1845,6 +1845,44 @@ impl, Cold: ItemStore> HotColdDB } } + /// Compute the `StatePayloadStatus` for a stored state based on its summary. + /// + /// In future this might become a field of the summary, but this would require a whole DB + /// migration. For now we use an extra read from the DB to determine it. + fn get_hot_state_summary_payload_status( + &self, + summary: &HotStateSummary, + ) -> Result { + // Treat pre-Gloas states as `Pending`. + if !self + .spec + .fork_name_at_slot::(summary.slot) + .gloas_enabled() + { + return Ok(StatePayloadStatus::Pending); + } + + // Treat genesis state as `Pending` (`BeaconBlock` state). + let previous_state_root = summary.previous_state_root; + if previous_state_root.is_zero() { + return Ok(StatePayloadStatus::Pending); + } + + // Load the hot state summary for the previous state. If it has the same slot as this + // summary then we know this summary is for a `Full` block (payload state). + // NOTE: We treat any and all skipped-slot states as `Pending` by this definition, which is + // perhaps a bit strange (they could have a payload most-recently applied). + let previous_state_summary = self + .load_hot_state_summary(&previous_state_root)? 
+ .ok_or(Error::MissingHotStateSummary(previous_state_root))?; + + if previous_state_summary.slot == summary.slot { + Ok(StatePayloadStatus::Full) + } else { + Ok(StatePayloadStatus::Pending) + } + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -1940,12 +1978,14 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - if let Some(HotStateSummary { - slot, - latest_block_root, - diff_base_state, - .. - }) = self.load_hot_state_summary(state_root)? + if let Some( + summary @ HotStateSummary { + slot, + latest_block_root, + diff_base_state, + .. + }, + ) = self.load_hot_state_summary(state_root)? { let mut state = match self.hot_storage_strategy(slot)? { strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { @@ -1994,10 +2034,13 @@ impl, Cold: ItemStore> HotColdDB .lock() .rebase_on_finalized(&mut base_state, &self.spec)?; + let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + self.load_hot_state_using_replay( base_state, slot, latest_block_root, + payload_status, update_cache, )? } @@ -2015,20 +2058,26 @@ impl, Cold: ItemStore> HotColdDB base_state: BeaconState, slot: Slot, latest_block_root: Hash256, + desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot { + if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { return Ok(base_state); } - let (blocks, envelopes) = - self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; + let (blocks, envelopes) = self.load_blocks_to_replay( + base_state.slot(), + slot, + latest_block_root, + desired_payload_status, + )?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary // state that this state is based on. 
It may be useful as the basis of more states // in the same epoch. let state_cache_hook = |state_root, state: &mut BeaconState| { + // TODO(gloas): prevent caching of the payload_status=Full state? if !update_cache || state.slot() % E::slots_per_epoch() != 0 { return Ok(()); } @@ -2502,7 +2551,14 @@ impl, Cold: ItemStore> HotColdDB { return Ok((blocks, vec![])); } - let envelopes = self.load_payload_envelopes_for_blocks(&blocks)?; + // TODO(gloas): wire this up + let end_block_root = Hash256::ZERO; + let desired_payload_status = StatePayloadStatus::Pending; + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; Ok((blocks, envelopes)) } @@ -2523,6 +2579,7 @@ impl, Cold: ItemStore> HotColdDB start_slot: Slot, end_slot: Slot, end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, ) -> Result< ( Vec>, @@ -2566,7 +2623,11 @@ impl, Cold: ItemStore> HotColdDB return Ok((blocks, vec![])); } - let envelopes = self.load_payload_envelopes_for_blocks(&blocks)?; + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; Ok((blocks, envelopes)) } @@ -2574,6 +2635,8 @@ impl, Cold: ItemStore> HotColdDB pub fn load_payload_envelopes_for_blocks( &self, blocks: &[SignedBlindedBeaconBlock], + end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, ) -> Result>, Error> { let mut envelopes = vec![]; @@ -2593,6 +2656,15 @@ impl, Cold: ItemStore> HotColdDB envelopes.push(envelope); } } + + // Load the payload for the last block if desired. 
+ if let StatePayloadStatus::Full = desired_payload_status { + let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( + HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), + )?; + envelopes.push(envelope); + } + Ok(envelopes) } diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 6228e40ef8..b71688724e 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -36,7 +36,7 @@ use crate::{ execution::{ Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, StatePayloadStatus, }, fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, light_client::consts::{ @@ -1265,6 +1265,24 @@ impl BeaconState { } } + /// Determine the payload status of this state. + /// + /// Prior to Gloas this is always `Pending`. + /// + /// Post-Gloas, the definition of the `StatePayloadStatus` is: + /// + /// - `Full` if this state is the result of envelope processing. + /// - `Pending` if this state is the result of block processing. + pub fn payload_status(&self) -> StatePayloadStatus { + if !self.fork_name_unchecked().gloas_enabled() { + StatePayloadStatus::Pending + } else if self.is_parent_block_full() { + StatePayloadStatus::Full + } else { + StatePayloadStatus::Pending + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. 
/// /// Spec v0.12.1 From b29c6c0e48b466e3fc07697491288961f0a38992 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 16:45:41 +1100 Subject: [PATCH 08/20] Address review comments --- beacon_node/store/src/hot_cold_store.rs | 8 -------- beacon_node/store/src/iter.rs | 22 ++++------------------ consensus/types/src/state/beacon_state.rs | 7 ++++--- 3 files changed, 8 insertions(+), 29 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6e165702a2..4d00ed1c4a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -721,14 +721,6 @@ impl, Cold: ItemStore> HotColdDB }) } - /// Fetch a block from the store, ignoring which fork variant it *should* be for. - pub fn get_block_any_variant>( - &self, - block_root: &Hash256, - ) -> Result>, Error> { - self.get_block_with(block_root, SignedBeaconBlock::any_from_ssz_bytes) - } - /// Fetch a block from the store using a custom decode function. /// /// This is useful for e.g. ignoring the slot-indicated fork to forcefully load a block as if it diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index e2b666e597..0cb803d1ed 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -249,7 +249,6 @@ impl, Cold: ItemStore> Iterator pub struct ParentRootBlockIterator<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> { store: &'a HotColdDB, next_block_root: Hash256, - decode_any_variant: bool, _phantom: PhantomData, } @@ -260,17 +259,6 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Self { store, next_block_root: start_block_root, - decode_any_variant: false, - _phantom: PhantomData, - } - } - - /// Block iterator that is tolerant of blocks that have the wrong fork for their slot. 
- pub fn fork_tolerant(store: &'a HotColdDB, start_block_root: Hash256) -> Self { - Self { - store, - next_block_root: start_block_root, - decode_any_variant: true, _phantom: PhantomData, } } @@ -285,12 +273,10 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Ok(None) } else { let block_root = self.next_block_root; - let block = if self.decode_any_variant { - self.store.get_block_any_variant(&block_root) - } else { - self.store.get_blinded_block(&block_root) - }? - .ok_or(Error::BlockNotFound(block_root))?; + let block = self + .store + .get_blinded_block(&block_root)? + .ok_or(Error::BlockNotFound(block_root))?; self.next_block_root = block.message().parent_root(); Ok(Some((block_root, block))) } diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 6228e40ef8..bd67f469d2 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -56,9 +56,10 @@ use crate::{ pub const CACHED_EPOCHS: usize = 3; -// Pre-electra WS calculations are not supported. On mainnet, pre-electra epochs are outside the weak subjectivity -// period. The default pre-electra WS value is set to 256 to allow for `basic-sim``, `fallback-sim`` test case `revert_minority_fork_on_resume` -// to pass. 256 is a small enough number to trigger the WS safety check pre-electra on mainnet. +// Pre-electra WS calculations are not supported. On mainnet, pre-electra epochs are outside the +// weak subjectivity period. The default pre-electra WS value is set to 256 to allow for `basic-sim` +// and `fallback-sim` tests to pass. 256 is a small enough number to trigger the WS safety check +// pre-electra on mainnet. 
pub const DEFAULT_PRE_ELECTRA_WS_PERIOD: u64 = 256; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; From e2b3971cbd45f8cc0372bd4bc440db32538bb161 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 17:48:28 +1100 Subject: [PATCH 09/20] Add StatePayloadStatus to storage_strategy --- .../src/schema_change/migration_schema_v24.rs | 5 +- beacon_node/beacon_chain/tests/store_tests.rs | 2 +- beacon_node/store/src/hdiff.rs | 39 ++++++++++--- beacon_node/store/src/hot_cold_store.rs | 57 ++++++++++++------- 4 files changed, 74 insertions(+), 29 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs index c8dfe1ac9b..fced9e4c7a 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -224,7 +224,7 @@ pub fn upgrade_to_v24( if previous_snapshot_slot >= anchor_info.state_upper_limit && db .hierarchy - .storage_strategy(split.slot, dummy_start_slot) + .storage_strategy(split.slot, dummy_start_slot, StatePayloadStatus::Pending) .is_ok_and(|strategy| !strategy.is_replay_from()) { info!( @@ -331,7 +331,8 @@ pub fn upgrade_to_v24( ); } else { // 1. Store snapshot or diff at this slot (if required). 
- let storage_strategy = db.hot_storage_strategy(slot)?; + let storage_strategy = + db.hot_storage_strategy(slot, StatePayloadStatus::Pending)?; debug!( %slot, ?state_root, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0cfaac7502..cd119ef028 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5097,7 +5097,7 @@ async fn replay_from_split_state() { assert!( store .hierarchy - .storage_strategy(split.slot, anchor_slot) + .storage_strategy(split.slot, anchor_slot, StatePayloadStatus::Pending) .unwrap() .is_replay_from() ); diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3777c83b60..54b2f3604b 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -12,7 +12,9 @@ use std::str::FromStr; use std::sync::LazyLock; use superstruct::superstruct; use types::state::HistoricalSummary; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator}; +use types::{ + BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator, execution::StatePayloadStatus, +}; static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); @@ -653,7 +655,21 @@ impl HierarchyModuli { /// exponents [5,13,21], to reconstruct state at slot 3,000,003: if start = 3,000,002 /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at /// 2998272. - pub fn storage_strategy(&self, slot: Slot, start_slot: Slot) -> Result { + /// * `payload_status` - whether the state is `Full` (came from processing a payload), or + /// `Pending` (came from processing a block). Prior to Gloas all states are Pending. + pub fn storage_strategy( + &self, + slot: Slot, + start_slot: Slot, + payload_status: StatePayloadStatus, + ) -> Result { + // Store all Full states by replaying from their respective Pending state at the same slot. 
+ if let StatePayloadStatus::Full = payload_status + && slot >= start_slot + { + return Ok(StorageStrategy::ReplayFrom(slot)); + } + match slot.cmp(&start_slot) { Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)), Ordering::Equal => return Ok(StorageStrategy::Snapshot), @@ -809,33 +825,42 @@ mod tests { let sslot = Slot::new(0); let moduli = config.to_moduli().unwrap(); + let payload_status = StatePayloadStatus::Pending; // Full snapshots at multiples of 2^21. let snapshot_freq = Slot::new(1 << 21); assert_eq!( - moduli.storage_strategy(Slot::new(0), sslot).unwrap(), + moduli + .storage_strategy(Slot::new(0), sslot, payload_status) + .unwrap(), StorageStrategy::Snapshot ); assert_eq!( - moduli.storage_strategy(snapshot_freq, sslot).unwrap(), + moduli + .storage_strategy(snapshot_freq, sslot, payload_status) + .unwrap(), StorageStrategy::Snapshot ); assert_eq!( - moduli.storage_strategy(snapshot_freq * 3, sslot).unwrap(), + moduli + .storage_strategy(snapshot_freq * 3, sslot, payload_status) + .unwrap(), StorageStrategy::Snapshot ); // Diffs should be from the previous layer (the snapshot in this case), and not the previous diff in the same layer. 
let first_layer = Slot::new(1 << 18); assert_eq!( - moduli.storage_strategy(first_layer * 2, sslot).unwrap(), + moduli + .storage_strategy(first_layer * 2, sslot, payload_status) + .unwrap(), StorageStrategy::DiffFrom(Slot::new(0)) ); let replay_strategy_slot = first_layer + 1; assert_eq!( moduli - .storage_strategy(replay_strategy_slot, sslot) + .storage_strategy(replay_strategy_slot, sslot, payload_status) .unwrap(), StorageStrategy::ReplayFrom(first_layer) ); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 943c98f307..06c42339d9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -452,15 +452,26 @@ impl HotColdDB, BeaconNodeBackend> { } impl, Cold: ItemStore> HotColdDB { - fn cold_storage_strategy(&self, slot: Slot) -> Result { + fn cold_storage_strategy( + &self, + slot: Slot, + // payload_status: StatePayloadStatus, + ) -> Result { // The start slot for the freezer HDiff is always 0 - Ok(self.hierarchy.storage_strategy(slot, Slot::new(0))?) - } - - pub fn hot_storage_strategy(&self, slot: Slot) -> Result { + // TODO(gloas): wire up payload_status Ok(self .hierarchy - .storage_strategy(slot, self.hot_hdiff_start_slot()?)?) + .storage_strategy(slot, Slot::new(0), StatePayloadStatus::Pending)?) + } + + pub fn hot_storage_strategy( + &self, + slot: Slot, + payload_status: StatePayloadStatus, + ) -> Result { + Ok(self + .hierarchy + .storage_strategy(slot, self.hot_hdiff_start_slot()?, payload_status)?) } pub fn hot_hdiff_start_slot(&self) -> Result { @@ -1380,8 +1391,11 @@ impl, Cold: ItemStore> HotColdDB // NOTE: `hot_storage_strategy` can error if there are states in the database // prior to the `anchor_slot`. This can happen if checkpoint sync has been // botched and left some states in the database prior to completing. + // Use `Pending` status here because snapshots and diffs are only stored for + // `Pending` states. 
if let Some(slot) = slot - && let Ok(strategy) = self.hot_storage_strategy(slot) + && let Ok(strategy) = + self.hot_storage_strategy(slot, StatePayloadStatus::Pending) { match strategy { StorageStrategy::Snapshot => { @@ -1643,6 +1657,8 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { + let payload_status = state.payload_status(); + match self.state_cache.lock().put_state( *state_root, state.get_latest_block_root(*state_root), @@ -1688,7 +1704,7 @@ impl, Cold: ItemStore> HotColdDB debug!( ?state_root, slot = %state.slot(), - storage_strategy = ?self.hot_storage_strategy(state.slot())?, + storage_strategy = ?self.hot_storage_strategy(state.slot(), payload_status)?, diff_base_state = %summary.diff_base_state, previous_state_root = ?summary.previous_state_root, "Storing hot state summary and diffs" @@ -1711,7 +1727,7 @@ impl, Cold: ItemStore> HotColdDB self, *state_root, state, - self.hot_storage_strategy(state.slot())?, + self.hot_storage_strategy(state.slot(), state.payload_status())?, )?; ops.push(hot_state_summary.as_kv_store_op(*state_root)); Ok(hot_state_summary) @@ -1724,7 +1740,7 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) -> Result<(), Error> { let slot = state.slot(); - let storage_strategy = self.hot_storage_strategy(slot)?; + let storage_strategy = self.hot_storage_strategy(slot, state.payload_status())?; match storage_strategy { StorageStrategy::ReplayFrom(_) => { // Already have persisted the state summary, don't persist anything else @@ -1884,16 +1900,20 @@ impl, Cold: ItemStore> HotColdDB return Ok(buffer); } - let Some(HotStateSummary { - slot, - diff_base_state, - .. - }) = self.load_hot_state_summary(&state_root)? + let Some( + summary @ HotStateSummary { + slot, + diff_base_state, + .. + }, + ) = self.load_hot_state_summary(&state_root)? else { return Err(Error::MissingHotStateSummary(state_root)); }; - let buffer = match self.hot_storage_strategy(slot)? 
{ + let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + + let buffer = match self.hot_storage_strategy(slot, payload_status)? { StorageStrategy::Snapshot => { let Some(state) = self.load_hot_state_as_snapshot(state_root)? else { let existing_snapshots = self.load_hot_state_snapshot_roots()?; @@ -1979,7 +1999,8 @@ impl, Cold: ItemStore> HotColdDB }, ) = self.load_hot_state_summary(state_root)? { - let mut state = match self.hot_storage_strategy(slot)? { + let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + let mut state = match self.hot_storage_strategy(slot, payload_status)? { strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { let buffer_timer = metrics::start_timer_vec( &metrics::BEACON_HDIFF_BUFFER_LOAD_TIME, @@ -2026,8 +2047,6 @@ impl, Cold: ItemStore> HotColdDB .lock() .rebase_on_finalized(&mut base_state, &self.spec)?; - let payload_status = self.get_hot_state_summary_payload_status(&summary)?; - self.load_hot_state_using_replay( base_state, slot, From 28eb5adf0ac9aef88ea7c237a274b2fef791a82f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 24 Feb 2026 18:16:53 +1100 Subject: [PATCH 10/20] Update HotStateSummary construction --- beacon_node/store/src/hdiff.rs | 3 ++- beacon_node/store/src/hot_cold_store.rs | 29 ++++++++++++++++++----- consensus/types/src/state/beacon_state.rs | 19 +++++++++++++++ 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 54b2f3604b..beae02fc75 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -656,7 +656,8 @@ impl HierarchyModuli { /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at /// 2998272. /// * `payload_status` - whether the state is `Full` (came from processing a payload), or - /// `Pending` (came from processing a block). Prior to Gloas all states are Pending. 
+ /// `Pending` (came from processing a block). Prior to Gloas all states are `Pending`. + /// Skipped slots post-Gloas should also use a `Pending` status. pub fn storage_strategy( &self, slot: Slot, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 06c42339d9..46257db3fb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1657,7 +1657,7 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - let payload_status = state.payload_status(); + let payload_status = state.payload_status_with_skipped_pending(); match self.state_cache.lock().put_state( *state_root, @@ -1727,7 +1727,7 @@ impl, Cold: ItemStore> HotColdDB self, *state_root, state, - self.hot_storage_strategy(state.slot(), state.payload_status())?, + self.hot_storage_strategy(state.slot(), state.payload_status_with_skipped_pending())?, )?; ops.push(hot_state_summary.as_kv_store_op(*state_root)); Ok(hot_state_summary) @@ -1740,7 +1740,8 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) -> Result<(), Error> { let slot = state.slot(); - let storage_strategy = self.hot_storage_strategy(slot, state.payload_status())?; + let storage_strategy = + self.hot_storage_strategy(slot, state.payload_status_with_skipped_pending())?; match storage_strategy { StorageStrategy::ReplayFrom(_) => { // Already have persisted the state summary, don't persist anything else @@ -1880,6 +1881,7 @@ impl, Cold: ItemStore> HotColdDB // summary then we know this summary is for a `Full` block (payload state). // NOTE: We treat any and all skipped-slot states as `Pending` by this definition, which is // perhaps a bit strange (they could have a payload most-recently applied). + // TODO(gloas): could maybe simplify this by checking diff_base_slot == slot? let previous_state_summary = self .load_hot_state_summary(&previous_state_root)? 
.ok_or(Error::MissingHotStateSummary(previous_state_root))?; @@ -2072,7 +2074,9 @@ impl, Cold: ItemStore> HotColdDB desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { + if base_state.slot() == slot + && base_state.payload_status_with_skipped_pending() == desired_payload_status + { return Ok(base_state); } @@ -4163,9 +4167,20 @@ impl HotStateSummary { // slots where there isn't a skip). let latest_block_root = state.get_latest_block_root(state_root); + // Payload status of the state determines a lot about how it is stored. + let payload_status = state.payload_status_with_skipped_pending(); + let get_state_root = |slot| { if slot == state.slot() { - Ok::<_, Error>(state_root) + // In the case where this state is a `Full` state, use the `state_root` of its + // prior `Pending` state. + if let StatePayloadStatus::Full = payload_status { + // TODO(gloas): change this assert to debug_assert_eq + assert_eq!(state.latest_block_header().slot, state.slot()); + Ok(state.latest_block_header().state_root) + } else { + Ok::<_, Error>(state_root) + } } else { Ok(get_ancestor_state_root(store, state, slot).map_err(|e| { Error::StateSummaryIteratorError { @@ -4184,7 +4199,9 @@ impl HotStateSummary { OptionalDiffBaseState::Snapshot(0) }; - let previous_state_root = if state.slot() == 0 { + let previous_state_root = if state.slot() == 0 + && let StatePayloadStatus::Pending = payload_status + { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() } else { diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 34cfd0ca1c..e23215fc5a 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1284,6 +1284,25 @@ impl BeaconState { } } + /// Determine the payload status of this state with all skipped slots considered pending. 
+ /// + /// Prior to Gloas this is always `Pending`. + /// + /// Post-Gloas, the definition of the `StatePayloadStatus` is: + /// + /// - `Full` if this state is the IMMEDIATE result of envelope processing (no skipped slots) + /// - `Pending` if this state is the result of block processing, or slot processing (skipped + /// slot). + pub fn payload_status_with_skipped_pending(&self) -> StatePayloadStatus { + if !self.fork_name_unchecked().gloas_enabled() { + StatePayloadStatus::Pending + } else if self.is_parent_block_full() && self.latest_block_header().slot == self.slot() { + StatePayloadStatus::Full + } else { + StatePayloadStatus::Pending + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 From 5f3faced1ac5010ce388c50f276f6501b7e4185f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 10:15:31 +1100 Subject: [PATCH 11/20] Small fixes for the genesis state --- beacon_node/store/src/hdiff.rs | 3 +++ beacon_node/store/src/hot_cold_store.rs | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index beae02fc75..3ad6a1f0d3 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -665,8 +665,11 @@ impl HierarchyModuli { payload_status: StatePayloadStatus, ) -> Result { // Store all Full states by replaying from their respective Pending state at the same slot. + // Make an exception for the genesis state, which "counts as" Full by virtue of having 0x0 + // in both `latest_block_hash` and `latest_execution_payload_bid.block_hash`. 
if let StatePayloadStatus::Full = payload_status && slot >= start_slot + && slot != 0 { return Ok(StorageStrategy::ReplayFrom(slot)); } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 46257db3fb..c9e1b6062c 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -4199,11 +4199,11 @@ impl HotStateSummary { OptionalDiffBaseState::Snapshot(0) }; - let previous_state_root = if state.slot() == 0 - && let StatePayloadStatus::Pending = payload_status - { + let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() + } else if let StatePayloadStatus::Full = payload_status { + get_state_root(state.slot())? } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; From fe240ba8925a3d419b2a6d4e6c18504010788c23 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 10:15:54 +1100 Subject: [PATCH 12/20] Start updating the test harness (Claude) --- .../src/block_production/gloas.rs | 13 +- beacon_node/beacon_chain/src/test_utils.rs | 114 ++++++ beacon_node/beacon_chain/tests/store_tests.rs | 382 ++++++++++++++++++ .../test_utils/execution_block_generator.rs | 10 +- beacon_node/http_api/src/produce_block.rs | 2 +- 5 files changed, 515 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs b/beacon_node/beacon_chain/src/block_production/gloas.rs index 607090c59d..651b71ed39 100644 --- a/beacon_node/beacon_chain/src/block_production/gloas.rs +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -41,7 +41,11 @@ pub const BID_VALUE_SELF_BUILD: u64 = 0; pub const EXECUTION_PAYMENT_TRUSTLESS_BUILD: u64 = 0; type ConsensusBlockValue = u64; -type BlockProductionResult = (BeaconBlock>, ConsensusBlockValue); +type BlockProductionResult = ( + BeaconBlock>, + BeaconState, + ConsensusBlockValue, +); pub type PreparePayloadResult = Result, 
BlockProductionError>; pub type PreparePayloadHandle = JoinHandle>>; @@ -433,7 +437,7 @@ impl BeaconChain { payload_data: Option>, mut state: BeaconState, verification: ProduceBlockVerification, - ) -> Result<(BeaconBlock>, u64), BlockProductionError> { + ) -> Result, BlockProductionError> { let PartialBeaconBlock { slot, proposer_index, @@ -545,6 +549,9 @@ impl BeaconChain { drop(state_root_timer); + // Clone the Pending state (post-block, pre-envelope) for callers that need it. + let pending_state = state.clone(); + let (mut block, _) = signed_beacon_block.deconstruct(); *block.state_root_mut() = state_root; @@ -605,7 +612,7 @@ impl BeaconChain { "Produced beacon block" ); - Ok((block, consensus_block_value)) + Ok((block, pending_state, consensus_block_value)) } // TODO(gloas) introduce `ProposerPreferences` so we can build out trustless diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 096a0516fc..45e9bad82a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1082,6 +1082,67 @@ where (block_contents, block_response.state) } + /// Returns a newly created block, signed by the proposer for the given slot, + /// along with the execution payload envelope (for Gloas) and the pending state. + /// + /// For pre-Gloas forks, the envelope is `None` and this behaves like `make_block`. 
+ pub async fn make_block_with_envelope( + &self, + mut state: BeaconState, + slot: Slot, + ) -> ( + SignedBlockContentsTuple, + Option>, + BeaconState, + ) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + if state.fork_name_unchecked().gloas_enabled() + || self.spec.fork_name_at_slot::(slot).gloas_enabled() + { + complete_state_advance(&mut state, None, slot, &self.spec) + .expect("should be able to advance state to slot"); + state.build_caches(&self.spec).expect("should build caches"); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); + + let (block, pending_state, _consensus_block_value) = self + .chain + .produce_block_on_state_gloas( + state, + None, + slot, + randao_reveal, + graffiti_settings, + ProduceBlockVerification::VerifyRandao, + ) + .await + .unwrap(); + + let signed_block = Arc::new(block.sign( + &self.validator_keypairs[proposer_index].sk, + &pending_state.fork(), + pending_state.genesis_validators_root(), + &self.spec, + )); + + // Retrieve the cached envelope produced during block production. + let envelope = self.chain.pending_payload_envelopes.write().remove(slot); + + let block_contents: SignedBlockContentsTuple = (signed_block, None); + (block_contents, envelope, pending_state) + } else { + let (block_contents, state) = self.make_block(state, slot).await; + (block_contents, None, state) + } + } + /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after /// caches are built but before the generated block is processed. pub async fn make_block_return_pre_state( @@ -2477,6 +2538,59 @@ where Ok(block_hash) } + /// Process an execution payload envelope for a Gloas block. 
+ /// + /// This applies the envelope to the pending state to produce the Full state, + /// computes the Full state root, sets it on the envelope, and stores both the + /// envelope and the Full state in the database. + /// + /// Returns the Full state root. + pub fn process_envelope( + &self, + block_root: Hash256, + envelope: ExecutionPayloadEnvelope, + pending_state: &mut BeaconState, + ) -> Hash256 { + let block_state_root = pending_state + .update_tree_hash_cache() + .expect("should compute pending state root"); + + let mut signed_envelope = SignedExecutionPayloadEnvelope { + message: envelope, + signature: Signature::infinity().expect("should create infinity signature"), + }; + + state_processing::envelope_processing::process_execution_payload_envelope( + pending_state, + Some(block_state_root), + &signed_envelope, + state_processing::VerifySignatures::False, + state_processing::envelope_processing::VerifyStateRoot::False, + &self.spec, + ) + .expect("should process envelope"); + + let full_state_root = pending_state + .update_tree_hash_cache() + .expect("should compute full state root"); + + signed_envelope.message.state_root = full_state_root; + + // Store the envelope. + self.chain + .store + .put_payload_envelope(&block_root, signed_envelope) + .expect("should store envelope"); + + // Store the Full state. + self.chain + .store + .put_state(&full_state_root, pending_state) + .expect("should store full state"); + + full_state_root + } + /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. 
pub fn build_rpc_block_from_store_blobs( diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index cd119ef028..0143e6573c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5431,6 +5431,388 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } +// ===================== Gloas Store Tests ===================== + +/// Test basic Gloas block + envelope storage and retrieval. +#[tokio::test] +async fn test_gloas_block_and_envelope_storage() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 8u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + let mut block_roots = vec![]; + let mut pending_state_roots = vec![]; + let mut full_state_roots = vec![]; + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + // Process the block. + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); + pending_state_roots.push(pending_state_root); + + // Process the envelope. + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state.clone(); + let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + full_state_roots.push(full_state_root); + + block_roots.push(block_root); + state = full_state; + } + + // Verify storage. + for (i, block_root) in block_roots.iter().enumerate() { + // Block can be loaded. 
+ assert!( + store.get_blinded_block(block_root).unwrap().is_some(), + "block at slot {} should be in DB", + i + 1 + ); + + // Envelope can be loaded. + let loaded_envelope = store.get_payload_envelope(block_root).unwrap(); + assert!( + loaded_envelope.is_some(), + "envelope at slot {} should be in DB", + i + 1 + ); + + // Pending state can be loaded. + let pending_state_root = pending_state_roots[i]; + let loaded_pending_state = store + .get_state(&pending_state_root, None, CACHE_STATE_IN_TESTS) + .unwrap(); + assert!( + loaded_pending_state.is_some(), + "pending state at slot {} should be in DB", + i + 1 + ); + let loaded_pending_state = loaded_pending_state.unwrap(); + assert_eq!( + loaded_pending_state.payload_status_with_skipped_pending(), + StatePayloadStatus::Pending, + "loaded pending state at slot {} should have Pending status", + i + 1 + ); + + // Full state can be loaded. + let full_state_root = full_state_roots[i]; + let loaded_full_state = store + .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) + .unwrap(); + assert!( + loaded_full_state.is_some(), + "full state at slot {} should be in DB", + i + 1 + ); + let loaded_full_state = loaded_full_state.unwrap(); + assert_eq!( + loaded_full_state.payload_status_with_skipped_pending(), + StatePayloadStatus::Full, + "loaded full state at slot {} should have Full status", + i + 1 + ); + } +} + +/// Test that Pending and Full states have the correct payload status through round-trip +/// storage and retrieval. 
+#[tokio::test] +async fn test_gloas_state_payload_status() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 6u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + // Verify the pending state has correct payload status. + assert_eq!( + pending_state.payload_status_with_skipped_pending(), + StatePayloadStatus::Pending, + "pending state at slot {} should be Pending", + i + ); + + // Process the envelope and verify the full state has correct payload status. + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + + assert_eq!( + full_state.payload_status_with_skipped_pending(), + StatePayloadStatus::Full, + "full state at slot {} should be Full", + i + ); + + // Round-trip: load the full state from DB and check status. + let loaded_full = store + .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) + .unwrap() + .expect("full state should exist in DB"); + assert_eq!( + loaded_full.payload_status_with_skipped_pending(), + StatePayloadStatus::Full, + "loaded full state at slot {} should be Full after round-trip", + i + ); + + state = full_state; + } +} + +/// Test block replay with and without envelopes. 
+#[tokio::test] +async fn test_gloas_block_replay_with_envelopes() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 16u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state.clone(); + + let mut last_block_root = Hash256::zero(); + let mut pending_states = HashMap::new(); + let mut full_states = HashMap::new(); + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); + pending_states.insert(slot, (pending_state_root, pending_state.clone())); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + full_states.insert(slot, (full_state_root, full_state.clone())); + + last_block_root = block_root; + state = full_state; + } + + let end_slot = Slot::new(num_blocks); + + // Load blocks for Pending replay (no envelopes for the last block). + let (blocks_pending, envelopes_pending) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Pending, + ) + .unwrap(); + assert!( + !blocks_pending.is_empty(), + "should have blocks for pending replay" + ); + // For Pending, no envelope for the last block; envelopes for intermediate blocks + // whose payloads are canonical. 
+ let expected_pending_envelopes = blocks_pending.len().saturating_sub(1); + assert_eq!( + envelopes_pending.len(), + expected_pending_envelopes, + "pending replay should have envelopes for all blocks except the last" + ); + + // Load blocks for Full replay (envelopes for all blocks including the last). + let (blocks_full, envelopes_full) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Full, + ) + .unwrap(); + assert_eq!( + envelopes_full.len(), + expected_pending_envelopes + 1, + "full replay should have one more envelope than pending replay" + ); + + // Replay to Pending state and verify. + let mut replayed_pending = + BlockReplayer::::new(genesis_state.clone(), store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .desired_state_payload_status(StatePayloadStatus::Pending) + .apply_blocks(blocks_pending, envelopes_pending, None) + .expect("should replay blocks to pending state") + .into_state(); + replayed_pending.apply_pending_mutations().unwrap(); + + let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone(); + expected_pending.apply_pending_mutations().unwrap(); + + replayed_pending.drop_all_caches().unwrap(); + expected_pending.drop_all_caches().unwrap(); + assert_eq!( + replayed_pending, expected_pending, + "replayed pending state should match stored pending state" + ); + + // Replay to Full state and verify. 
+    let mut replayed_full =
+        BlockReplayer::<E>::new(genesis_state, store.get_chain_spec())
+            .no_signature_verification()
+            .minimal_block_root_verification()
+            .desired_state_payload_status(StatePayloadStatus::Full)
+            .apply_blocks(blocks_full, envelopes_full, None)
+            .expect("should replay blocks to full state")
+            .into_state();
+    replayed_full.apply_pending_mutations().unwrap();
+
+    let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone();
+    expected_full.apply_pending_mutations().unwrap();
+
+    replayed_full.drop_all_caches().unwrap();
+    expected_full.drop_all_caches().unwrap();
+    assert_eq!(
+        replayed_full, expected_full,
+        "replayed full state should match stored full state"
+    );
+}
+
+/// Test the hot state hierarchy with Full states stored as ReplayFrom.
+#[tokio::test]
+async fn test_gloas_hot_state_hierarchy() {
+    if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) {
+        return;
+    }
+
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
+
+    // Build enough blocks to span multiple epochs. With MinimalEthSpec (8 slots/epoch),
+    // 40 slots covers 5 epochs.
+    let num_blocks = E::slots_per_epoch() * 5;
+    let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
+
+    let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root();
+
+    // Manually build every block with an envelope, attesting to the previous block on
+    // each iteration so fork choice advances across all five epochs.
+    let mut state = genesis_state;
+    let mut last_block_root = Hash256::zero();
+
+    for i in 1..=num_blocks {
+        let slot = Slot::new(i);
+        harness.advance_slot();
+
+        let (block_contents, envelope, pending_state) =
+            harness.make_block_with_envelope(state.clone(), slot).await;
+        let block_root = block_contents.0.canonical_root();
+
+        // Attest to previous block before processing next.
+ if i > 1 { + let state_root = state.update_tree_hash_cache().unwrap(); + harness.attest_block( + &state, + state_root, + last_block_root.into(), + &block_contents.0, + &all_validators, + ); + } + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + harness.process_envelope(block_root, envelope, &mut full_state); + + last_block_root = block_root; + state = full_state; + } + + // Verify states can be loaded and have correct payload status. + let _head_state = harness.get_current_state(); + let _head_slot = harness.head_slot(); + + // States at some slots should be retrievable. + for slot_num in 1..=num_blocks { + let slot = Slot::new(slot_num); + // Get the state root from the block at this slot via the state root iterator. + let state_root_result: Option<(Hash256, Slot)> = harness + .chain + .forwards_iter_state_roots(slot) + .expect("should get iter") + .map(Result::unwrap) + .find(|(_, s)| *s == slot); + + if let Some((state_root, _)) = state_root_result { + let loaded_state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap(); + assert!( + loaded_state.is_some(), + "state at slot {} should be loadable", + slot_num + ); + } + } + + // Verify chain dump and iterators work with Gloas states. + check_chain_dump(&harness, num_blocks + 1); + check_iterators(&harness); +} + /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. 
fn check_split_slot( harness: &TestHarness, diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 8591359f15..a2af20a0e0 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -925,8 +925,14 @@ pub fn generate_genesis_header( *header.transactions_root_mut() = empty_transactions_root; Some(header) } - // TODO(EIP-7732): need to look into this - ForkName::Gloas => None, + ForkName::Gloas => { + // TODO(gloas): we are using a Fulu header for now, but this gets fixed up by the + // genesis builder anyway which translates it to bid/latest_block_hash. + let mut header = ExecutionPayloadHeader::Fulu(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) + } } } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 607221686f..70475de130 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -70,7 +70,7 @@ pub async fn produce_block_v4( let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); - let (block, consensus_block_value) = chain + let (block, _pending_state, consensus_block_value) = chain .produce_block_with_verification_gloas( randao_reveal, slot, From f4b7f8f02dba09119741a6d214a8f5a6a6629da8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 10:53:41 +1100 Subject: [PATCH 13/20] Fixed signed envelopes etc --- beacon_node/beacon_chain/src/test_utils.rs | 61 +++++++++++++--------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 45e9bad82a..a6dc376e71 100644 --- 
a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -65,6 +65,7 @@ use store::database::interface::BeaconNodeBackend; use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; +use tracing::debug; use tree_hash::TreeHash; use typenum::U4294967296; use types::attestation::IndexedAttestationBase; @@ -1092,7 +1093,7 @@ where slot: Slot, ) -> ( SignedBlockContentsTuple, - Option>, + Option>, BeaconState, ) { assert_ne!(slot, 0, "can't produce a block at slot 0"); @@ -1132,11 +1133,30 @@ where &self.spec, )); - // Retrieve the cached envelope produced during block production. - let envelope = self.chain.pending_payload_envelopes.write().remove(slot); + // Retrieve the cached envelope produced during block production and sign it. + let signed_envelope = self + .chain + .pending_payload_envelopes + .write() + .remove(slot) + .map(|envelope| { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::BeaconBuilder, + &pending_state.fork(), + pending_state.genesis_validators_root(), + ); + let message = envelope.signing_root(domain); + let signature = self.validator_keypairs[proposer_index].sk.sign(message); + SignedExecutionPayloadEnvelope { + message: envelope, + signature, + } + }); let block_contents: SignedBlockContentsTuple = (signed_block, None); - (block_contents, envelope, pending_state) + (block_contents, signed_envelope, pending_state) } else { let (block_contents, state) = self.make_block(state, slot).await; (block_contents, None, state) @@ -2539,43 +2559,32 @@ where } /// Process an execution payload envelope for a Gloas block. - /// - /// This applies the envelope to the pending state to produce the Full state, - /// computes the Full state root, sets it on the envelope, and stores both the - /// envelope and the Full state in the database. 
- /// - /// Returns the Full state root. pub fn process_envelope( &self, block_root: Hash256, - envelope: ExecutionPayloadEnvelope, + signed_envelope: SignedExecutionPayloadEnvelope, pending_state: &mut BeaconState, ) -> Hash256 { + let state_root = signed_envelope.message.state_root; + debug!( + slot = %signed_envelope.message.slot, + ?state_root, + "Processing execution payload envelope" + ); let block_state_root = pending_state .update_tree_hash_cache() .expect("should compute pending state root"); - let mut signed_envelope = SignedExecutionPayloadEnvelope { - message: envelope, - signature: Signature::infinity().expect("should create infinity signature"), - }; - state_processing::envelope_processing::process_execution_payload_envelope( pending_state, Some(block_state_root), &signed_envelope, - state_processing::VerifySignatures::False, - state_processing::envelope_processing::VerifyStateRoot::False, + state_processing::VerifySignatures::True, + state_processing::envelope_processing::VerifyStateRoot::True, &self.spec, ) .expect("should process envelope"); - let full_state_root = pending_state - .update_tree_hash_cache() - .expect("should compute full state root"); - - signed_envelope.message.state_root = full_state_root; - // Store the envelope. self.chain .store @@ -2585,10 +2594,10 @@ where // Store the Full state. 
self.chain .store - .put_state(&full_state_root, pending_state) + .put_state(&state_root, pending_state) .expect("should store full state"); - full_state_root + state_root } /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from From 984f0d70e012aef52088b6a86217c520a4ac6665 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 13:21:48 +1100 Subject: [PATCH 14/20] Make state cache payload status aware --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 ++++++++-- .../beacon_chain/src/blob_verification.rs | 8 +++- .../beacon_chain/src/block_production/mod.rs | 15 +++++-- .../beacon_chain/src/block_verification.rs | 12 +++++- beacon_node/beacon_chain/src/builder.rs | 12 +++++- .../beacon_chain/src/canonical_head.rs | 13 +++++- .../src/data_column_verification.rs | 9 +++- .../beacon_chain/src/state_advance_timer.rs | 14 ++++++- beacon_node/beacon_chain/src/test_utils.rs | 41 ++++++++++++++++++- beacon_node/beacon_chain/tests/store_tests.rs | 23 ++++++++--- beacon_node/store/src/hot_cold_store.rs | 14 +++++-- beacon_node/store/src/state_cache.rs | 33 +++++++++++---- .../src/execution/state_payload_status.rs | 2 +- 13 files changed, 181 insertions(+), 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4a2b9795d8..48f851c193 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2041,7 +2041,12 @@ impl BeaconChain { } else { let (advanced_state_root, mut state) = self .store - .get_advanced_hot_state(beacon_block_root, request_slot, beacon_state_root)? + .get_advanced_hot_state( + beacon_block_root, + StatePayloadStatus::Pending, + request_slot, + beacon_state_root, + )? 
.ok_or(Error::MissingBeaconState(beacon_state_root))?; if state.current_epoch() < request_epoch { partial_state_advance( @@ -4710,7 +4715,12 @@ impl BeaconChain { .ok_or(Error::MissingBeaconBlock(parent_block_root))?; let (state_root, state) = self .store - .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? + .get_advanced_hot_state( + parent_block_root, + StatePayloadStatus::Pending, + proposal_slot, + block.state_root(), + )? .ok_or(Error::MissingBeaconState(block.state_root()))?; (Cow::Owned(state), state_root) }; @@ -6701,7 +6711,12 @@ impl BeaconChain { } else { let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? + .get_advanced_hot_state( + head_block_root, + StatePayloadStatus::Pending, + target_slot, + head_block.state_root, + )? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index fe111628db..c257ba02ec 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -20,6 +20,7 @@ use tree_hash::TreeHash; use types::data::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, + StatePayloadStatus, }; /// An error occurred while validating a gossip blob. @@ -510,7 +511,12 @@ pub fn validate_blob_sidecar_for_gossip BeaconChain { // state cache thanks to the state advance timer. let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, slot, head_state_root) + .get_advanced_hot_state( + head_block_root, + StatePayloadStatus::Pending, + slot, + head_state_root, + ) .map_err(BlockProductionError::FailedToLoadState)? 
.ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; (state, Some(state_root)) @@ -204,7 +209,11 @@ impl BeaconChain { let (state_root, state) = self .store - .get_advanced_hot_state_from_cache(re_org_parent_block, slot) + .get_advanced_hot_state_from_cache( + re_org_parent_block, + StatePayloadStatus::Pending, + slot, + ) .or_else(|| { warn!(reason = "no state in cache", "Not attempting re-org"); None diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index b7ffa465fd..d63161f279 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -100,7 +100,8 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, - RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, + RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, StatePayloadStatus, + data::DataColumnSidecarError, }; pub const POS_PANDA_BANNER: &str = r#" @@ -1992,9 +1993,16 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + // TODO(gloas): use correct payload_status based on block + let payload_status = StatePayloadStatus::Pending; let (parent_state_root, state) = chain .store - .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? + .get_advanced_hot_state( + root, + payload_status, + block.slot(), + parent_block.state_root(), + )? 
.ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index cc2f47c1b0..e9e1ac9006 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -45,7 +45,7 @@ use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, StatePayloadStatus, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -783,8 +783,16 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; + // TODO(gloas): update head loading to load Full block once fork choice works + let payload_status = StatePayloadStatus::Pending; + let (_head_state_root, head_state) = store - .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block.state_root(), + ) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 1a08ac3f88..fd060e2b59 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -305,8 +305,16 @@ impl CanonicalHead { .get_full_block(&beacon_block_root)? 
.ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let current_slot = fork_choice.fc_store().get_current_slot(); + + // TODO(gloas): pass a better payload status once fork choice is implemented + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = store - .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .get_advanced_hot_state( + beacon_block_root, + payload_status, + current_slot, + beacon_block.state_root(), + )? .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { @@ -673,10 +681,13 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + // TODO(gloas): update once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = self .store .get_advanced_hot_state( new_view.head_block_root, + payload_status, current_slot, beacon_block.state_root(), )? diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 08acfdffa4..9ae9213a70 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -20,7 +20,7 @@ use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, + EthSpec, Hash256, Slot, StatePayloadStatus, }; /// An error occurred while validating a gossip data column. @@ -708,7 +708,12 @@ fn verify_proposer_and_signature( ); chain .store - .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) + .get_advanced_hot_state( + block_parent_root, + StatePayloadStatus::Pending, + column_slot, + parent_block.state_root, + ) .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? 
.ok_or_else(|| { GossipDataColumnError::BeaconChainError(Box::new( diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index cb916cb514..4c070e7ecc 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -26,7 +26,10 @@ use std::sync::{ use task_executor::TaskExecutor; use tokio::time::{Instant, sleep, sleep_until}; use tracing::{Instrument, debug, debug_span, error, instrument, warn}; -use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{ + AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot, + StatePayloadStatus, +}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -277,9 +280,16 @@ fn advance_head(beacon_chain: &Arc>) -> Resu (snapshot.beacon_block_root, snapshot.beacon_state_root()) }; + // TODO(gloas): do better once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block_state_root, + )? 
.ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; let initial_slot = state.slot(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a6dc376e71..b9a4aa6426 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -27,7 +27,7 @@ use bls::{ use eth2::types::{GraffitiPolicy, SignedBlockContentsTuple}; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ - ExecutionLayer, + ExecutionLayer, NewPayloadRequest, NewPayloadRequestGloas, auth::JwtKey, test_utils::{ DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, ExecutionBlockGenerator, MockBuilder, @@ -53,6 +53,7 @@ use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, TestingSlotClock}; use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::per_block_processing::compute_timestamp_at_slot; +use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -2559,7 +2560,7 @@ where } /// Process an execution payload envelope for a Gloas block. - pub fn process_envelope( + pub async fn process_envelope( &self, block_root: Hash256, signed_envelope: SignedExecutionPayloadEnvelope, @@ -2585,6 +2586,42 @@ where ) .expect("should process envelope"); + // Notify the EL of the new payload so forkchoiceUpdated can reference it. 
+ let block = self + .chain + .store + .get_blinded_block(&block_root) + .expect("should read block from store") + .expect("block should exist in store"); + + let bid = &block + .message() + .body() + .signed_execution_payload_bid() + .expect("Gloas block should have a payload bid") + .message; + + let versioned_hashes = bid + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(); + + let request = NewPayloadRequest::Gloas(NewPayloadRequestGloas { + execution_payload: &signed_envelope.message.payload, + versioned_hashes, + parent_beacon_block_root: block.message().parent_root(), + execution_requests: &signed_envelope.message.execution_requests, + }); + + self.chain + .execution_layer + .as_ref() + .expect("harness should have execution layer") + .notify_new_payload(request) + .await + .expect("newPayload should succeed"); + // Store the envelope. self.chain .store diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0143e6573c..d86d71ea69 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3803,7 +3803,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (split_state_root, mut advanced_split_state) = harness .chain .store - .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .get_advanced_hot_state( + split.block_root, + StatePayloadStatus::Pending, + split.slot, + split.state_root, + ) .unwrap() .unwrap(); complete_state_advance( @@ -5472,7 +5477,9 @@ async fn test_gloas_block_and_envelope_storage() { // Process the envelope. 
let envelope = envelope.expect("Gloas block should have envelope"); let mut full_state = pending_state.clone(); - let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; full_state_roots.push(full_state_root); block_roots.push(block_root); @@ -5574,7 +5581,9 @@ async fn test_gloas_state_payload_status() { // Process the envelope and verify the full state has correct payload status. let envelope = envelope.expect("Gloas block should have envelope"); let mut full_state = pending_state; - let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; assert_eq!( full_state.payload_status_with_skipped_pending(), @@ -5636,7 +5645,9 @@ async fn test_gloas_block_replay_with_envelopes() { let envelope = envelope.expect("Gloas block should have envelope"); let mut full_state = pending_state; - let full_state_root = harness.process_envelope(block_root, envelope, &mut full_state); + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; full_states.insert(slot, (full_state_root, full_state.clone())); last_block_root = block_root; @@ -5775,7 +5786,9 @@ async fn test_gloas_hot_state_hierarchy() { let envelope = envelope.expect("Gloas block should have envelope"); let mut full_state = pending_state; - harness.process_envelope(block_root, envelope, &mut full_state); + harness + .process_envelope(block_root, envelope, &mut full_state) + .await; last_block_root = block_root; state = full_state; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c9e1b6062c..0f8924be73 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1144,10 +1144,13 @@ impl, Cold: ItemStore> HotColdDB pub fn 
get_advanced_hot_state( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { + if let Some(cached) = + self.get_advanced_hot_state_from_cache(block_root, payload_status, max_slot) + { return Ok(Some(cached)); } @@ -1169,7 +1172,11 @@ impl, Cold: ItemStore> HotColdDB .into()); } - let state_root = if block_root == split.block_root && split.slot <= max_slot { + // Split state should always be `Pending`. + let state_root = if block_root == split.block_root + && let StatePayloadStatus::Pending = payload_status + && split.slot <= max_slot + { split.state_root } else { state_root @@ -1216,11 +1223,12 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state_from_cache( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, ) -> Option<(Hash256, BeaconState)> { self.state_cache .lock() - .get_by_block_root(block_root, max_slot) + .get_by_block_root(block_root, payload_status, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 4b0d1ee016..86860ac5f8 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -7,7 +7,7 @@ use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; use tracing::instrument; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, execution::StatePayloadStatus}; /// Fraction of the LRU cache to leave intact during culling. const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -23,10 +23,10 @@ pub struct FinalizedState { state: BeaconState, } -/// Map from block_root -> slot -> state_root. +/// Map from (block_root, payload_status) -> slot -> state_root. 
#[derive(Debug, Default)] pub struct BlockMap { - blocks: HashMap, + blocks: HashMap<(Hash256, StatePayloadStatus), SlotMap>, } /// Map from slot -> state_root. @@ -130,8 +130,11 @@ impl StateCache { return Err(Error::FinalizedStateDecreasingSlot); } + let payload_status = state.payload_status(); + // Add to block map. - self.block_map.insert(block_root, state.slot(), state_root); + self.block_map + .insert(block_root, payload_status, state.slot(), state_root); // Prune block map. let state_roots_to_prune = self.block_map.prune(state.slot()); @@ -254,7 +257,9 @@ impl StateCache { // Record the connection from block root and slot to this state. let slot = state.slot(); - self.block_map.insert(block_root, slot, state_root); + let payload_status = state.payload_status(); + self.block_map + .insert(block_root, payload_status, slot, state_root); Ok(PutStateOutcome::New(deleted_states)) } @@ -303,9 +308,10 @@ impl StateCache { pub fn get_by_block_root( &mut self, block_root: Hash256, + payload_status: StatePayloadStatus, slot: Slot, ) -> Option<(Hash256, BeaconState)> { - let slot_map = self.block_map.blocks.get(&block_root)?; + let slot_map = self.block_map.blocks.get(&(block_root, payload_status))?; // Find the state at `slot`, or failing that the most recent ancestor. 
let state_root = slot_map @@ -399,8 +405,14 @@ impl StateCache { } impl BlockMap { - fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { - let slot_map = self.blocks.entry(block_root).or_default(); + fn insert( + &mut self, + block_root: Hash256, + payload_status: StatePayloadStatus, + slot: Slot, + state_root: Hash256, + ) { + let slot_map = self.blocks.entry((block_root, payload_status)).or_default(); slot_map.slots.insert(slot, state_root); } @@ -432,7 +444,10 @@ impl BlockMap { } fn delete_block_states(&mut self, block_root: &Hash256) -> Option { - self.blocks.remove(block_root) + // TODO(gloas): update return type + self.blocks + .remove(&(*block_root, StatePayloadStatus::Pending)); + self.blocks.remove(&(*block_root, StatePayloadStatus::Full)) } } diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs index 053ed14ec4..1661be6060 100644 --- a/consensus/types/src/execution/state_payload_status.rs +++ b/consensus/types/src/execution/state_payload_status.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; /// /// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether /// the payload for the most-recently applied block was also applied. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub enum StatePayloadStatus { /// For states produced by `process_block` executed on a `BeaconBlock`. 
From 57527e509433cf1d6b778648b8ecdaed047175a0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 14:26:21 +1100 Subject: [PATCH 15/20] Fix load_parent --- .../beacon_chain/src/block_verification.rs | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index d63161f279..275297133f 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1993,8 +1993,20 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). - // TODO(gloas): use correct payload_status based on block - let payload_status = StatePayloadStatus::Pending; + // + // Post-Gloas we must also fetch a state with the correct payload status. If the current + // block builds upon the payload of its parent block, then we know the parent block is FULL + // and we need to load the full state. + let payload_status = if block.as_block().fork_name_unchecked().gloas_enabled() { + let parent_bid_block_hash = parent_block.payload_bid_block_hash()?; + if block.as_block().is_parent_block_full(parent_bid_block_hash) { + StatePayloadStatus::Full + } else { + StatePayloadStatus::Pending + } + } else { + StatePayloadStatus::Pending + }; let (parent_state_root, state) = chain .store .get_advanced_hot_state( @@ -2025,7 +2037,9 @@ fn load_parent>( ); } - let beacon_state_root = if state.slot() == parent_block.slot() { + let beacon_state_root = if state.slot() == parent_block.slot() + && let StatePayloadStatus::Pending = payload_status + { // Sanity check. 
if parent_state_root != parent_block.state_root() { return Err(BeaconChainError::DBInconsistent(format!( From 59a2b6dead67811af708eee9bc0b6c1a62852947 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 16:02:15 +1100 Subject: [PATCH 16/20] Fix state for block production --- .../beacon_chain/src/block_production/mod.rs | 30 ++++++++++++++++--- beacon_node/beacon_chain/tests/store_tests.rs | 4 +-- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index 38cfb5fa5e..b33323f527 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -37,8 +37,14 @@ impl BeaconChain { }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some((re_org_state, re_org_state_root)) = - self.get_state_for_re_org(slot, head_slot, head_block_root) + // TODO(gloas): re-enable reorgs + let gloas_enabled = self + .spec + .fork_name_at_slot::(slot) + .gloas_enabled(); + if !gloas_enabled + && let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( %slot, @@ -49,13 +55,29 @@ impl BeaconChain { } else { // Fetch the head state advanced through to `slot`, which should be present in the // state cache thanks to the state advance timer. 
+ // TODO(gloas): need to fix this once fork choice understands payloads + // for now we just use the existence of the head's payload envelope to determine + // whether we should build atop it + let (payload_status, parent_state_root) = if gloas_enabled + && let Ok(Some(envelope)) = self.store.get_payload_envelope(&head_block_root) + { + debug!( + %slot, + parent_state_root = ?envelope.message.state_root, + parent_block_root = ?head_block_root, + "Building Gloas block on full state" + ); + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, head_state_root) + }; let (state_root, state) = self .store .get_advanced_hot_state( head_block_root, - StatePayloadStatus::Pending, + payload_status, slot, - head_state_root, + parent_state_root, ) .map_err(BlockProductionError::FailedToLoadState)? .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index d86d71ea69..770d3ababd 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5461,7 +5461,7 @@ async fn test_gloas_block_and_envelope_storage() { let slot = Slot::new(i); harness.advance_slot(); - let (block_contents, envelope, pending_state) = + let (block_contents, envelope, mut pending_state) = harness.make_block_with_envelope(state, slot).await; let block_root = block_contents.0.canonical_root(); @@ -5471,7 +5471,7 @@ async fn test_gloas_block_and_envelope_storage() { .await .unwrap(); - let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); + let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); pending_state_roots.push(pending_state_root); // Process the envelope. From adfa3b882da2e96a0d69b98bf2c2a755268c7760 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 25 Feb 2026 17:09:32 +1100 Subject: [PATCH 17/20] First Gloas test passes! 
--- .../beacon_chain/src/block_verification.rs | 38 +++++++++++-------- beacon_node/beacon_chain/tests/store_tests.rs | 2 + .../state_processing/src/state_advance.rs | 5 +++ 3 files changed, 29 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 275297133f..3a8672ddd8 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1558,7 +1558,11 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = if parent.beacon_block.slot() == state.slot() { + // TODO(gloas): could do a similar optimisation here for Full blocks if we have access + // to the parent envelope and its `state_root`. + let state_root = if parent.beacon_block.slot() == state.slot() + && state.payload_status() == StatePayloadStatus::Pending + { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -1997,24 +2001,26 @@ fn load_parent>( // Post-Gloas we must also fetch a state with the correct payload status. If the current // block builds upon the payload of its parent block, then we know the parent block is FULL // and we need to load the full state. 
- let payload_status = if block.as_block().fork_name_unchecked().gloas_enabled() { - let parent_bid_block_hash = parent_block.payload_bid_block_hash()?; - if block.as_block().is_parent_block_full(parent_bid_block_hash) { - StatePayloadStatus::Full + let (payload_status, parent_state_root) = + if block.as_block().fork_name_unchecked().gloas_enabled() { + let parent_bid_block_hash = parent_block.payload_bid_block_hash()?; + if block.as_block().is_parent_block_full(parent_bid_block_hash) { + // TODO(gloas): loading the envelope here is not very efficient + let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + })?; + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, parent_block.state_root()) + } } else { - StatePayloadStatus::Pending - } - } else { - StatePayloadStatus::Pending - }; + (StatePayloadStatus::Pending, parent_block.state_root()) + }; let (parent_state_root, state) = chain .store - .get_advanced_hot_state( - root, - payload_status, - block.slot(), - parent_block.state_root(), - )? + .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? .ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 770d3ababd..8e10f0e85f 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5477,9 +5477,11 @@ async fn test_gloas_block_and_envelope_storage() { // Process the envelope. 
let envelope = envelope.expect("Gloas block should have envelope"); let mut full_state = pending_state.clone(); + let envelope_state_root = envelope.message.state_root; let full_state_root = harness .process_envelope(block_root, envelope, &mut full_state) .await; + assert_eq!(full_state_root, envelope_state_root); full_state_roots.push(full_state_root); block_roots.push(block_root); diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 11a956bc2a..b1d8770d4f 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -77,6 +77,11 @@ pub fn partial_state_advance( // (all-zeros) state root. let mut initial_state_root = Some(if state.slot() > state.latest_block_header().slot { state_root_opt.unwrap_or_else(Hash256::zero) + } else if state.slot() == state.latest_block_header().slot + && !state.latest_block_header().state_root.is_zero() + { + // Post-Gloas Full block case. + state.latest_block_header().state_root } else { state_root_opt.ok_or(Error::StateRootNotProvided)? }); From edf77a52980b8710ec5c6cc120ea034b7af816e3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Feb 2026 10:20:47 +1100 Subject: [PATCH 18/20] Small fixes relating to genesis --- beacon_node/beacon_chain/tests/store_tests.rs | 17 ++++++++++++++--- .../state_processing/src/block_replayer.rs | 4 ++-- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 8e10f0e85f..2c8db586b0 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5671,14 +5671,25 @@ async fn test_gloas_block_replay_with_envelopes() { !blocks_pending.is_empty(), "should have blocks for pending replay" ); - // For Pending, no envelope for the last block; envelopes for intermediate blocks - // whose payloads are canonical. 
- let expected_pending_envelopes = blocks_pending.len().saturating_sub(1); + // For Pending, no envelope for the first block (slot 0) or last block; envelopes for + // intermediate blocks whose payloads are canonical. + let expected_pending_envelopes = blocks_pending.len().saturating_sub(2); assert_eq!( envelopes_pending.len(), expected_pending_envelopes, "pending replay should have envelopes for all blocks except the last" ); + assert!( + blocks_pending + .iter() + .skip(1) + .take(envelopes_pending.len()) + .map(|block| block.slot()) + .eq(envelopes_pending + .iter() + .map(|envelope| envelope.message.slot)), + "block and envelope slots should match" + ); // Load blocks for Full replay (envelopes for all blocks including the last). let (blocks_full, envelopes_full) = store diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 22096293af..313a20da46 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -320,8 +320,8 @@ where .block_hash; // Similar to `is_parent_block_full`, but reading the block hash from the - // not-yet-applied `block`. - if block.is_parent_block_full(latest_bid_block_hash) { + // not-yet-applied `block`. The 0x0 case covers genesis (no block replay reqd). + if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { let envelope = next_envelope_at_slot(self.state.slot())?; // State root for the next slot processing is now the envelope's state root. self.apply_payload_envelope(&envelope, state_root)? From e44f37895d2c99265adb7653f6ed3380c282048b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Feb 2026 17:15:32 +1100 Subject: [PATCH 19/20] Simplify diff strat and expand tests (they mostly pass!) 
--- beacon_node/beacon_chain/tests/store_tests.rs | 115 +++++++++++------- beacon_node/store/src/hdiff.rs | 18 ++- beacon_node/store/src/hot_cold_store.rs | 79 ++++++++---- .../state_processing/src/block_replayer.rs | 2 +- consensus/types/src/state/beacon_state.rs | 19 --- 5 files changed, 131 insertions(+), 102 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 2c8db586b0..96dedefda9 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5440,27 +5440,67 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { /// Test basic Gloas block + envelope storage and retrieval. #[tokio::test] -async fn test_gloas_block_and_envelope_storage() { +async fn test_gloas_block_and_envelope_storage_no_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_no_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![], true).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], true).await +} + +async fn test_gloas_block_and_envelope_storage_generic( + num_slots: u64, + skipped_slots: Vec, + use_state_cache: bool, +) { if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { return; } let db_path = tempdir().unwrap(); - let store = get_store(&db_path); + let store_config = if !use_state_cache { + StoreConfig { + state_cache_size: new_non_zero_usize(1), + ..StoreConfig::default() + } + } else { + StoreConfig::default() + }; + let spec = test_spec::(); + let store = get_store_generic(&db_path, store_config, spec); let 
harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec; - let num_blocks = 8u64; - let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); let mut state = genesis_state; let mut block_roots = vec![]; - let mut pending_state_roots = vec![]; - let mut full_state_roots = vec![]; + let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)]; - for i in 1..=num_blocks { + for i in 1..=num_slots { let slot = Slot::new(i); harness.advance_slot(); + if skipped_slots.contains(&i) { + complete_state_advance(&mut state, None, slot, spec) + .expect("should be able to advance state to slot"); + + let state_root = state.canonical_root().unwrap(); + store.put_state(&state_root, &state).unwrap(); + stored_states.push((slot, state.payload_status(), state_root)); + } + let (block_contents, envelope, mut pending_state) = harness.make_block_with_envelope(state, slot).await; let block_root = block_contents.0.canonical_root(); @@ -5472,7 +5512,7 @@ async fn test_gloas_block_and_envelope_storage() { .unwrap(); let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); - pending_state_roots.push(pending_state_root); + stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root)); // Process the envelope. let envelope = envelope.expect("Gloas block should have envelope"); @@ -5482,13 +5522,13 @@ async fn test_gloas_block_and_envelope_storage() { .process_envelope(block_root, envelope, &mut full_state) .await; assert_eq!(full_state_root, envelope_state_root); - full_state_roots.push(full_state_root); + stored_states.push((slot, StatePayloadStatus::Full, full_state_root)); block_roots.push(block_root); state = full_state; } - // Verify storage. + // Verify block storage. for (i, block_root) in block_roots.iter().enumerate() { // Block can be loaded. 
assert!( @@ -5504,41 +5544,28 @@ async fn test_gloas_block_and_envelope_storage() { "envelope at slot {} should be in DB", i + 1 ); + } - // Pending state can be loaded. - let pending_state_root = pending_state_roots[i]; - let loaded_pending_state = store - .get_state(&pending_state_root, None, CACHE_STATE_IN_TESTS) - .unwrap(); - assert!( - loaded_pending_state.is_some(), - "pending state at slot {} should be in DB", - i + 1 - ); - let loaded_pending_state = loaded_pending_state.unwrap(); + // Verify state storage. + // Iterate in reverse order to frustrate the cache. + for (slot, payload_status, state_root) in stored_states.into_iter().rev() { + println!("{slot}: {state_root:?}"); + let Some(mut loaded_state) = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + else { + panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}"); + }; + assert_eq!(loaded_state.slot(), slot); assert_eq!( - loaded_pending_state.payload_status_with_skipped_pending(), - StatePayloadStatus::Pending, - "loaded pending state at slot {} should have Pending status", - i + 1 + loaded_state.payload_status(), + payload_status, + "slot = {slot}" ); - - // Full state can be loaded. - let full_state_root = full_state_roots[i]; - let loaded_full_state = store - .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) - .unwrap(); - assert!( - loaded_full_state.is_some(), - "full state at slot {} should be in DB", - i + 1 - ); - let loaded_full_state = loaded_full_state.unwrap(); assert_eq!( - loaded_full_state.payload_status_with_skipped_pending(), - StatePayloadStatus::Full, - "loaded full state at slot {} should have Full status", - i + 1 + loaded_state.canonical_root().unwrap(), + state_root, + "slot = {slot}" ); } } @@ -5574,7 +5601,7 @@ async fn test_gloas_state_payload_status() { // Verify the pending state has correct payload status. 
assert_eq!( - pending_state.payload_status_with_skipped_pending(), + pending_state.payload_status(), StatePayloadStatus::Pending, "pending state at slot {} should be Pending", i @@ -5588,7 +5615,7 @@ async fn test_gloas_state_payload_status() { .await; assert_eq!( - full_state.payload_status_with_skipped_pending(), + full_state.payload_status(), StatePayloadStatus::Full, "full state at slot {} should be Full", i @@ -5600,7 +5627,7 @@ async fn test_gloas_state_payload_status() { .unwrap() .expect("full state should exist in DB"); assert_eq!( - loaded_full.payload_status_with_skipped_pending(), + loaded_full.payload_status(), StatePayloadStatus::Full, "loaded full state at slot {} should be Full after round-trip", i diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3ad6a1f0d3..e678a344c2 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -662,18 +662,14 @@ impl HierarchyModuli { &self, slot: Slot, start_slot: Slot, - payload_status: StatePayloadStatus, + _payload_status: StatePayloadStatus, ) -> Result { - // Store all Full states by replaying from their respective Pending state at the same slot. - // Make an exception for the genesis state, which "counts as" Full by virtue of having 0x0 - // in both `latest_block_hash` and `latest_execution_payload_bid.block_hash`. - if let StatePayloadStatus::Full = payload_status - && slot >= start_slot - && slot != 0 - { - return Ok(StorageStrategy::ReplayFrom(slot)); - } - + // FIXME(sproul): Reverted the idea of using different storage strategies for full and + // pending states, this has the consequence of storing double diffs and double snapshots + // at full slots. The complexity of managing skipped slots was the main impetus for + // reverting the payload-status sensitive design: a Full skipped slot has no same-slot + // Pending state to replay from, so has to be handled differently from Full non-skipped + // slots. 
match slot.cmp(&start_slot) { Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)), Ordering::Equal => return Ok(StorageStrategy::Snapshot), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 0f8924be73..883560e62c 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1665,7 +1665,7 @@ impl, Cold: ItemStore> HotColdDB state: &BeaconState, ops: &mut Vec, ) -> Result<(), Error> { - let payload_status = state.payload_status_with_skipped_pending(); + let payload_status = state.payload_status(); match self.state_cache.lock().put_state( *state_root, @@ -1735,7 +1735,7 @@ impl, Cold: ItemStore> HotColdDB self, *state_root, state, - self.hot_storage_strategy(state.slot(), state.payload_status_with_skipped_pending())?, + self.hot_storage_strategy(state.slot(), state.payload_status())?, )?; ops.push(hot_state_summary.as_kv_store_op(*state_root)); Ok(hot_state_summary) @@ -1748,8 +1748,7 @@ impl, Cold: ItemStore> HotColdDB ops: &mut Vec, ) -> Result<(), Error> { let slot = state.slot(); - let storage_strategy = - self.hot_storage_strategy(slot, state.payload_status_with_skipped_pending())?; + let storage_strategy = self.hot_storage_strategy(slot, state.payload_status())?; match storage_strategy { StorageStrategy::ReplayFrom(_) => { // Already have persisted the state summary, don't persist anything else @@ -1885,19 +1884,29 @@ impl, Cold: ItemStore> HotColdDB return Ok(StatePayloadStatus::Pending); } - // Load the hot state summary for the previous state. If it has the same slot as this - // summary then we know this summary is for a `Full` block (payload state). - // NOTE: We treat any and all skipped-slot states as `Pending` by this definition, which is - // perhaps a bit strange (they could have a payload most-recently applied). - // TODO(gloas): could maybe simplify this by checking diff_base_slot == slot? 
+ // Load the hot state summary for the previous state. + // + // If it has the same slot as this summary then we know this summary is for a `Full` block + // (payload state), because they are always diffed against their same-slot `Pending` state. + // + // If the previous summary has a different slot AND the latest block is from `summary.slot`, + // then this state *must* be `Pending` (it is the summary for latest block itself). + // + // Otherwise, we are at a skipped slot and must traverse the graph of state summaries + // backwards until we reach a summary for the latest block. This recursion could be quite + // far in the case of a long skip. We could optimise this in future using the + // `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB + // migration. let previous_state_summary = self .load_hot_state_summary(&previous_state_root)? .ok_or(Error::MissingHotStateSummary(previous_state_root))?; if previous_state_summary.slot == summary.slot { Ok(StatePayloadStatus::Full) - } else { + } else if summary.slot == summary.latest_block_slot { Ok(StatePayloadStatus::Pending) + } else { + self.get_hot_state_summary_payload_status(&previous_state_summary) } } @@ -2010,6 +2019,12 @@ impl, Cold: ItemStore> HotColdDB ) = self.load_hot_state_summary(state_root)? { let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + debug!( + %slot, + ?state_root, + ?payload_status, + "Loading hot state" + ); let mut state = match self.hot_storage_strategy(slot, payload_status)? 
{ strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { let buffer_timer = metrics::start_timer_vec( @@ -2082,9 +2097,7 @@ impl, Cold: ItemStore> HotColdDB desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot - && base_state.payload_status_with_skipped_pending() == desired_payload_status - { + if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { return Ok(base_state); } @@ -2124,10 +2137,19 @@ impl, Cold: ItemStore> HotColdDB Ok(()) }; + debug!( + %slot, + blocks = ?blocks.iter().map(|block| block.slot()).collect::>(), + envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::>(), + payload_status = ?desired_payload_status, + "Replaying blocks and envelopes" + ); + self.replay_blocks( base_state, blocks, envelopes, + desired_payload_status, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2440,10 +2462,13 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; + // TODO(gloas): calculate correct payload status for cold states + let payload_status = StatePayloadStatus::Pending; let state = self.replay_blocks( base_state, blocks, envelopes, + payload_status, slot, Some(state_root_iter), None, @@ -2681,6 +2706,7 @@ impl, Cold: ItemStore> HotColdDB } // Load the payload for the last block if desired. 
+ // TODO(gloas): check that we don't load a duplicate in the case of a skipped slot if let StatePayloadStatus::Full = desired_payload_status { let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), @@ -2700,6 +2726,7 @@ impl, Cold: ItemStore> HotColdDB state: BeaconState, blocks: Vec>, envelopes: Vec>, + desired_payload_status: StatePayloadStatus, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2708,7 +2735,8 @@ impl, Cold: ItemStore> HotColdDB let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification(); + .minimal_block_root_verification() + .desired_state_payload_status(desired_payload_status); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { @@ -4176,21 +4204,14 @@ impl HotStateSummary { let latest_block_root = state.get_latest_block_root(state_root); // Payload status of the state determines a lot about how it is stored. - let payload_status = state.payload_status_with_skipped_pending(); + let payload_status = state.payload_status(); let get_state_root = |slot| { if slot == state.slot() { - // In the case where this state is a `Full` state, use the `state_root` of its - // prior `Pending` state. 
- if let StatePayloadStatus::Full = payload_status { - // TODO(gloas): change this assert to debug_assert_eq - assert_eq!(state.latest_block_header().slot, state.slot()); - Ok(state.latest_block_header().state_root) - } else { - Ok::<_, Error>(state_root) - } + // TODO(gloas): I think we can remove this case + Ok::<_, Error>(state_root) } else { - Ok(get_ancestor_state_root(store, state, slot).map_err(|e| { + Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| { Error::StateSummaryIteratorError { error: e, from_state_root: state_root, @@ -4210,8 +4231,12 @@ impl HotStateSummary { let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() - } else if let StatePayloadStatus::Full = payload_status { - get_state_root(state.slot())? + } else if let StatePayloadStatus::Full = payload_status + && state.slot() == state.latest_block_header().slot + { + // A Full state at a non-skipped slot builds off the Pending state of the same slot, + // i.e. the state with the same `state_root` as its `BeaconBlock` + state.latest_block_header().state_root } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 313a20da46..93d0313867 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -320,7 +320,7 @@ where .block_hash; // Similar to `is_parent_block_full`, but reading the block hash from the - // not-yet-applied `block`. The 0x0 case covers genesis (no block replay reqd). + // not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd). if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { let envelope = next_envelope_at_slot(self.state.slot())?; // State root for the next slot processing is now the envelope's state root. 
diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index e23215fc5a..34cfd0ca1c 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1284,25 +1284,6 @@ impl BeaconState { } } - /// Determine the payload status of this state with all skipped slots considered pending. - /// - /// Prior to Gloas this is always `Pending`. - /// - /// Post-Gloas, the definition of the `StatePayloadStatus` is: - /// - /// - `Full` if this state is the IMMEDIATE result of envelope processing (no skipped slots) - /// - `Pending` if this state is the result of block processing, or slot processing (skipped - /// slot). - pub fn payload_status_with_skipped_pending(&self) -> StatePayloadStatus { - if !self.fork_name_unchecked().gloas_enabled() { - StatePayloadStatus::Pending - } else if self.is_parent_block_full() && self.latest_block_header().slot == self.slot() { - StatePayloadStatus::Full - } else { - StatePayloadStatus::Pending - } - } - /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 From 30f8cab18262674b7a5c3f624f694dbc9440b37b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 26 Feb 2026 21:38:57 +1100 Subject: [PATCH 20/20] Fixes to test relying on cold DB --- beacon_node/beacon_chain/tests/store_tests.rs | 33 ++++++++----------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 96dedefda9..34865ebcee 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5790,7 +5790,8 @@ async fn test_gloas_hot_state_hierarchy() { // Build enough blocks to span multiple epochs. With MinimalEthSpec (8 slots/epoch), // 40 slots covers 5 epochs. 
let num_blocks = E::slots_per_epoch() * 5; - let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + // TODO(gloas): enable finalisation by increasing this threshold + let some_validators = (0..LOW_VALIDATOR_COUNT / 2).collect::>(); let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); @@ -5815,7 +5816,7 @@ async fn test_gloas_hot_state_hierarchy() { state_root, last_block_root.into(), &block_contents.0, - &all_validators, + &some_validators, ); } @@ -5838,27 +5839,17 @@ async fn test_gloas_hot_state_hierarchy() { let _head_state = harness.get_current_state(); let _head_slot = harness.head_slot(); - // States at some slots should be retrievable. + // States at all slots on the canonical chain should be retrievable. for slot_num in 1..=num_blocks { let slot = Slot::new(slot_num); // Get the state root from the block at this slot via the state root iterator. - let state_root_result: Option<(Hash256, Slot)> = harness - .chain - .forwards_iter_state_roots(slot) - .expect("should get iter") - .map(Result::unwrap) - .find(|(_, s)| *s == slot); + let state_root = harness.chain.state_root_at_slot(slot).unwrap().unwrap(); - if let Some((state_root, _)) = state_root_result { - let loaded_state = store - .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) - .unwrap(); - assert!( - loaded_state.is_some(), - "state at slot {} should be loadable", - slot_num - ); - } + let mut loaded_state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + assert_eq!(loaded_state.canonical_root().unwrap(), state_root); } // Verify chain dump and iterators work with Gloas states. @@ -5917,7 +5908,9 @@ fn check_chain_dump_from_slot(harness: &TestHarness, from_slot: Slot, expected_l ); // Check presence of execution payload on disk. - if harness.chain.spec.bellatrix_fork_epoch.is_some() { + if harness.chain.spec.bellatrix_fork_epoch.is_some() + && !harness.chain.spec.is_gloas_scheduled() + { assert!( harness .chain