diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 07f3bb01fa..ab2097e001 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2031,9 +2031,16 @@ impl BeaconChain { // required information. (justified_checkpoint, committee_len) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (advanced_state_root, mut state) = self .store - .get_advanced_hot_state(beacon_block_root, request_slot, beacon_state_root)? + .get_advanced_hot_state( + beacon_block_root, + StatePayloadStatus::Pending, + request_slot, + beacon_state_root, + )? .ok_or(Error::MissingBeaconState(beacon_state_root))?; if state.current_epoch() < request_epoch { partial_state_advance( @@ -4662,12 +4669,19 @@ impl BeaconChain { if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) } else { + // TODO(gloas): this function needs updating to be envelope-aware + // See: https://github.com/sigp/lighthouse/issues/8957 let block = self .get_blinded_block(&parent_block_root)? .ok_or(Error::MissingBeaconBlock(parent_block_root))?; let (state_root, state) = self .store - .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? + .get_advanced_hot_state( + parent_block_root, + StatePayloadStatus::Pending, + proposal_slot, + block.state_root(), + )? .ok_or(Error::MissingBeaconState(block.state_root()))?; (Cow::Owned(state), state_root) }; @@ -6599,9 +6613,16 @@ impl BeaconChain { let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. 
Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? + .get_advanced_hot_state( + head_block_root, + StatePayloadStatus::Pending, + target_slot, + head_block.state_root, + )? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index fe111628db..86b385d818 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -20,6 +20,7 @@ use tree_hash::TreeHash; use types::data::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, + StatePayloadStatus, }; /// An error occurred while validating a gossip blob. @@ -508,9 +509,16 @@ pub fn validate_blob_sidecar_for_gossip = (BeaconBlock>, ConsensusBlockValue); +type BlockProductionResult = (BeaconBlock, BeaconState, ConsensusBlockValue); pub type PreparePayloadResult = Result, BlockProductionError>; pub type PreparePayloadHandle = JoinHandle>>; @@ -425,6 +425,12 @@ impl BeaconChain { )) } + /// Complete a block by computing its state root, and + /// + /// Return `(block, pending_state, block_value)` where: + /// + /// - `pending_state` is the state post block application (prior to payload application) + /// - `block_value` is the consensus-layer rewards for `block` #[allow(clippy::type_complexity)] fn complete_partial_beacon_block_gloas( &self, @@ -433,7 +439,7 @@ impl BeaconChain { payload_data: Option>, mut state: BeaconState, verification: ProduceBlockVerification, - ) -> Result<(BeaconBlock>, u64), BlockProductionError> { + ) -> Result, BlockProductionError> { let PartialBeaconBlock { slot, proposer_index, @@ -545,6 +551,9 @@ impl BeaconChain { drop(state_root_timer); + // Clone the Pending state (post-block, pre-envelope) for callers 
that need it. + let pending_state = state.clone(); + let (mut block, _) = signed_beacon_block.deconstruct(); *block.state_root_mut() = state_root; @@ -605,7 +614,7 @@ impl BeaconChain { "Produced beacon block" ); - Ok((block, consensus_block_value)) + Ok((block, pending_state, consensus_block_value)) } // TODO(gloas) introduce `ProposerPreferences` so we can build out trustless diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index 76c8b77e93..b33323f527 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use proto_array::ProposerHeadError; use slot_clock::SlotClock; use tracing::{debug, error, info, instrument, warn}; -use types::{BeaconState, Hash256, Slot}; +use types::{BeaconState, Hash256, Slot, StatePayloadStatus}; use crate::{ BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig, @@ -37,8 +37,14 @@ impl BeaconChain { }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some((re_org_state, re_org_state_root)) = - self.get_state_for_re_org(slot, head_slot, head_block_root) + // TODO(gloas): re-enable reorgs + let gloas_enabled = self + .spec + .fork_name_at_slot::(slot) + .gloas_enabled(); + if !gloas_enabled + && let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( %slot, @@ -49,9 +55,30 @@ impl BeaconChain { } else { // Fetch the head state advanced through to `slot`, which should be present in the // state cache thanks to the state advance timer. 
+ // TODO(gloas): need to fix this once fork choice understands payloads + // for now we just use the existence of the head's payload envelope to determine + // whether we should build atop it + let (payload_status, parent_state_root) = if gloas_enabled + && let Ok(Some(envelope)) = self.store.get_payload_envelope(&head_block_root) + { + debug!( + %slot, + parent_state_root = ?envelope.message.state_root, + parent_block_root = ?head_block_root, + "Building Gloas block on full state" + ); + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, head_state_root) + }; let (state_root, state) = self .store - .get_advanced_hot_state(head_block_root, slot, head_state_root) + .get_advanced_hot_state( + head_block_root, + payload_status, + slot, + parent_state_root, + ) .map_err(BlockProductionError::FailedToLoadState)? .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; (state, Some(state_root)) @@ -204,7 +231,11 @@ impl BeaconChain { let (state_root, state) = self .store - .get_advanced_hot_state_from_cache(re_org_parent_block, slot) + .get_advanced_hot_state_from_cache( + re_org_parent_block, + StatePayloadStatus::Pending, + slot, + ) .or_else(|| { warn!(reason = "no state in cache", "Not attempting re-org"); None diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index b748bf5c6c..1be9bd4181 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -99,7 +99,8 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, FullPayload, Hash256, InconsistentFork, KzgProofs, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, StatePayloadStatus, + 
data::DataColumnSidecarError, }; /// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. @@ -1491,7 +1492,11 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - let state_root = if parent.beacon_block.slot() == state.slot() { + // TODO(gloas): could do a similar optimisation here for Full blocks if we have access + // to the parent envelope and its `state_root`. + let state_root = if parent.beacon_block.slot() == state.slot() + && state.payload_status() == StatePayloadStatus::Pending + { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -1908,9 +1913,31 @@ fn load_parent>( // Retrieve any state that is advanced through to at most `block.slot()`: this is // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). + // + // Post-Gloas we must also fetch a state with the correct payload status. If the current + // block builds upon the payload of its parent block, then we know the parent block is FULL + // and we need to load the full state. 
+ let (payload_status, parent_state_root) = + if block.as_block().fork_name_unchecked().gloas_enabled() + && let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() + { + if block.as_block().is_parent_block_full(parent_bid_block_hash) { + // TODO(gloas): loading the envelope here is not very efficient + let envelope = chain.store.get_payload_envelope(&root)?.ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing envelope for parent block {root:?}", + )) + })?; + (StatePayloadStatus::Full, envelope.message.state_root) + } else { + (StatePayloadStatus::Pending, parent_block.state_root()) + } + } else { + (StatePayloadStatus::Pending, parent_block.state_root()) + }; let (parent_state_root, state) = chain .store - .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? + .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? .ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), @@ -1933,7 +1960,9 @@ fn load_parent>( ); } - let beacon_state_root = if state.slot() == parent_block.slot() { + let beacon_state_root = if state.slot() == parent_block.slot() + && let StatePayloadStatus::Pending = payload_status + { // Sanity check. if parent_state_root != parent_block.state_root() { return Err(BeaconChainError::DBInconsistent(format!( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index d5935b492a..59fa5ec9ec 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -45,7 +45,7 @@ use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, StatePayloadStatus, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -783,8 +783,16 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; + // TODO(gloas): update head loading to load Full block once fork choice works + let payload_status = StatePayloadStatus::Pending; + let (_head_state_root, head_state) = store - .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block.state_root(), + ) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 1a08ac3f88..fd060e2b59 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -305,8 +305,16 @@ impl CanonicalHead { .get_full_block(&beacon_block_root)? .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let current_slot = fork_choice.fc_store().get_current_slot(); + + // TODO(gloas): pass a better payload status once fork choice is implemented + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = store - .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? + .get_advanced_hot_state( + beacon_block_root, + payload_status, + current_slot, + beacon_block.state_root(), + )? .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; let snapshot = BeaconSnapshot { @@ -673,10 +681,13 @@ impl BeaconChain { .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + // TODO(gloas): update once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = self .store .get_advanced_hot_state( new_view.head_block_root, + payload_status, current_slot, beacon_block.state_root(), )? 
diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 08acfdffa4..dde9fad342 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -20,7 +20,7 @@ use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, + EthSpec, Hash256, Slot, StatePayloadStatus, }; /// An error occurred while validating a gossip data column. @@ -706,9 +706,16 @@ fn verify_proposer_and_signature( index = %column_index, "Proposer shuffling cache miss for column verification" ); + // We assume that the `Pending` state has the same shufflings as a `Full` state + // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root chain .store - .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) + .get_advanced_hot_state( + block_parent_root, + StatePayloadStatus::Pending, + column_slot, + parent_block.state_root, + ) .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? .ok_or_else(|| { GossipDataColumnError::BeaconChainError(Box::new( diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs index 1e1823a836..c8dfe1ac9b 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs @@ -16,6 +16,7 @@ use store::{ use tracing::{debug, info, warn}; use types::{ BeaconState, CACHED_EPOCHS, ChainSpec, Checkpoint, CommitteeCache, EthSpec, Hash256, Slot, + execution::StatePayloadStatus, }; /// We stopped using the pruning checkpoint in schema v23 but never explicitly deleted it. 
@@ -58,6 +59,7 @@ pub fn get_state_v22( base_state, summary.slot, summary.latest_block_root, + StatePayloadStatus::Pending, update_cache, ) .map(Some) diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index cb916cb514..4c070e7ecc 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -26,7 +26,10 @@ use std::sync::{ use task_executor::TaskExecutor; use tokio::time::{Instant, sleep, sleep_until}; use tracing::{Instrument, debug, debug_span, error, instrument, warn}; -use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{ + AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot, + StatePayloadStatus, +}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -277,9 +280,16 @@ fn advance_head(beacon_chain: &Arc>) -> Resu (snapshot.beacon_block_root, snapshot.beacon_state_root()) }; + // TODO(gloas): do better once we have fork choice + let payload_status = StatePayloadStatus::Pending; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? + .get_advanced_hot_state( + head_block_root, + payload_status, + current_slot, + head_block_state_root, + )? 
.ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; let initial_slot = state.slot(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index eefb5d48b7..4bc5bb21d3 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -27,7 +27,7 @@ use bls::{ use eth2::types::{GraffitiPolicy, SignedBlockContentsTuple}; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ - ExecutionLayer, + ExecutionLayer, NewPayloadRequest, NewPayloadRequestGloas, auth::JwtKey, test_utils::{DEFAULT_JWT_SECRET, ExecutionBlockGenerator, MockBuilder, MockExecutionLayer}, }; @@ -52,7 +52,8 @@ use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::ConsensusContext; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::per_block_processing::{ - BlockSignatureStrategy, VerifyBlockRoot, per_block_processing, + BlockSignatureStrategy, VerifyBlockRoot, deneb::kzg_commitment_to_versioned_hash, + per_block_processing, }; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; @@ -66,6 +67,7 @@ use store::database::interface::BeaconNodeBackend; use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; +use tracing::debug; use tree_hash::TreeHash; use typenum::U4294967296; use types::attestation::IndexedAttestationBase; @@ -1092,6 +1094,86 @@ where (block_contents, block_response.state) } + /// Returns a newly created block, signed by the proposer for the given slot, + /// along with the execution payload envelope (for Gloas) and the pending state. + /// + /// For pre-Gloas forks, the envelope is `None` and this behaves like `make_block`. 
+ pub async fn make_block_with_envelope( + &self, + mut state: BeaconState, + slot: Slot, + ) -> ( + SignedBlockContentsTuple, + Option>, + BeaconState, + ) { + assert_ne!(slot, 0, "can't produce a block at slot 0"); + assert!(slot >= state.slot()); + + if state.fork_name_unchecked().gloas_enabled() + || self.spec.fork_name_at_slot::(slot).gloas_enabled() + { + complete_state_advance(&mut state, None, slot, &self.spec) + .expect("should be able to advance state to slot"); + state.build_caches(&self.spec).expect("should build caches"); + + let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); + + let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); + let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); + + let (block, pending_state, _consensus_block_value) = self + .chain + .produce_block_on_state_gloas( + state, + None, + slot, + randao_reveal, + graffiti_settings, + ProduceBlockVerification::VerifyRandao, + ) + .await + .unwrap(); + + let signed_block = Arc::new(block.sign( + &self.validator_keypairs[proposer_index].sk, + &pending_state.fork(), + pending_state.genesis_validators_root(), + &self.spec, + )); + + // Retrieve the cached envelope produced during block production and sign it. 
+ let signed_envelope = self + .chain + .pending_payload_envelopes + .write() + .remove(slot) + .map(|envelope| { + let epoch = slot.epoch(E::slots_per_epoch()); + let domain = self.spec.get_domain( + epoch, + Domain::BeaconBuilder, + &pending_state.fork(), + pending_state.genesis_validators_root(), + ); + let message = envelope.signing_root(domain); + let signature = self.validator_keypairs[proposer_index].sk.sign(message); + SignedExecutionPayloadEnvelope { + message: envelope, + signature, + } + }); + + let block_contents: SignedBlockContentsTuple = (signed_block, None); + (block_contents, signed_envelope, pending_state) + } else { + let (block_contents, state) = self.make_block(state, slot).await; + (block_contents, None, state) + } + } + /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after /// caches are built but before the generated block is processed. pub async fn make_block_return_pre_state( @@ -2575,6 +2657,84 @@ where Ok(block_hash) } + /// Process an execution payload envelope for a Gloas block. + pub async fn process_envelope( + &self, + block_root: Hash256, + signed_envelope: SignedExecutionPayloadEnvelope, + pending_state: &mut BeaconState, + ) -> Hash256 { + let state_root = signed_envelope.message.state_root; + debug!( + slot = %signed_envelope.message.slot, + ?state_root, + "Processing execution payload envelope" + ); + let block_state_root = pending_state + .update_tree_hash_cache() + .expect("should compute pending state root"); + + state_processing::envelope_processing::process_execution_payload_envelope( + pending_state, + Some(block_state_root), + &signed_envelope, + state_processing::VerifySignatures::True, + state_processing::envelope_processing::VerifyStateRoot::True, + &self.spec, + ) + .expect("should process envelope"); + + // Notify the EL of the new payload so forkchoiceUpdated can reference it. 
+ let block = self + .chain + .store + .get_blinded_block(&block_root) + .expect("should read block from store") + .expect("block should exist in store"); + + let bid = &block + .message() + .body() + .signed_execution_payload_bid() + .expect("Gloas block should have a payload bid") + .message; + + let versioned_hashes = bid + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(); + + let request = NewPayloadRequest::Gloas(NewPayloadRequestGloas { + execution_payload: &signed_envelope.message.payload, + versioned_hashes, + parent_beacon_block_root: block.message().parent_root(), + execution_requests: &signed_envelope.message.execution_requests, + }); + + self.chain + .execution_layer + .as_ref() + .expect("harness should have execution layer") + .notify_new_payload(request) + .await + .expect("newPayload should succeed"); + + // Store the envelope. + self.chain + .store + .put_payload_envelope(&block_root, signed_envelope) + .expect("should store envelope"); + + // Store the Full state. + self.chain + .store + .put_state(&state_root, pending_state) + .expect("should store full state"); + + state_root + } + /// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from /// the database. pub fn build_rpc_block_from_store_blobs( diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index bc7c98041f..1889c1f625 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -845,13 +845,14 @@ async fn check_all_base_rewards_for_subset( .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) .unwrap(); + // TODO(gloas): handle payloads? 
let mut pre_state = BlockReplayer::>::new( parent_state, &harness.spec, ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .unwrap() .into_state(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 86f4af3efc..a70ad89ca9 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -708,8 +708,13 @@ async fn block_replayer_hooks() { .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) .await; - let blocks = store - .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) + let (blocks, envelopes) = store + .load_blocks_to_replay( + Slot::new(0), + max_slot, + end_block_root.into(), + StatePayloadStatus::Pending, + ) .unwrap(); let mut pre_slots = vec![]; @@ -744,7 +749,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, None) + .apply_blocks(blocks, envelopes, None) .unwrap() .into_state(); @@ -3842,7 +3847,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (split_state_root, mut advanced_split_state) = harness .chain .store - .get_advanced_hot_state(split.block_root, split.slot, split.state_root) + .get_advanced_hot_state( + split.block_root, + StatePayloadStatus::Pending, + split.slot, + split.state_root, + ) .unwrap() .unwrap(); complete_state_advance( @@ -5470,6 +5480,427 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } +// ===================== Gloas Store Tests ===================== + +/// Test basic Gloas block + envelope storage and retrieval. 
+#[tokio::test] +async fn test_gloas_block_and_envelope_storage_no_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], false).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_no_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![], true).await +} + +#[tokio::test] +async fn test_gloas_block_and_envelope_storage_some_skips_w_cache() { + test_gloas_block_and_envelope_storage_generic(32, vec![2, 4, 5, 16, 23, 24, 25], true).await +} + +async fn test_gloas_block_and_envelope_storage_generic( + num_slots: u64, + skipped_slots: Vec, + use_state_cache: bool, +) { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store_config = if !use_state_cache { + StoreConfig { + state_cache_size: new_non_zero_usize(1), + ..StoreConfig::default() + } + } else { + StoreConfig::default() + }; + let spec = test_spec::(); + let store = get_store_generic(&db_path, store_config, spec); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let spec = &harness.chain.spec; + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + let mut block_roots = vec![]; + let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)]; + + for i in 1..=num_slots { + let slot = Slot::new(i); + harness.advance_slot(); + + if skipped_slots.contains(&i) { + complete_state_advance(&mut state, None, slot, spec) + .expect("should be able to advance state to slot"); + + let state_root = state.canonical_root().unwrap(); + store.put_state(&state_root, &state).unwrap(); + stored_states.push((slot, state.payload_status(), state_root)); + } + + let (block_contents, envelope, mut pending_state) = + 
harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + // Process the block. + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); + stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root)); + + // Process the envelope. + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state.clone(); + let envelope_state_root = envelope.message.state_root; + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + assert_eq!(full_state_root, envelope_state_root); + stored_states.push((slot, StatePayloadStatus::Full, full_state_root)); + + block_roots.push(block_root); + state = full_state; + } + + // Verify block storage. + for (i, block_root) in block_roots.iter().enumerate() { + // Block can be loaded. + assert!( + store.get_blinded_block(block_root).unwrap().is_some(), + "block at slot {} should be in DB", + i + 1 + ); + + // Envelope can be loaded. + let loaded_envelope = store.get_payload_envelope(block_root).unwrap(); + assert!( + loaded_envelope.is_some(), + "envelope at slot {} should be in DB", + i + 1 + ); + } + + // Verify state storage. + // Iterate in reverse order to frustrate the cache. 
+ for (slot, payload_status, state_root) in stored_states.into_iter().rev() { + println!("{slot}: {state_root:?}"); + let Some(mut loaded_state) = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + else { + panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}"); + }; + assert_eq!(loaded_state.slot(), slot); + assert_eq!( + loaded_state.payload_status(), + payload_status, + "slot = {slot}" + ); + assert_eq!( + loaded_state.canonical_root().unwrap(), + state_root, + "slot = {slot}" + ); + } +} + +/// Test that Pending and Full states have the correct payload status through round-trip +/// storage and retrieval. +#[tokio::test] +async fn test_gloas_state_payload_status() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 6u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state; + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + // Verify the pending state has correct payload status. + assert_eq!( + pending_state.payload_status(), + StatePayloadStatus::Pending, + "pending state at slot {} should be Pending", + i + ); + + // Process the envelope and verify the full state has correct payload status. 
+ let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + + assert_eq!( + full_state.payload_status(), + StatePayloadStatus::Full, + "full state at slot {} should be Full", + i + ); + + // Round-trip: load the full state from DB and check status. + let loaded_full = store + .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) + .unwrap() + .expect("full state should exist in DB"); + assert_eq!( + loaded_full.payload_status(), + StatePayloadStatus::Full, + "loaded full state at slot {} should be Full after round-trip", + i + ); + + state = full_state; + } +} + +/// Test block replay with and without envelopes. +#[tokio::test] +async fn test_gloas_block_replay_with_envelopes() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let num_blocks = 16u64; + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + let mut state = genesis_state.clone(); + + let mut last_block_root = Hash256::zero(); + let mut pending_states = HashMap::new(); + let mut full_states = HashMap::new(); + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state, slot).await; + let block_root = block_contents.0.canonical_root(); + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let pending_state_root = pending_state.clone().update_tree_hash_cache().unwrap(); + pending_states.insert(slot, (pending_state_root, pending_state.clone())); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + let full_state_root = harness + 
.process_envelope(block_root, envelope, &mut full_state) + .await; + full_states.insert(slot, (full_state_root, full_state.clone())); + + last_block_root = block_root; + state = full_state; + } + + let end_slot = Slot::new(num_blocks); + + // Load blocks for Pending replay (no envelopes for the last block). + let (blocks_pending, envelopes_pending) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Pending, + ) + .unwrap(); + assert!( + !blocks_pending.is_empty(), + "should have blocks for pending replay" + ); + // For Pending, no envelope for the first block (slot 0) or last block; envelopes for + // intermediate blocks whose payloads are canonical. + let expected_pending_envelopes = blocks_pending.len().saturating_sub(2); + assert_eq!( + envelopes_pending.len(), + expected_pending_envelopes, + "pending replay should have envelopes for all blocks except the last" + ); + assert!( + blocks_pending + .iter() + .skip(1) + .take(envelopes_pending.len()) + .map(|block| block.slot()) + .eq(envelopes_pending + .iter() + .map(|envelope| envelope.message.slot)), + "block and envelope slots should match" + ); + + // Load blocks for Full replay (envelopes for all blocks including the last). + let (blocks_full, envelopes_full) = store + .load_blocks_to_replay( + Slot::new(0), + end_slot, + last_block_root, + StatePayloadStatus::Full, + ) + .unwrap(); + assert_eq!( + envelopes_full.len(), + expected_pending_envelopes + 1, + "full replay should have one more envelope than pending replay" + ); + + // Replay to Pending state and verify. 
+ let mut replayed_pending = + BlockReplayer::::new(genesis_state.clone(), store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .desired_state_payload_status(StatePayloadStatus::Pending) + .apply_blocks(blocks_pending, envelopes_pending, None) + .expect("should replay blocks to pending state") + .into_state(); + replayed_pending.apply_pending_mutations().unwrap(); + + let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone(); + expected_pending.apply_pending_mutations().unwrap(); + + replayed_pending.drop_all_caches().unwrap(); + expected_pending.drop_all_caches().unwrap(); + assert_eq!( + replayed_pending, expected_pending, + "replayed pending state should match stored pending state" + ); + + // Replay to Full state and verify. + let mut replayed_full = + BlockReplayer::::new(genesis_state, store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .desired_state_payload_status(StatePayloadStatus::Full) + .apply_blocks(blocks_full, envelopes_full, None) + .expect("should replay blocks to full state") + .into_state(); + replayed_full.apply_pending_mutations().unwrap(); + + let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone(); + expected_full.apply_pending_mutations().unwrap(); + + replayed_full.drop_all_caches().unwrap(); + expected_full.drop_all_caches().unwrap(); + assert_eq!( + replayed_full, expected_full, + "replayed full state should match stored full state" + ); +} + +/// Test the hot state hierarchy with Full states stored as ReplayFrom. +#[tokio::test] +async fn test_gloas_hot_state_hierarchy() { + if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } + + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + // Build enough blocks to span multiple epochs. With MinimalEthSpec (8 slots/epoch), + // 40 slots covers 5 epochs. 
+ let num_blocks = E::slots_per_epoch() * 5; + // TODO(gloas): enable finalisation by increasing this threshold + let some_validators = (0..LOW_VALIDATOR_COUNT / 2).collect::>(); + + let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); + + // Use manual block building with envelopes for the first few blocks, + // then use the standard attested-blocks path once we've verified envelope handling. + let mut state = genesis_state; + let mut last_block_root = Hash256::zero(); + + for i in 1..=num_blocks { + let slot = Slot::new(i); + harness.advance_slot(); + + let (block_contents, envelope, pending_state) = + harness.make_block_with_envelope(state.clone(), slot).await; + let block_root = block_contents.0.canonical_root(); + + // Attest to previous block before processing next. + if i > 1 { + let state_root = state.update_tree_hash_cache().unwrap(); + harness.attest_block( + &state, + state_root, + last_block_root.into(), + &block_contents.0, + &some_validators, + ); + } + + harness + .process_block(slot, block_root, block_contents) + .await + .unwrap(); + + let envelope = envelope.expect("Gloas block should have envelope"); + let mut full_state = pending_state; + harness + .process_envelope(block_root, envelope, &mut full_state) + .await; + + last_block_root = block_root; + state = full_state; + } + + // Verify states can be loaded and have correct payload status. + let _head_state = harness.get_current_state(); + let _head_slot = harness.head_slot(); + + // States at all slots on the canonical chain should be retrievable. + for slot_num in 1..=num_blocks { + let slot = Slot::new(slot_num); + // Get the state root from the block at this slot via the state root iterator. 
+ let state_root = harness.chain.state_root_at_slot(slot).unwrap().unwrap(); + + let mut loaded_state = store + .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + assert_eq!(loaded_state.canonical_root().unwrap(), state_root); + } + + // Verify chain dump and iterators work with Gloas states. + check_chain_dump(&harness, num_blocks + 1); + check_iterators(&harness); +} + /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. fn check_split_slot( harness: &TestHarness, @@ -5521,7 +5952,9 @@ fn check_chain_dump_from_slot(harness: &TestHarness, from_slot: Slot, expected_l ); // Check presence of execution payload on disk. - if harness.chain.spec.bellatrix_fork_epoch.is_some() { + if harness.chain.spec.bellatrix_fork_epoch.is_some() + && !harness.chain.spec.is_gloas_scheduled() + { assert!( harness .chain diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index e94924d8b2..a66f7a9b55 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -932,8 +932,14 @@ pub fn generate_genesis_header(spec: &ChainSpec) -> Option None, + ForkName::Gloas => { + // TODO(gloas): we are using a Fulu header for now, but this gets fixed up by the + // genesis builder anyway which translates it to bid/latest_block_hash. 
+ let mut header = ExecutionPayloadHeader::Fulu(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) + } } } diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index 6e285829d2..05ed36e68b 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -205,8 +205,9 @@ pub fn get_attestation_performance( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 3772470b28..725a0648a5 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -398,8 +398,9 @@ pub fn get_block_packing_efficiency( }) .collect::, _>>()?; + // TODO(gloas): add payloads replayer = replayer - .apply_blocks(blocks, None) + .apply_blocks(blocks, vec![], None) .map_err(|e: PackingEfficiencyError| custom_server_error(format!("{:?}", e)))?; } diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 607221686f..70475de130 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -70,7 +70,7 @@ pub async fn produce_block_v4( let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); - let (block, consensus_block_value) = chain + let (block, _pending_state, consensus_block_value) = chain .produce_block_with_verification_gloas( randao_reveal, slot, diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 9bc1f6ead4..8715fc2b1e 100644 --- 
a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -66,11 +66,12 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; + // TODO(gloas): handle payloads? let replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], Some(block.slot())) + .apply_blocks(vec![], vec![], Some(block.slot())) .map_err(unhandled_error::)?; Ok(replayer.into_state()) diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3777c83b60..85ac56454c 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -654,6 +654,12 @@ impl HierarchyModuli { /// layer 2 diff will point to the start snapshot instead of the layer 1 diff at /// 2998272. pub fn storage_strategy(&self, slot: Slot, start_slot: Slot) -> Result { + // Initially had the idea of using different storage strategies for full and pending states, + // but it was very complex. However without this concept we end up storing two diffs/two + // snapshots at full slots. The complexity of managing skipped slots was the main impetus + // for reverting the payload-status sensitive design: a Full skipped slot has no same-slot + // Pending state to replay from, so has to be handled differently from Full non-skipped + // slots. 
match slot.cmp(&start_slot) { Ordering::Less => return Err(Error::LessThanStart(slot, start_slot)), Ordering::Equal => return Ok(StorageStrategy::Snapshot), diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index fe3477dbfe..428086c464 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -186,6 +186,7 @@ pub enum HotColdDBError { MissingHotHDiff(Hash256), MissingHDiff(Slot), MissingExecutionPayload(Hash256), + MissingExecutionPayloadEnvelope(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, MissingFrozenBlockSlot(Hash256), @@ -1132,10 +1133,13 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { + if let Some(cached) = + self.get_advanced_hot_state_from_cache(block_root, payload_status, max_slot) + { return Ok(Some(cached)); } @@ -1157,7 +1161,11 @@ impl, Cold: ItemStore> HotColdDB .into()); } - let state_root = if block_root == split.block_root && split.slot <= max_slot { + // Split state should always be `Pending`. + let state_root = if block_root == split.block_root + && let StatePayloadStatus::Pending = payload_status + && split.slot <= max_slot + { split.state_root } else { state_root @@ -1204,11 +1212,12 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state_from_cache( &self, block_root: Hash256, + payload_status: StatePayloadStatus, max_slot: Slot, ) -> Option<(Hash256, BeaconState)> { self.state_cache .lock() - .get_by_block_root(block_root, max_slot) + .get_by_block_root(block_root, payload_status, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. 
@@ -1379,6 +1388,8 @@ impl, Cold: ItemStore> HotColdDB // NOTE: `hot_storage_strategy` can error if there are states in the database // prior to the `anchor_slot`. This can happen if checkpoint sync has been // botched and left some states in the database prior to completing. + // Use `Pending` status here because snapshots and diffs are only stored for + // `Pending` states. if let Some(slot) = slot && let Ok(strategy) = self.hot_storage_strategy(slot) { @@ -1846,6 +1857,55 @@ impl, Cold: ItemStore> HotColdDB } } + /// Compute the `StatePayloadStatus` for a stored state based on its summary. + /// + /// In future this might become a field of the summary, but this would require a whole DB + /// migration. For now we use an extra read from the DB to determine it. + fn get_hot_state_summary_payload_status( + &self, + summary: &HotStateSummary, + ) -> Result { + // Treat pre-Gloas states as `Pending`. + if !self + .spec + .fork_name_at_slot::(summary.slot) + .gloas_enabled() + { + return Ok(StatePayloadStatus::Pending); + } + + // Treat genesis state as `Pending` (`BeaconBlock` state). + let previous_state_root = summary.previous_state_root; + if previous_state_root.is_zero() { + return Ok(StatePayloadStatus::Pending); + } + + // Load the hot state summary for the previous state. + // + // If it has the same slot as this summary then we know this summary is for a `Full` state + // (payload state), because they are always diffed against their same-slot `Pending` state. + // + // If the previous summary has a different slot AND the latest block is from `summary.slot`, + // then this state *must* be `Pending` (it is the summary for latest block itself). + // + // Otherwise, we are at a skipped slot and must traverse the graph of state summaries + // backwards until we reach a summary for the latest block. This recursion could be quite + // far in the case of a long skip. 
We could optimise this in future using the + // `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB + // migration. + let previous_state_summary = self + .load_hot_state_summary(&previous_state_root)? + .ok_or(Error::MissingHotStateSummary(previous_state_root))?; + + if previous_state_summary.slot == summary.slot { + Ok(StatePayloadStatus::Full) + } else if summary.slot == summary.latest_block_slot { + Ok(StatePayloadStatus::Pending) + } else { + self.get_hot_state_summary_payload_status(&previous_state_summary) + } + } + fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -1941,13 +2001,22 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - if let Some(HotStateSummary { - slot, - latest_block_root, - diff_base_state, - .. - }) = self.load_hot_state_summary(state_root)? + if let Some( + summary @ HotStateSummary { + slot, + latest_block_root, + diff_base_state, + .. + }, + ) = self.load_hot_state_summary(state_root)? { + let payload_status = self.get_hot_state_summary_payload_status(&summary)?; + debug!( + %slot, + ?state_root, + ?payload_status, + "Loading hot state" + ); let mut state = match self.hot_storage_strategy(slot)? { strat @ StorageStrategy::Snapshot | strat @ StorageStrategy::DiffFrom(_) => { let buffer_timer = metrics::start_timer_vec( @@ -1999,6 +2068,7 @@ impl, Cold: ItemStore> HotColdDB base_state, slot, latest_block_root, + payload_status, update_cache, )? 
} @@ -2016,19 +2086,26 @@ impl, Cold: ItemStore> HotColdDB base_state: BeaconState, slot: Slot, latest_block_root: Hash256, + desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot { + if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { return Ok(base_state); } - let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; + let (blocks, envelopes) = self.load_blocks_to_replay( + base_state.slot(), + slot, + latest_block_root, + desired_payload_status, + )?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary // state that this state is based on. It may be useful as the basis of more states // in the same epoch. let state_cache_hook = |state_root, state: &mut BeaconState| { + // TODO(gloas): prevent caching of the payload_status=Full state? if !update_cache || state.slot() % E::slots_per_epoch() != 0 { return Ok(()); } @@ -2052,9 +2129,19 @@ impl, Cold: ItemStore> HotColdDB Ok(()) }; + debug!( + %slot, + blocks = ?blocks.iter().map(|block| block.slot()).collect::>(), + envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::>(), + payload_status = ?desired_payload_status, + "Replaying blocks and envelopes" + ); + self.replay_blocks( base_state, blocks, + envelopes, + desired_payload_status, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2358,7 +2445,7 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; + let (blocks, envelopes) = self.load_cold_blocks(base_state.slot() + 1, slot)?; // Include state root for base state as it is required by block processing to not // have to hash the state. 
@@ -2367,7 +2454,17 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; + // TODO(gloas): calculate correct payload status for cold states + let payload_status = StatePayloadStatus::Pending; + let state = self.replay_blocks( + base_state, + blocks, + envelopes, + payload_status, + slot, + Some(state_root_iter), + None, + )?; debug!( target_slot = %slot, replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), @@ -2460,40 +2557,77 @@ impl, Cold: ItemStore> HotColdDB } } - /// Load cold blocks between `start_slot` and `end_slot` inclusive. + /// Load cold blocks and payload envelopes between `start_slot` and `end_slot` inclusive. + #[allow(clippy::type_complexity)] pub fn load_cold_blocks( &self, start_slot: Slot, end_slot: Slot, - ) -> Result>, Error> { + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); let block_root_iter = self.forwards_block_roots_iterator_until(start_slot, end_slot, || { Err(Error::StateShouldNotBeRequired(end_slot)) })?; - process_results(block_root_iter, |iter| { + let blocks = process_results(block_root_iter, |iter| { iter.map(|(block_root, _slot)| block_root) .dedup() .map(|block_root| { self.get_blinded_block(&block_root)? .ok_or(Error::MissingBlock(block_root)) }) - .collect() - })? + .collect::, Error>>() + })??; + + // If Gloas is not enabled for any slots in the range, just return `blocks`. 
+ if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + // TODO(gloas): wire this up + let end_block_root = Hash256::ZERO; + let desired_payload_status = StatePayloadStatus::Pending; + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; + + Ok((blocks, envelopes)) } - /// Load the blocks between `start_slot` and `end_slot` by backtracking from `end_block_hash`. + /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from + /// `end_block_root`. /// /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// equal to `start_slot`, to reach a state with slot equal to `end_slot`. + /// + /// Payloads are also returned in slot-ascending order, but only payloads forming part of + /// the chain are loaded (payloads for EMPTY slots are omitted). Prior to Gloas, an empty + /// vec of payloads will be returned. + #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, start_slot: Slot, end_slot: Slot, - end_block_hash: Hash256, - ) -> Result>>, Error> { + end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, + ) -> Result< + ( + Vec>, + Vec>, + ), + Error, + > { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); - let mut blocks = ParentRootBlockIterator::new(self, end_block_hash) + let mut blocks = ParentRootBlockIterator::new(self, end_block_root) .map(|result| result.map(|(_, block)| block)) // Include the block at the end slot (if any), it needs to be // replayed in order to construct the canonical state at `end_slot`. @@ -2520,17 +2654,70 @@ impl, Cold: ItemStore> HotColdDB }) .collect::, _>>()?; blocks.reverse(); - Ok(blocks) + + // If Gloas is not enabled for any slots in the range, just return `blocks`. 
+ if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() + && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() + { + return Ok((blocks, vec![])); + } + + let envelopes = self.load_payload_envelopes_for_blocks( + &blocks, + end_block_root, + desired_payload_status, + )?; + + Ok((blocks, envelopes)) + } + + pub fn load_payload_envelopes_for_blocks( + &self, + blocks: &[SignedBlindedBeaconBlock], + end_block_root: Hash256, + desired_payload_status: StatePayloadStatus, + ) -> Result>, Error> { + let mut envelopes = vec![]; + + for (block, next_block) in blocks.iter().tuple_windows() { + if block.fork_name_unchecked().gloas_enabled() { + // Check next block to see if this block's payload is canonical on this chain. + let block_hash = block.payload_bid_block_hash()?; + if !next_block.is_parent_block_full(block_hash) { + // No payload at this slot (empty), nothing to load. + continue; + } + // Using `parent_root` avoids computation. + let block_root = next_block.parent_root(); + let envelope = self + .get_payload_envelope(&block_root)? + .ok_or(HotColdDBError::MissingExecutionPayloadEnvelope(block_root))?; + envelopes.push(envelope); + } + } + + // Load the payload for the last block if desired. + if let StatePayloadStatus::Full = desired_payload_status { + let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( + HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), + )?; + envelopes.push(envelope); + } + + Ok(envelopes) } /// Replay `blocks` on top of `state` until `target_slot` is reached. /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. 
+ #[allow(clippy::too_many_arguments)] pub fn replay_blocks( &self, state: BeaconState, - blocks: Vec>>, + blocks: Vec>, + envelopes: Vec>, + desired_payload_status: StatePayloadStatus, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2539,7 +2726,8 @@ impl, Cold: ItemStore> HotColdDB let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification(); + .minimal_block_root_verification() + .desired_state_payload_status(desired_payload_status); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { @@ -2551,7 +2739,7 @@ impl, Cold: ItemStore> HotColdDB } block_replayer - .apply_blocks(blocks, Some(target_slot)) + .apply_blocks(blocks, envelopes, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( @@ -4006,11 +4194,15 @@ impl HotStateSummary { // slots where there isn't a skip). let latest_block_root = state.get_latest_block_root(state_root); + // Payload status of the state determines a lot about how it is stored. + let payload_status = state.payload_status(); + let get_state_root = |slot| { if slot == state.slot() { + // TODO(gloas): I think we can remove this case Ok::<_, Error>(state_root) } else { - Ok(get_ancestor_state_root(store, state, slot).map_err(|e| { + Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| { Error::StateSummaryIteratorError { error: e, from_state_root: state_root, @@ -4030,6 +4222,12 @@ impl HotStateSummary { let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() + } else if let StatePayloadStatus::Full = payload_status + && state.slot() == state.latest_block_header().slot + { + // A Full state at a non-skipped slot builds off the Pending state of the same slot, + // i.e. 
the state with the same `state_root` as its `BeaconBlock` + state.latest_block_header().state_root } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 7aca692ef9..e51543c3a2 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -67,6 +67,7 @@ where state.build_caches(&self.spec)?; + // TODO(gloas): handle payload envelope replay process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 6d159c9361..d016922ade 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -7,7 +7,7 @@ use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; use tracing::instrument; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, execution::StatePayloadStatus}; /// Fraction of the LRU cache to leave intact during culling. const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -23,10 +23,10 @@ pub struct FinalizedState { state: BeaconState, } -/// Map from block_root -> slot -> state_root. +/// Map from (block_root, payload_status) -> slot -> state_root. #[derive(Debug, Default)] pub struct BlockMap { - blocks: HashMap, + blocks: HashMap<(Hash256, StatePayloadStatus), SlotMap>, } /// Map from slot -> state_root. @@ -143,8 +143,11 @@ impl StateCache { return Err(Error::FinalizedStateDecreasingSlot); } + let payload_status = state.payload_status(); + // Add to block map. - self.block_map.insert(block_root, state.slot(), state_root); + self.block_map + .insert(block_root, payload_status, state.slot(), state_root); // Prune block map. 
let state_roots_to_prune = self.block_map.prune(state.slot()); @@ -267,7 +270,9 @@ impl StateCache { // Record the connection from block root and slot to this state. let slot = state.slot(); - self.block_map.insert(block_root, slot, state_root); + let payload_status = state.payload_status(); + self.block_map + .insert(block_root, payload_status, slot, state_root); Ok(PutStateOutcome::New(deleted_states)) } @@ -316,9 +321,10 @@ impl StateCache { pub fn get_by_block_root( &mut self, block_root: Hash256, + payload_status: StatePayloadStatus, slot: Slot, ) -> Option<(Hash256, BeaconState)> { - let slot_map = self.block_map.blocks.get(&block_root)?; + let slot_map = self.block_map.blocks.get(&(block_root, payload_status))?; // Find the state at `slot`, or failing that the most recent ancestor. let state_root = slot_map @@ -339,7 +345,12 @@ impl StateCache { } pub fn delete_block_states(&mut self, block_root: &Hash256) { - if let Some(slot_map) = self.block_map.delete_block_states(block_root) { + let (pending_state_roots, full_state_roots) = + self.block_map.delete_block_states(block_root); + for slot_map in [pending_state_roots, full_state_roots] + .into_iter() + .flatten() + { for state_root in slot_map.slots.values() { self.states.pop(state_root); } @@ -412,8 +423,14 @@ impl StateCache { } impl BlockMap { - fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { - let slot_map = self.blocks.entry(block_root).or_default(); + fn insert( + &mut self, + block_root: Hash256, + payload_status: StatePayloadStatus, + slot: Slot, + state_root: Hash256, + ) { + let slot_map = self.blocks.entry((block_root, payload_status)).or_default(); slot_map.slots.insert(slot, state_root); } @@ -444,8 +461,12 @@ impl BlockMap { }); } - fn delete_block_states(&mut self, block_root: &Hash256) -> Option { - self.blocks.remove(block_root) + fn delete_block_states(&mut self, block_root: &Hash256) -> (Option, Option) { + let pending_state_roots = self + .blocks + 
.remove(&(*block_root, StatePayloadStatus::Pending)); + let full_state_roots = self.blocks.remove(&(*block_root, StatePayloadStatus::Full)); + (pending_state_roots, full_state_roots) } } diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index 56e667cdd3..a10d6179fe 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,6 +1,11 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, + VerifyBlockRoot, VerifySignatures, + envelope_processing::{ + EnvelopeProcessingError, VerifyStateRoot, process_execution_payload_envelope, + }, + per_block_processing, + per_epoch_processing::EpochProcessingSummary, per_slot_processing, }; use itertools::Itertools; @@ -8,7 +13,7 @@ use std::iter::Peekable; use std::marker::PhantomData; use types::{ BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, - Slot, + SignedExecutionPayloadEnvelope, Slot, execution::StatePayloadStatus, }; pub type PreBlockHook<'a, E, Error> = Box< @@ -24,7 +29,7 @@ pub type PostSlotHook<'a, E, Error> = Box< >; pub type StateRootIterDefault = std::iter::Empty>; -/// Efficiently apply blocks to a state while configuring various parameters. +/// Efficiently apply blocks and payloads to a state while configuring various parameters. /// /// Usage follows a builder pattern. pub struct BlockReplayer< @@ -41,8 +46,21 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, + /// Iterator over state roots for all *block* states. + /// + /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon + /// blocks. For states corresponding to payloads, we read the state root from the payload + /// envelope. 
+ // TODO(gloas): this concept might need adjusting when we implement the cold DB. pub(crate) state_root_iter: Option>, state_root_miss: bool, + /// The payload status of the state desired as the end result of block replay. + /// + /// This dictates whether a payload should be applied after applying the last block. + /// + /// Prior to Gloas, this should always be set to `StatePayloadStatus::Pending` to indicate + /// that no envelope needs to be applied. + desired_state_payload_status: StatePayloadStatus, _phantom: PhantomData, } @@ -50,7 +68,12 @@ pub struct BlockReplayer< pub enum BlockReplayError { SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), + EnvelopeProcessing(EnvelopeProcessingError), BeaconState(BeaconStateError), + /// A payload envelope for this `slot` was required but not provided. + MissingPayloadEnvelope { + slot: Slot, + }, } impl From for BlockReplayError { @@ -65,6 +88,12 @@ impl From for BlockReplayError { } } +impl From for BlockReplayError { + fn from(e: EnvelopeProcessingError) -> Self { + Self::EnvelopeProcessing(e) + } +} + impl From for BlockReplayError { fn from(e: BeaconStateError) -> Self { Self::BeaconState(e) @@ -96,6 +125,7 @@ where post_slot_hook: None, state_root_iter: None, state_root_miss: false, + desired_state_payload_status: StatePayloadStatus::Pending, _phantom: PhantomData, } } @@ -161,6 +191,14 @@ where self } + /// Set the desired payload status of the state reached by replay. + /// + /// This determines whether to apply a payload after applying the last block. + pub fn desired_state_payload_status(mut self, payload_status: StatePayloadStatus) -> Self { + self.desired_state_payload_status = payload_status; + self + } + /// Compute the state root for `self.state` as efficiently as possible. /// /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be @@ -208,6 +246,38 @@ where Ok(state_root) } + /// Apply an execution payload envelope to `self.state`. 
+ /// + /// The `block_state_root` MUST be the `state_root` of the most recently applied block. + /// + /// Returns the `state_root` of `self.state` after payload application. + fn apply_payload_envelope( + &mut self, + envelope: &SignedExecutionPayloadEnvelope, + block_state_root: Hash256, + ) -> Result { + // TODO(gloas): bulk signature verification could be relevant here? + let verify_payload_signatures = + if let BlockSignatureStrategy::NoVerification = self.block_sig_strategy { + VerifySignatures::False + } else { + VerifySignatures::True + }; + // TODO(gloas): state root verif enabled during initial prototyping + let verify_state_root = VerifyStateRoot::True; + process_execution_payload_envelope( + &mut self.state, + Some(block_state_root), + envelope, + verify_payload_signatures, + verify_state_root, + self.spec, + ) + .map_err(BlockReplayError::from)?; + + Ok(envelope.message.state_root) + } + /// Apply `blocks` atop `self.state`, taking care of slot processing. /// /// If `target_slot` is provided then the state will be advanced through to `target_slot` @@ -215,8 +285,21 @@ where pub fn apply_blocks( mut self, blocks: Vec>>, + payload_envelopes: Vec>, target_slot: Option, ) -> Result { + let mut envelopes_iter = payload_envelopes.into_iter(); + + let mut next_envelope_at_slot = |slot| { + if let Some(envelope) = envelopes_iter.next() + && envelope.message.slot == slot + { + Ok(envelope) + } else { + Err(BlockReplayError::MissingPayloadEnvelope { slot }) + } + }; + for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. 
if i == 0 && block.slot() <= self.state.slot() { @@ -224,7 +307,35 @@ where } while self.state.slot() < block.slot() { - let state_root = self.get_state_root(&blocks, i)?; + let mut state_root = self.get_state_root(&blocks, i)?; + + // Apply the payload for the *previous* block if the bid in the current block + // indicates that the parent is full (and it hasn't already been applied). + state_root = if block.fork_name_unchecked().gloas_enabled() + && self.state.slot() == self.state.latest_block_header().slot + { + let latest_bid_block_hash = self + .state + .latest_execution_payload_bid() + .map_err(BlockReplayError::from)? + .block_hash; + + // Similar to `is_parent_block_full`, but reading the block hash from the + // not-yet-applied `block`. The slot 0 case covers genesis (no block replay required). + if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { + let envelope = next_envelope_at_slot(self.state.slot())?; + // State root for the next slot processing is now the envelope's state root. + self.apply_payload_envelope(&envelope, state_root)? + } else { + // Empty payload at this slot, the state root is unchanged from when the + // beacon block was applied. + state_root + } + } else { + // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state + // is always the output from `self.get_state_root`. + state_root + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; @@ -268,9 +379,24 @@ where } } + // Apply the last payload if desired. + let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status + && let Some(last_block) = blocks.last() + { + let envelope = next_envelope_at_slot(self.state.slot())?; + Some(self.apply_payload_envelope(&envelope, last_block.state_root())?)
+ } else { + None + }; + if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - let state_root = self.get_state_root(&blocks, blocks.len())?; + // Read state root from `opt_state_root` if a payload was just applied. + let state_root = if let Some(root) = opt_state_root.take() { + root + } else { + self.get_state_root(&blocks, blocks.len())? + }; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index be6b7c1b29..97953b835f 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -241,8 +241,6 @@ pub fn process_execution_payload_envelope( // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; - - // TODO(gloas): gotta update these process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; process_consolidation_requests(state, &execution_requests.consolidations, spec)?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 96610c2010..0203b33e61 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1014,7 +1014,7 @@ async fn block_replayer_peeking_state_roots() { let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) .state_root_iter(state_root_iter.into_iter()) .no_signature_verification() - .apply_blocks(vec![target_block], None) + .apply_blocks(vec![target_block], vec![], None) .unwrap(); assert_eq!( diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 11a956bc2a..1114562155 100644 --- 
a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -77,6 +77,11 @@ pub fn partial_state_advance( // (all-zeros) state root. let mut initial_state_root = Some(if state.slot() > state.latest_block_header().slot { state_root_opt.unwrap_or_else(Hash256::zero) + } else if state.slot() == state.latest_block_header().slot + && !state.latest_block_header().state_root.is_zero() + { + // Post-Gloas Full state case. + state.latest_block_header().state_root } else { state_root_opt.ok_or(Error::StateRootNotProvided)? }); diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index aeb3c18d95..b6218ba64d 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -14,6 +14,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ + ExecutionBlockHash, block::{ BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, @@ -365,6 +366,32 @@ impl> SignedBeaconBlock format_kzg_commitments(commitments.as_ref()) } + + /// Convenience accessor for the block's bid's `block_hash`. + /// + /// This method returns an error prior to Gloas. + pub fn payload_bid_block_hash(&self) -> Result { + self.message() + .body() + .signed_execution_payload_bid() + .map(|bid| bid.message.block_hash) + } + + /// Check if the `parent_block_hash` in this block's `signed_execution_payload_bid` matches the given `parent_block_hash`. + /// + /// This function is useful post-Gloas for determining if the parent block is full, *without* + /// necessarily needing access to a beacon state. The passed in `parent_block_hash` MUST be the + /// `block_hash` from the parent beacon block's bid. If the parent beacon state is available
+ /// + /// This function returns `false` for all blocks prior to Gloas. + pub fn is_parent_block_full(&self, parent_block_hash: ExecutionBlockHash) -> bool { + let Ok(signed_payload_bid) = self.message().body().signed_execution_payload_bid() else { + // Prior to Gloas. + return false; + }; + signed_payload_bid.message.parent_block_hash == parent_block_hash + } } // We can convert pre-Bellatrix blocks without payloads into blocks with payloads. diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index a3d4ed8730..591be32b24 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -12,6 +12,7 @@ mod payload; mod signed_bls_to_execution_change; mod signed_execution_payload_bid; mod signed_execution_payload_envelope; +mod state_payload_status; pub use bls_to_execution_change::BlsToExecutionChange; pub use eth1_data::Eth1Data; @@ -41,3 +42,4 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; +pub use state_payload_status::StatePayloadStatus; diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs new file mode 100644 index 0000000000..1661be6060 --- /dev/null +++ b/consensus/types/src/execution/state_payload_status.rs @@ -0,0 +1,18 @@ +use serde::{Deserialize, Serialize}; + +/// Payload status as it applies to a `BeaconState` post-Gloas. +/// +/// A state can either be a post-state for a block (in which case we call it `Pending`) or a +/// payload envelope (`Full`). When handling states it is often necessary to know which of these +/// two variants is required. +/// +/// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether +/// the payload for the most-recently applied block was also applied. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum StatePayloadStatus { + /// For states produced by `process_block` executed on a `BeaconBlock`. + Pending, + /// For states produced by `process_execution_payload` on an `ExecutionPayloadEnvelope`. + Full, +} diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index bd67f469d2..34cfd0ca1c 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -36,7 +36,7 @@ use crate::{ execution::{ Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, StatePayloadStatus, }, fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, light_client::consts::{ @@ -1266,6 +1266,24 @@ impl BeaconState { } } + /// Determine the payload status of this state. + /// + /// Prior to Gloas this is always `Pending`. + /// + /// Post-Gloas, the definition of the `StatePayloadStatus` is: + /// + /// - `Full` if this state is the result of envelope processing. + /// - `Pending` if this state is the result of block processing. + pub fn payload_status(&self) -> StatePayloadStatus { + if !self.fork_name_unchecked().gloas_enabled() { + StatePayloadStatus::Pending + } else if self.is_parent_block_full() { + StatePayloadStatus::Full + } else { + StatePayloadStatus::Pending + } + } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1