From cf3d5e285e9109def686d24b543d5b44cb233347 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 21 Apr 2026 16:29:15 +1000 Subject: [PATCH] Gloas spec v1.7.0-alpha.5 and beacon_chain tests (#8998) Fix database pruning post-Gloas - Fix DB pruning logic (and state summaries DAG) - Get the `beacon_chain` tests running with `FORK_NAME=gloas` :tada: Co-Authored-By: Michael Sproul Co-Authored-By: Jimmy Chen Co-Authored-By: Eitan Seri- Levi Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com> Co-Authored-By: Eitan Seri-Levi --- .github/forbidden-files.txt | 1 + Makefile | 5 +- .../beacon_chain/src/beacon_block_streamer.rs | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 68 +-- .../beacon_chain/src/beacon_snapshot.rs | 9 +- .../beacon_chain/src/blob_verification.rs | 8 +- .../src/block_production/gloas.rs | 109 +++- .../beacon_chain/src/block_production/mod.rs | 81 +-- .../beacon_chain/src/block_verification.rs | 43 +- beacon_node/beacon_chain/src/builder.rs | 46 +- .../beacon_chain/src/canonical_head.rs | 97 ++-- .../src/data_column_verification.rs | 9 +- beacon_node/beacon_chain/src/errors.rs | 6 +- .../beacon_chain/src/execution_payload.rs | 8 + beacon_node/beacon_chain/src/migrate.rs | 2 +- .../src/payload_bid_verification/tests.rs | 9 +- .../src/payload_envelope_streamer/tests.rs | 3 +- .../execution_pending_envelope.rs | 18 +- .../gossip_verified_envelope.rs | 15 +- .../payload_envelope_verification/import.rs | 11 +- .../src/payload_envelope_verification/mod.rs | 10 +- .../src/pending_payload_envelopes.rs | 7 +- .../beacon_chain/src/state_advance_timer.rs | 14 +- beacon_node/beacon_chain/src/test_utils.rs | 66 ++- .../src/validator_pubkey_cache.rs | 18 +- .../tests/attestation_production.rs | 2 +- .../tests/attestation_verification.rs | 15 +- .../beacon_chain/tests/block_verification.rs | 94 +++- .../beacon_chain/tests/column_verification.rs | 7 +- beacon_node/beacon_chain/tests/events.rs | 5 +- .../tests/payload_invalidation.rs | 43 +- 
beacon_node/beacon_chain/tests/rewards.rs | 3 +- beacon_node/beacon_chain/tests/store_tests.rs | 467 ++++++++---------- beacon_node/beacon_chain/tests/tests.rs | 18 +- .../beacon_chain/tests/validator_monitor.rs | 3 +- beacon_node/execution_layer/src/engine_api.rs | 64 ++- .../execution_layer/src/engine_api/http.rs | 34 ++ .../src/engine_api/json_structures.rs | 35 +- .../test_utils/execution_block_generator.rs | 5 + .../src/test_utils/handle_rpc.rs | 18 +- .../src/test_utils/mock_builder.rs | 26 +- .../src/test_utils/mock_execution_layer.rs | 30 +- .../execution_layer/src/test_utils/mod.rs | 1 + .../src/beacon/execution_payload_envelope.rs | 6 +- .../http_api/src/sync_committee_rewards.rs | 3 +- beacon_node/http_api/tests/tests.rs | 5 +- .../src/network_beacon_processor/tests.rs | 14 +- beacon_node/store/src/hot_cold_store.rs | 280 +---------- beacon_node/store/src/reconstruct.rs | 1 - beacon_node/store/src/state_cache.rs | 43 +- common/eth2/src/types.rs | 2 - consensus/fork_choice/src/fork_choice.rs | 29 +- .../gloas_payload.rs | 106 ++-- consensus/proto_array/src/proto_array.rs | 39 +- .../src/proto_array_fork_choice.rs | 43 +- .../state_processing/src/block_replayer.rs | 137 +---- .../src/envelope_processing.rs | 153 ++---- consensus/state_processing/src/genesis.rs | 40 +- .../src/per_block_processing.rs | 140 +++++- .../src/per_block_processing/errors.rs | 7 + .../src/per_block_processing/tests.rs | 2 +- .../src/per_block_processing/withdrawals.rs | 10 +- .../state_processing/src/upgrade/gloas.rs | 5 +- consensus/types/src/block/beacon_block.rs | 1 + .../types/src/block/beacon_block_body.rs | 6 + .../types/src/block/signed_beacon_block.rs | 6 +- .../types/src/execution/execution_payload.rs | 8 +- .../src/execution/execution_payload_bid.rs | 1 + .../execution/execution_payload_envelope.rs | 8 +- consensus/types/src/execution/mod.rs | 2 - .../signed_execution_payload_envelope.rs | 2 +- .../src/execution/state_payload_status.rs | 18 - 
consensus/types/src/state/beacon_state.rs | 42 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 +- testing/ef_tests/src/cases/fork_choice.rs | 109 +++- testing/ef_tests/src/cases/operations.rs | 54 +- testing/ef_tests/src/handler.rs | 8 +- testing/ef_tests/src/lib.rs | 8 +- testing/ef_tests/tests/tests.rs | 18 +- .../src/test_rig.rs | 4 + .../lighthouse_validator_store/src/lib.rs | 2 +- 82 files changed, 1513 insertions(+), 1391 deletions(-) delete mode 100644 consensus/types/src/execution/state_payload_status.rs diff --git a/.github/forbidden-files.txt b/.github/forbidden-files.txt index b070067350..8649fbb574 100644 --- a/.github/forbidden-files.txt +++ b/.github/forbidden-files.txt @@ -12,3 +12,4 @@ beacon_node/http_api/src/block_rewards.rs common/eth2/src/lighthouse/attestation_performance.rs common/eth2/src/lighthouse/block_packing_efficiency.rs common/eth2/src/lighthouse/block_rewards.rs +consensus/types/src/execution/state_payload_status.rs diff --git a/Makefile b/Makefile index 033ad6cfc8..280e74d1d9 100644 --- a/Makefile +++ b/Makefile @@ -207,11 +207,10 @@ run-ef-tests: ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. -# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead -test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS_BEFORE_GLOAS)) +test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain --no-fail-fast # Run the tests in the `http_api` crate for recent forks. 
test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS_BEFORE_GLOAS)) diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index 9ddc50a9f7..ed74022c3d 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -733,6 +733,7 @@ mod tests { spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch as u64)); + spec.gloas_fork_epoch = None; let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4e4ff341fe..e14c7c047f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2058,12 +2058,7 @@ impl BeaconChain { // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (advanced_state_root, mut state) = self .store - .get_advanced_hot_state( - beacon_block_root, - StatePayloadStatus::Pending, - request_slot, - beacon_state_root, - )? + .get_advanced_hot_state(beacon_block_root, request_slot, beacon_state_root)? .ok_or(Error::MissingBeaconState(beacon_state_root))?; if state.current_epoch() < request_epoch { partial_state_advance( @@ -4564,7 +4559,7 @@ impl BeaconChain { // // Load the parent state from disk. let chain = self.clone(); - let (state, state_root_opt) = self + let block_production_state = self .task_executor .spawn_blocking_handle( move || chain.load_state_for_block_production(slot), @@ -4573,6 +4568,10 @@ impl BeaconChain { .ok_or(BlockProductionError::ShuttingDown)? 
.await .map_err(BlockProductionError::TokioJoin)??; + let (state, state_root_opt) = ( + block_production_state.state, + block_production_state.state_root, + ); // Part 2/2 (async, with some blocking components) // @@ -4722,12 +4721,7 @@ impl BeaconChain { .ok_or(Error::MissingBeaconBlock(parent_block_root))?; let (state_root, state) = self .store - .get_advanced_hot_state( - parent_block_root, - StatePayloadStatus::Pending, - proposal_slot, - block.state_root(), - )? + .get_advanced_hot_state(parent_block_root, proposal_slot, block.state_root())? .ok_or(Error::MissingBeaconState(block.state_root()))?; (Cow::Owned(state), state_root) }; @@ -6019,6 +6013,12 @@ impl BeaconChain { None }; + let slot_number = if prepare_slot_fork.gloas_enabled() { + Some(prepare_slot.as_u64()) + } else { + None + }; + let payload_attributes = PayloadAttributes::new( self.slot_clock .start_of(prepare_slot) @@ -6028,6 +6028,7 @@ impl BeaconChain { execution_layer.get_suggested_fee_recipient(proposer).await, withdrawals.map(Into::into), parent_beacon_block_root, + slot_number, ); execution_layer @@ -6663,12 +6664,7 @@ impl BeaconChain { // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root let (state_root, state) = self .store - .get_advanced_hot_state( - head_block_root, - StatePayloadStatus::Pending, - target_slot, - head_block.state_root, - )? + .get_advanced_hot_state(head_block_root, target_slot, head_block.state_root)? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; @@ -6756,10 +6752,10 @@ impl BeaconChain { blocks.push((beacon_block_root, Arc::new(beacon_block))); } - // Collect states, using the next blocks to determine if states are full (have Gloas - // payloads). + // Collect envelopes, using the next blocks to determine if payloads are canonical + // (the parent block was full). 
for (i, (block_root, block)) in blocks.iter().enumerate() { - let (opt_envelope, state_root) = if block.fork_name_unchecked().gloas_enabled() { + let opt_envelope = if block.fork_name_unchecked().gloas_enabled() { let opt_envelope = self.store.get_payload_envelope(block_root)?.map(Arc::new); if let Some((_, next_block)) = blocks.get(i + 1) { @@ -6768,22 +6764,30 @@ impl BeaconChain { let envelope = opt_envelope.ok_or_else(|| { Error::DBInconsistent(format!("Missing envelope {block_root:?}")) })?; - let state_root = envelope.message.state_root; - (Some(envelope), state_root) + Some(envelope) } else { - (None, block.state_root()) + None } } else { - // TODO(gloas): should use fork choice/cached head for last block in sequence - opt_envelope - .as_ref() - .map_or((None, block.state_root()), |envelope| { - (Some(envelope.clone()), envelope.message.state_root) - }) + // Last block in the sequence: use canonical head to determine + // whether the payload is canonical. + let head = self.canonical_head.cached_head(); + assert_eq!(head.head_block_root(), *block_root); + let payload_received = + head.head_payload_status() == fork_choice::PayloadStatus::Full; + if payload_received { + let envelope = opt_envelope.ok_or_else(|| { + Error::DBInconsistent(format!("Missing envelope {block_root:?}")) + })?; + Some(envelope) + } else { + None + } } } else { - (None, block.state_root()) + None }; + let state_root = block.state_root(); let mut beacon_state = self .store diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs index 566713e3f3..996a964386 100644 --- a/beacon_node/beacon_chain/src/beacon_snapshot.rs +++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs @@ -44,18 +44,13 @@ impl> BeaconSnapshot { } } - /// Returns the state root from `self.beacon_block` or `self.execution_envelope` as - /// appropriate. + /// Returns the state root from `self.beacon_block`. 
/// /// ## Caution /// /// It is not strictly enforced that `root(self.beacon_state) == self.beacon_state_root()`. pub fn beacon_state_root(&self) -> Hash256 { - if let Some(ref envelope) = self.execution_envelope { - envelope.message.state_root - } else { - self.beacon_block.message().state_root() - } + self.beacon_block.message().state_root() } /// Update all fields of the checkpoint. diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 86b385d818..e557a24369 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -20,7 +20,6 @@ use tree_hash::TreeHash; use types::data::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlockHeader, Slot, - StatePayloadStatus, }; /// An error occurred while validating a gossip blob. @@ -513,12 +512,7 @@ pub fn validate_blob_sidecar_for_gossip BeaconChain { // // Load the parent state from disk. let chain = self.clone(); - let (state, state_root_opt) = self + let block_production_state = self .task_executor .spawn_blocking_handle( move || chain.load_state_for_block_production(slot), @@ -96,6 +99,12 @@ impl BeaconChain { .ok_or(BlockProductionError::ShuttingDown)? 
.await .map_err(BlockProductionError::TokioJoin)??; + let BlockProductionState { + state, + state_root: state_root_opt, + parent_payload_status, + parent_envelope, + } = block_production_state; // Part 2/2 (async, with some blocking components) // @@ -103,6 +112,8 @@ impl BeaconChain { self.produce_block_on_state_gloas( state, state_root_opt, + parent_payload_status, + parent_envelope, slot, randao_reveal, graffiti_settings, @@ -113,10 +124,13 @@ impl BeaconChain { // TODO(gloas) need to implement builder boost factor logic #[instrument(level = "debug", skip_all)] + #[allow(clippy::too_many_arguments)] pub async fn produce_block_on_state_gloas( self: &Arc, state: BeaconState, state_root_opt: Option, + parent_payload_status: PayloadStatus, + parent_envelope: Option>>, produce_at_slot: Slot, randao_reveal: Signature, graffiti_settings: GraffitiSettings, @@ -148,6 +162,16 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; + // Extract the parent's execution requests from the envelope (if parent was full). + let parent_execution_requests = if parent_payload_status == PayloadStatus::Full { + parent_envelope + .as_ref() + .map(|env| env.message.execution_requests.clone()) + .ok_or(BlockProductionError::MissingParentExecutionPayload)? + } else { + ExecutionRequests::default() + }; + // Part 2/3 (async) // // Produce the execution payload bid. 
@@ -157,6 +181,8 @@ impl BeaconChain { .clone() .produce_execution_payload_bid( state, + parent_payload_status, + parent_envelope, produce_at_slot, BID_VALUE_SELF_BUILD, BUILDER_INDEX_SELF_BUILD, @@ -173,6 +199,7 @@ impl BeaconChain { chain.complete_partial_beacon_block_gloas( partial_beacon_block, execution_payload_bid, + parent_execution_requests, payload_data, state, verification, @@ -427,6 +454,7 @@ impl BeaconChain { &self, partial_beacon_block: PartialBeaconBlock, signed_execution_payload_bid: SignedExecutionPayloadBid, + parent_execution_requests: ExecutionRequests, payload_data: Option>, mut state: BeaconState, verification: ProduceBlockVerification, @@ -488,6 +516,7 @@ impl BeaconChain { bls_to_execution_changes: bls_to_execution_changes .try_into() .map_err(BlockProductionError::SszTypesError)?, + parent_execution_requests, signed_execution_payload_bid, payload_attestations: payload_attestations .try_into() @@ -558,29 +587,23 @@ impl BeaconChain { execution_requests: payload_data.execution_requests, builder_index: payload_data.builder_index, beacon_block_root, - slot: payload_data.slot, - state_root: Hash256::ZERO, }; - let mut signed_envelope = SignedExecutionPayloadEnvelope { + let signed_envelope = SignedExecutionPayloadEnvelope { message: execution_payload_envelope, signature: Signature::empty(), }; - // We skip state root verification here because the relevant state root - // cant be calculated until after the new block has been constructed. - process_execution_payload_envelope( - &mut state, - None, + // Verify the envelope against the state. This performs no state mutation. + verify_execution_payload_envelope( + &state, &signed_envelope, VerifySignatures::False, - VerifyStateRoot::False, + state_root, &self.spec, ) .map_err(BlockProductionError::EnvelopeProcessingError)?; - signed_envelope.message.state_root = state.update_tree_hash_cache()?; - // Cache the envelope for later retrieval by the validator for signing and publishing. 
let envelope_slot = payload_data.slot; // TODO(gloas) might be safer to cache by root instead of by slot. @@ -622,7 +645,9 @@ impl BeaconChain { #[instrument(level = "debug", skip_all)] pub async fn produce_execution_payload_bid( self: Arc, - mut state: BeaconState, + state: BeaconState, + parent_payload_status: PayloadStatus, + parent_envelope: Option>>, produce_at_slot: Slot, bid_value: u64, builder_index: BuilderIndex, @@ -665,6 +690,17 @@ impl BeaconChain { .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?, }; + let parent_bid = state.latest_execution_payload_bid()?; + + // TODO(gloas): need should_extend_payload check here as well + let parent_block_hash = if parent_payload_status == PayloadStatus::Full { + // Build on parent bid's payload. + parent_bid.block_hash + } else { + // Skip parent bid's payload. For genesis this is the EL genesis hash. + parent_bid.parent_block_hash + }; + // TODO(gloas) this should be BlockProductionVersion::V4 // V3 is okay for now as long as we're not connected to a builder // TODO(gloas) add builder boost factor @@ -672,6 +708,8 @@ impl BeaconChain { self.clone(), &state, parent_root, + parent_block_hash, + parent_envelope, proposer_index, builder_params, )?; @@ -689,13 +727,11 @@ impl BeaconChain { blobs_and_proofs: _, } = block_proposal_contents; - let state_root = state.update_tree_hash_cache()?; - // TODO(gloas) since we are defaulting to local building, execution payment is 0 // execution payment should only be set to > 0 for trusted building. 
let bid = ExecutionPayloadBid:: { - parent_block_hash: state.latest_block_hash()?.to_owned(), - parent_block_root: state.get_latest_block_root(state_root), + parent_block_hash, + parent_block_root: parent_root, block_hash: payload.block_hash, prev_randao: payload.prev_randao, fee_recipient: Address::ZERO, @@ -705,6 +741,7 @@ impl BeaconChain { value: bid_value, execution_payment: EXECUTION_PAYMENT_TRUSTLESS_BUILD, blob_kzg_commitments, + execution_requests_root: execution_requests.tree_hash_root(), }; // Store payload data for envelope construction after block is created @@ -740,6 +777,8 @@ fn get_execution_payload_gloas( chain: Arc>, state: &BeaconState, parent_beacon_block_root: Hash256, + parent_block_hash: ExecutionBlockHash, + parent_envelope: Option>>, proposer_index: u64, builder_params: BuilderParams, ) -> Result, BlockProductionError> { @@ -751,11 +790,28 @@ fn get_execution_payload_gloas( compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_block_hash = *state.latest_block_hash()?; - let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit; + // TODO(gloas): this gas limit calc is not necessarily right + let parent_bid = state.latest_execution_payload_bid()?; + let latest_gas_limit = parent_bid.gas_limit; - let withdrawals = if state.is_parent_block_full() { - Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + let is_parent_block_full = parent_block_hash == parent_bid.block_hash; + + let withdrawals = if is_parent_block_full { + if let Some(envelope) = parent_envelope { + let mut withdrawals_state = state.clone(); + apply_parent_execution_payload( + &mut withdrawals_state, + parent_bid, + &envelope.message.execution_requests, + spec, + )?; + Withdrawals::::from(get_expected_withdrawals(&withdrawals_state, spec)?) + .into() + } else { + // No envelope available (e.g. genesis). 
The parent had no execution requests, + // so compute withdrawals directly from the current state. + Withdrawals::::from(get_expected_withdrawals(state, spec)?).into() + } } else { // If the previous payload was missed, carry forward the withdrawals from the state. state.payload_expected_withdrawals()?.to_vec() @@ -773,7 +829,7 @@ fn get_execution_payload_gloas( timestamp, random, proposer_index, - latest_execution_block_hash, + parent_block_hash, latest_gas_limit, builder_params, withdrawals, @@ -839,12 +895,15 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; + let slot_number = Some(builder_params.slot.as_u64()); + let payload_attributes = PayloadAttributes::new( timestamp, random, suggested_fee_recipient, Some(withdrawals), Some(parent_beacon_block_root), + slot_number, ); let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs index bf42923cbe..fd5e381023 100644 --- a/beacon_node/beacon_chain/src/block_production/mod.rs +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -1,9 +1,10 @@ use std::{sync::Arc, time::Duration}; +use fork_choice::PayloadStatus; use proto_array::ProposerHeadError; use slot_clock::SlotClock; use tracing::{debug, error, info, instrument, warn}; -use types::{BeaconState, Hash256, Slot, StatePayloadStatus}; +use types::{BeaconState, Hash256, SignedExecutionPayloadEnvelope, Slot}; use crate::{ BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig, @@ -12,14 +13,24 @@ use crate::{ mod gloas; +/// State loaded from the database for block production. +pub(crate) struct BlockProductionState { + pub state: BeaconState, + pub state_root: Option, + pub parent_payload_status: PayloadStatus, + pub parent_envelope: Option>>, +} + impl BeaconChain { /// Load a beacon state from the database for block production. 
This is a long-running process /// that should not be performed in an `async` context. + /// + /// The returned `PayloadStatus` is the payload status of the parent block to be built upon. #[instrument(skip_all, level = "debug")] pub(crate) fn load_state_for_block_production( self: &Arc, slot: Slot, - ) -> Result<(BeaconState, Option), BlockProductionError> { + ) -> Result, BlockProductionError> { let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); @@ -27,16 +38,19 @@ impl BeaconChain { let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); // Atomically read some values from the head whilst avoiding holding cached head `Arc` any - // longer than necessary. - let (head_slot, head_block_root, head_state_root) = { + // longer than necessary. If the head has a payload envelope (Gloas full head), cheaply + // clone the `Arc` so we can pass it to block production without a DB load. + let (head_slot, head_block_root, head_state_root, head_payload_status, head_envelope) = { let head = self.canonical_head.cached_head(); ( head.head_slot(), head.head_block_root(), head.head_state_root(), + head.head_payload_status(), + head.snapshot.execution_envelope.clone(), ) }; - let (state, state_root_opt) = if head_slot < slot { + let result = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. 
// TODO(gloas): re-enable reorgs let gloas_enabled = self @@ -52,37 +66,29 @@ impl BeaconChain { head_to_reorg = %head_block_root, "Proposing block to re-org current head" ); - (re_org_state, Some(re_org_state_root)) + // TODO(gloas): ensure we use a sensible payload status when we enable reorgs + // for Gloas + BlockProductionState { + state: re_org_state, + state_root: Some(re_org_state_root), + parent_payload_status: PayloadStatus::Pending, + parent_envelope: None, + } } else { // Fetch the head state advanced through to `slot`, which should be present in the // state cache thanks to the state advance timer. - // TODO(gloas): need to fix this once fork choice understands payloads - // for now we just use the existence of the head's payload envelope to determine - // whether we should build atop it - let (payload_status, parent_state_root) = if gloas_enabled - && let Ok(Some(envelope)) = self.store.get_payload_envelope(&head_block_root) - { - debug!( - %slot, - parent_state_root = ?envelope.message.state_root, - parent_block_root = ?head_block_root, - "Building Gloas block on full state" - ); - (StatePayloadStatus::Full, envelope.message.state_root) - } else { - (StatePayloadStatus::Pending, head_state_root) - }; + let parent_state_root = head_state_root; let (state_root, state) = self .store - .get_advanced_hot_state( - head_block_root, - payload_status, - slot, - parent_state_root, - ) + .get_advanced_hot_state(head_block_root, slot, parent_state_root) .map_err(BlockProductionError::FailedToLoadState)? 
.ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; - (state, Some(state_root)) + BlockProductionState { + state, + state_root: Some(state_root), + parent_payload_status: head_payload_status, + parent_envelope: head_envelope, + } } } else { warn!( @@ -94,12 +100,19 @@ impl BeaconChain { .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - (state, None) + // TODO(gloas): update this to read payload canonicity from fork choice once ready + let parent_payload_status = PayloadStatus::Pending; + BlockProductionState { + state, + state_root: None, + parent_payload_status, + parent_envelope: None, + } }; drop(state_load_timer); - Ok((state, state_root_opt)) + Ok(result) } /// If configured, wait for the fork choice run at the start of the slot to complete. @@ -232,11 +245,7 @@ impl BeaconChain { let (state_root, state) = self .store - .get_advanced_hot_state_from_cache( - re_org_parent_block, - StatePayloadStatus::Pending, - slot, - ) + .get_advanced_hot_state_from_cache(re_org_parent_block, slot) .or_else(|| { warn!(reason = "no state in cache", "Not attempting re-org"); None diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 1ce1137f1e..9a43147233 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -99,8 +99,7 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, FullPayload, Hash256, InconsistentFork, KzgProofs, RelativeEpoch, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, StatePayloadStatus, - data::DataColumnSidecarError, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data::DataColumnSidecarError, }; /// Maximum block slot number. 
Block with slots bigger than this constant will NOT be processed. @@ -1509,11 +1508,7 @@ impl ExecutionPendingBlock { let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { - // TODO(gloas): could do a similar optimisation here for Full blocks if we have access - // to the parent envelope and its `state_root`. - let state_root = if parent.beacon_block.slot() == state.slot() - && state.payload_status() == StatePayloadStatus::Pending - { + let state_root = if parent.beacon_block.slot() == state.slot() { // If it happens that `pre_state` has *not* already been advanced forward a single // slot, then there is no need to compute the state root for this // `per_slot_processing` call since that state root is already stored in the parent @@ -1957,37 +1952,9 @@ fn load_parent>( // particularly important if `block` descends from the finalized/split block, but at a slot // prior to the finalized slot (which is invalid and inaccessible in our DB schema). // - // Post-Gloas we must also fetch a state with the correct payload status. If the current - // block builds upon the payload of its parent block, then we know the parent block is FULL - // and we need to load the full state. - let (payload_status, parent_state_root) = if parent_block.slot() == chain.spec.genesis_slot - { - // Genesis state is always pending, there is no such thing as a "genesis envelope". - // See: https://github.com/ethereum/consensus-specs/issues/5043 - (StatePayloadStatus::Pending, parent_block.state_root()) - } else if !block.as_block().fork_name_unchecked().gloas_enabled() { - // All pre-Gloas parent states are pending. - (StatePayloadStatus::Pending, parent_block.state_root()) - } else if let Ok(parent_bid_block_hash) = parent_block.payload_bid_block_hash() - && block.as_block().is_parent_block_full(parent_bid_block_hash) - { - // Post-Gloas Full block case. 
- // TODO(gloas): loading the envelope here is not very efficient - let Some(envelope) = chain.store.get_payload_envelope(&root)? else { - return Err(BeaconChainError::DBInconsistent(format!( - "Missing envelope for parent block {root:?}", - )) - .into()); - }; - let state_root = envelope.message.state_root; - (StatePayloadStatus::Full, state_root) - } else { - // Post-Gloas empty block case (also covers the Gloas fork transition). - (StatePayloadStatus::Pending, parent_block.state_root()) - }; let (parent_state_root, state) = chain .store - .get_advanced_hot_state(root, payload_status, block.slot(), parent_state_root)? + .get_advanced_hot_state(root, block.slot(), parent_block.state_root())? .ok_or_else(|| { BeaconChainError::DBInconsistent( format!("Missing state for parent block {root:?}",), @@ -2010,9 +1977,7 @@ fn load_parent>( ); } - let beacon_state_root = if state.slot() == parent_block.slot() - && let StatePayloadStatus::Pending = payload_status - { + let beacon_state_root = if state.slot() == parent_block.slot() { // Sanity check. 
if parent_state_root != parent_block.state_root() { return Err(BeaconChainError::DBInconsistent(format!( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index b963f7c342..74141dc64a 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -23,7 +23,7 @@ use crate::{ use bls::Signature; use execution_layer::ExecutionLayer; use fixed_bytes::FixedBytesExtended; -use fork_choice::{ForkChoice, ResetPayloadStatuses}; +use fork_choice::{ForkChoice, PayloadStatus, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; use logging::crit; @@ -34,7 +34,9 @@ use rand::RngCore; use rayon::prelude::*; use slasher::Slasher; use slot_clock::{SlotClock, TestingSlotClock}; -use state_processing::{AllCaches, per_slot_processing}; +use state_processing::AllCaches; +use state_processing::genesis::genesis_block; +use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; @@ -44,8 +46,8 @@ use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; use types::data::CustodyIndex; use types::{ - BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, + BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, Epoch, EthSpec, + Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing @@ -321,7 +323,7 @@ where .clone() .ok_or("set_genesis_state requires a store")?; - let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + let beacon_block = make_genesis_block(&mut beacon_state, &self.spec)?; beacon_state .build_caches(&self.spec) @@ -374,7 +376,7 @@ where // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent // historic states from being retained (unless `--archive` is set). 
let retain_historic_states = self.chain_config.archive; - let genesis_beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + let genesis_beacon_block = make_genesis_block(&mut beacon_state, &self.spec)?; self.pending_io_batch.push( store .init_anchor_info( @@ -617,7 +619,6 @@ where .map_err(|e| format!("Failed to initialize data column info: {:?}", e))?, ); - // TODO(gloas): add check that checkpoint state is Pending let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, execution_envelope: None, @@ -786,23 +787,26 @@ where .map_err(|e| descriptive_db_error("head block", &e))? .ok_or("Head block not found in store")?; - let state_payload_status = head_payload_status.as_state_payload_status(); - let (_head_state_root, head_state) = store - .get_advanced_hot_state( - head_block_root, - state_payload_status, - current_slot, - head_block.state_root(), - ) + .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root()) .map_err(|e| descriptive_db_error("head state", &e))? .ok_or("Head state not found in store")?; let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; + // Load the execution envelope from the store if the head has a Full payload. + let execution_envelope = if head_payload_status == PayloadStatus::Full { + store + .get_payload_envelope(&head_block_root) + .map_err(|e| format!("Error loading head execution envelope: {:?}", e))? 
+ .map(Arc::new) + } else { + None + }; + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, - execution_envelope: None, + execution_envelope, beacon_block: Arc::new(head_block), beacon_state: head_state, }; @@ -1166,17 +1170,19 @@ where } } -fn genesis_block( +fn make_genesis_block( genesis_state: &mut BeaconState, spec: &ChainSpec, ) -> Result, String> { - let mut genesis_block = BeaconBlock::empty(spec); - *genesis_block.state_root_mut() = genesis_state + let mut block = genesis_block(genesis_state, spec) + .map_err(|e| format!("Error building genesis block: {:?}", e))?; + + *block.state_root_mut() = genesis_state .update_tree_hash_cache() .map_err(|e| format!("Error hashing genesis state: {:?}", e))?; Ok(SignedBeaconBlock::from_block( - genesis_block, + block, // Empty signature, which should NEVER be read. This isn't to-spec, but makes the genesis // block consistent with every other block. Signature::empty(), diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index cd53d0ef7c..1e5e1300ab 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -43,8 +43,8 @@ use crate::{ }; use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseLateHead}; use fork_choice::{ - ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, - ResetPayloadStatuses, + ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, PayloadStatus, + ProtoBlock, ResetPayloadStatuses, }; use itertools::process_results; @@ -315,20 +315,22 @@ impl CanonicalHead { .ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let current_slot = fork_choice.fc_store().get_current_slot(); - // TODO(gloas): pass a better payload status once fork choice is implemented - let payload_status = StatePayloadStatus::Pending; let (_, beacon_state) = store - .get_advanced_hot_state( - beacon_block_root, - 
payload_status, - current_slot, - beacon_block.state_root(), - )? + .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())? .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + // Load the execution envelope from the store if the head has a Full payload. + let execution_envelope = if head_payload_status == PayloadStatus::Full { + store + .get_payload_envelope(&beacon_block_root)? + .map(Arc::new) + } else { + None + }; + let snapshot = BeaconSnapshot { beacon_block_root, - execution_envelope: None, + execution_envelope, beacon_block: Arc::new(beacon_block), beacon_state, }; @@ -683,30 +685,42 @@ impl BeaconChain { drop(fork_choice_read_lock); // If the head has changed, update `self.canonical_head`. - let new_cached_head = if new_view.head_block_root != old_view.head_block_root { + let new_cached_head = if new_view.head_block_root != old_view.head_block_root + || new_payload_status != old_payload_status + { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); + // TODO(gloas): could optimise this to reuse state and rest of snapshot if just the + // payload status has changed. let mut new_snapshot = { let beacon_block = self .store .get_full_block(&new_view.head_block_root)? .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - // TODO(gloas): update once we have fork choice - let payload_status = StatePayloadStatus::Pending; + // Load the execution envelope from the store if the head has a Full payload. + let state_root = beacon_block.state_root(); + let execution_envelope = if new_payload_status == PayloadStatus::Full { + let envelope = self + .store + .get_payload_envelope(&new_view.head_block_root)? + .map(Arc::new) + .ok_or(Error::MissingExecutionPayloadEnvelope( + new_view.head_block_root, + ))?; + + Some(envelope) + } else { + None + }; let (_, beacon_state) = self .store - .get_advanced_hot_state( - new_view.head_block_root, - payload_status, - current_slot, - beacon_block.state_root(), - )? 
- .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?; + .get_advanced_hot_state(new_view.head_block_root, current_slot, state_root)? + .ok_or(Error::MissingBeaconState(state_root))?; BeaconSnapshot { beacon_block: Arc::new(beacon_block), - execution_envelope: None, + execution_envelope, beacon_block_root: new_view.head_block_root, beacon_state, } @@ -770,7 +784,8 @@ impl BeaconChain { let old_snapshot = &old_cached_head.snapshot; // If the head changed, perform some updates. - if new_snapshot.beacon_block_root != old_snapshot.beacon_block_root + if (new_snapshot.beacon_block_root != old_snapshot.beacon_block_root + || new_payload_status != old_payload_status) && let Err(e) = self.after_new_head(&old_cached_head, &new_cached_head, new_head_proto_block) { @@ -974,26 +989,30 @@ impl BeaconChain { // The store migration task and op pool pruning require the *state at the first slot of the // finalized epoch*, rather than the state of the latest finalized block. These two values // will only differ when the first slot of the finalized epoch is a skip slot. - // - // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` - // to ensure we use the same state that we just set as the head. let new_finalized_slot = new_view .finalized_checkpoint .epoch .start_slot(T::EthSpec::slots_per_epoch()); - let new_finalized_state_root = process_results( - StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), - |mut iter| { - iter.find_map(|(state_root, slot)| { - if slot == new_finalized_slot { - Some(state_root) - } else { - None - } - }) - }, - )? - .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?; + let new_finalized_state_root = if new_finalized_slot == finalized_proto_block.slot { + // Fast-path for the common case where the finalized state is not at a skipped slot. 
+ finalized_proto_block.state_root + } else { + // Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot` + // to ensure we use the same state that we just set as the head. + process_results( + StateRootsIterator::new(&self.store, &new_snapshot.beacon_state), + |mut iter| { + iter.find_map(|(state_root, slot)| { + if slot == new_finalized_slot { + Some(state_root) + } else { + None + } + }) + }, + )? + .ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))? + }; let update_cache = true; let new_finalized_state = self diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index f2cec0980f..a24dbd8942 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -21,7 +21,7 @@ use tracing::{debug, instrument}; use types::data::ColumnIndex; use types::{ BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarFulu, DataColumnSubnetId, - EthSpec, Hash256, Slot, StatePayloadStatus, + EthSpec, Hash256, Slot, }; /// An error occurred while validating a gossip data column. @@ -743,12 +743,7 @@ fn verify_proposer_and_signature( // for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root chain .store - .get_advanced_hot_state( - block_parent_root, - StatePayloadStatus::Pending, - column_slot, - parent_block.state_root, - ) + .get_advanced_hot_state(block_parent_root, column_slot, parent_block.state_root) .map_err(|e| GossipDataColumnError::BeaconChainError(Box::new(e.into())))? 
.ok_or_else(|| { GossipDataColumnError::BeaconChainError(Box::new( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d5ff12e33b..9802f091e0 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -63,6 +63,7 @@ pub enum BeaconChainError { ForkChoiceStoreError(ForkChoiceStoreError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), + MissingExecutionPayloadEnvelope(Hash256), MissingHotStateSummary(Hash256), SlotProcessingError(SlotProcessingError), EpochProcessingError(EpochProcessingError), @@ -294,9 +295,6 @@ pub enum BlockProductionError { BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), OpPoolError(OpPoolError), - /// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it - /// cannot produce blocks. - NoEth1ChainConnection, StateSlotTooHigh { produce_at_slot: Slot, state_slot: Slot, @@ -324,6 +322,8 @@ pub enum BlockProductionError { SszTypesError(ssz_types::Error), EnvelopeProcessingError(EnvelopeProcessingError), BlsError(bls::Error), + MissingParentExecutionPayload, + MissingExecutionPayloadEnvelope(Hash256), // TODO(gloas): Remove this once Gloas is implemented GloasNotImplemented(String), } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 2b03a095f1..16542eea2d 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -402,12 +402,20 @@ where let suggested_fee_recipient = execution_layer .get_suggested_fee_recipient(proposer_index) .await; + + let slot_number = if fork.gloas_enabled() { + Some(builder_params.slot.as_u64()) + } else { + None + }; + let payload_attributes = PayloadAttributes::new( timestamp, random, suggested_fee_recipient, withdrawals, parent_beacon_block_root, + slot_number, ); let target_gas_limit = 
execution_layer.get_proposer_gas_limit(proposer_index).await; diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 24258d2d31..3c17c1ebba 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -330,7 +330,7 @@ impl, Cold: ItemStore> BackgroundMigrator state, other => { error!( diff --git a/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs index bb59b16ffb..98863a49d5 100644 --- a/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs +++ b/beacon_node/beacon_chain/src/payload_bid_verification/tests.rs @@ -10,9 +10,10 @@ use kzg::KzgCommitment; use slot_clock::{SlotClock, TestingSlotClock}; use ssz::Encode; use ssz_types::VariableList; +use state_processing::genesis::genesis_block; use store::{HotColdDB, StoreConfig}; use types::{ - Address, BeaconBlock, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, ExecutionBlockHash, + Address, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, ExecutionBlockHash, ExecutionPayloadBid, Hash256, MinimalEthSpec, ProposerPreferences, SignedBeaconBlock, SignedExecutionPayloadBid, SignedProposerPreferences, SignedRoot, Slot, }; @@ -112,11 +113,11 @@ impl TestContext { ) .expect("should register inactive builder"); - let mut genesis_block = BeaconBlock::empty(&spec); - *genesis_block.state_root_mut() = state + let mut block = genesis_block(&state, &spec).expect("should build genesis block"); + *block.state_root_mut() = state .update_tree_hash_cache() .expect("should hash genesis state"); - let signed_block = SignedBeaconBlock::from_block(genesis_block, Signature::empty()); + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); let block_root = signed_block.canonical_root(); let snapshot = BeaconSnapshot::new( diff --git a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs 
b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs index 9e869a59b8..0db6d57ed6 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_streamer/tests.rs @@ -65,13 +65,12 @@ fn build_chain( message: ExecutionPayloadEnvelope { payload: ExecutionPayloadGloas { block_hash, + slot_number: slot, ..Default::default() }, execution_requests: Default::default(), builder_index: 0, beacon_block_root: block_root, - slot, - state_root: Hash256::zero(), }, signature: Signature::empty(), }) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs index 86f9293c8f..4b8e7347cc 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/execution_pending_envelope.rs @@ -1,10 +1,7 @@ use std::sync::Arc; use slot_clock::SlotClock; -use state_processing::{ - VerifySignatures, - envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}, -}; +use state_processing::{VerifySignatures, envelope_processing::verify_execution_payload_envelope}; use types::EthSpec; use crate::{ @@ -77,16 +74,15 @@ impl GossipVerifiedEnvelope { } else { load_snapshot_from_state_root::(block_root, self.block.state_root(), &chain.store)? }; - let mut state = snapshot.pre_state; + let state = snapshot.pre_state; - // All the state modifications are done in envelope_processing - process_execution_payload_envelope( - &mut state, - Some(snapshot.state_root), + // Verify the envelope against the state (no state mutation). 
+ verify_execution_payload_envelope( + &state, &signed_envelope, // verify signature already done for GossipVerifiedEnvelope VerifySignatures::False, - VerifyStateRoot::True, + snapshot.state_root, &chain.spec, )?; @@ -97,7 +93,7 @@ impl GossipVerifiedEnvelope { }, import_data: EnvelopeImportData { block_root, - post_state: Box::new(state), + _phantom: Default::default(), }, payload_verification_handle, }) diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs index 77b44a2af0..80724e2b00 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/gossip_verified_envelope.rs @@ -42,18 +42,18 @@ pub(crate) fn verify_envelope_consistency( ) -> Result<(), EnvelopeError> { // Check that the envelope's slot isn't from a slot prior // to the latest finalized slot. - if envelope.slot < latest_finalized_slot { + if envelope.slot() < latest_finalized_slot { return Err(EnvelopeError::PriorToFinalization { - payload_slot: envelope.slot, + payload_slot: envelope.slot(), latest_finalized_slot, }); } // Check that the slot of the envelope matches the slot of the block. - if envelope.slot != block.slot() { + if envelope.slot() != block.slot() { return Err(EnvelopeError::SlotMismatch { block: block.slot(), - envelope: envelope.slot, + envelope: envelope.slot(), }); } @@ -144,7 +144,7 @@ impl GossipVerifiedEnvelope { // validator pubkey cache for the proposer's pubkey, avoiding a state load from disk. // For external builder envelopes, we must load the state to access the builder registry. 
let builder_index = envelope.builder_index; - let block_slot = envelope.slot; + let block_slot = envelope.slot(); let envelope_epoch = block_slot.epoch(T::EthSpec::slots_per_epoch()); // Since the payload's block is already guaranteed to be imported, the associated `proto_block.current_epoch_shuffling_id` // already carries the correct `shuffling_decision_block`. @@ -224,7 +224,6 @@ impl GossipVerifiedEnvelope { builder_index, block_hash: signed_envelope.message.payload.block_hash, block_root: beacon_block_root, - state_root: signed_envelope.message.state_root, }, )); } @@ -334,13 +333,12 @@ mod tests { ExecutionPayloadEnvelope { payload: ExecutionPayloadGloas { block_hash, + slot_number: slot, ..ExecutionPayloadGloas::default() }, execution_requests: ExecutionRequests::default(), builder_index, beacon_block_root: Hash256::ZERO, - slot, - state_root: Hash256::ZERO, } } @@ -365,6 +363,7 @@ mod tests { voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), bls_to_execution_changes: VariableList::empty(), + parent_execution_requests: ExecutionRequests::default(), signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), payload_attestations: VariableList::empty(), _phantom: PhantomData, diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs index 7e79799310..5a6d3a1b7d 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/import.rs @@ -6,7 +6,7 @@ use fork_choice::PayloadVerificationStatus; use slot_clock::SlotClock; use store::StoreOp; use tracing::{debug, error, info, info_span, instrument, warn}; -use types::{BeaconState, BlockImportSource, Hash256, SignedExecutionPayloadEnvelope}; +use types::{BlockImportSource, Hash256, SignedExecutionPayloadEnvelope}; use super::{ AvailableEnvelope, AvailableExecutedEnvelope, EnvelopeError, 
EnvelopeImportData, @@ -198,7 +198,7 @@ impl BeaconChain { let EnvelopeImportData { block_root, - post_state, + _phantom, } = import_data; let block_root = { @@ -208,7 +208,6 @@ impl BeaconChain { chain.import_execution_payload_envelope( envelope, block_root, - *post_state, payload_verification_outcome.payload_verification_status, ) }, @@ -231,7 +230,6 @@ impl BeaconChain { &self, signed_envelope: AvailableEnvelope, block_root: Hash256, - state: BeaconState, payload_verification_status: PayloadVerificationStatus, ) -> Result { // Everything in this initial section is on the hot path for processing the envelope. @@ -285,10 +283,6 @@ impl BeaconChain { block_root, signed_envelope.clone(), )); - ops.push(StoreOp::PutState( - signed_envelope.message.state_root, - &state, - )); let db_span = info_span!("persist_payloads_and_blobs").entered(); @@ -365,7 +359,6 @@ impl BeaconChain { builder_index: signed_envelope.message.builder_index, block_hash: signed_envelope.block_hash(), block_root, - state_root: signed_envelope.message.state_root, execution_optimistic: payload_verification_status.is_optimistic(), })); } diff --git a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs index 225d5a9892..51fc3f235d 100644 --- a/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs +++ b/beacon_node/beacon_chain/src/payload_envelope_verification/mod.rs @@ -18,11 +18,11 @@ //! //! 
``` +use std::marker::PhantomData; use std::sync::Arc; -use store::Error as DBError; - use state_processing::{BlockProcessingError, envelope_processing::EnvelopeProcessingError}; +use store::Error as DBError; use tracing::instrument; use types::{ BeaconState, BeaconStateError, ChainSpec, DataColumnSidecarList, EthSpec, ExecutionBlockHash, @@ -41,10 +41,11 @@ mod payload_notifier; pub use execution_pending_envelope::ExecutionPendingEnvelope; +// TODO(gloas): could remove this type completely, or remove the generic #[derive(PartialEq)] pub struct EnvelopeImportData { pub block_root: Hash256, - pub post_state: Box>, + _phantom: PhantomData, } #[derive(Debug)] @@ -249,9 +250,6 @@ impl From for EnvelopeError { committed_bid, envelope, }, - EnvelopeProcessingError::BlockProcessingError(e) => { - EnvelopeError::BlockProcessingError(e) - } e => EnvelopeError::EnvelopeProcessingError(e), } } diff --git a/beacon_node/beacon_chain/src/pending_payload_envelopes.rs b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs index 336ab5323f..351783832d 100644 --- a/beacon_node/beacon_chain/src/pending_payload_envelopes.rs +++ b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs @@ -87,12 +87,13 @@ mod tests { fn make_envelope(slot: Slot) -> ExecutionPayloadEnvelope { ExecutionPayloadEnvelope { - payload: ExecutionPayloadGloas::default(), + payload: ExecutionPayloadGloas { + slot_number: slot, + ..ExecutionPayloadGloas::default() + }, execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root: Hash256::ZERO, - slot, - state_root: Hash256::ZERO, } } diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index 4c070e7ecc..cb916cb514 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -26,10 +26,7 @@ use std::sync::{ use task_executor::TaskExecutor; use tokio::time::{Instant, sleep, sleep_until}; use 
tracing::{Instrument, debug, debug_span, error, instrument, warn}; -use types::{ - AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot, - StatePayloadStatus, -}; +use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. @@ -280,16 +277,9 @@ fn advance_head(beacon_chain: &Arc>) -> Resu (snapshot.beacon_block_root, snapshot.beacon_state_root()) }; - // TODO(gloas): do better once we have fork choice - let payload_status = StatePayloadStatus::Pending; let (head_state_root, mut state) = beacon_chain .store - .get_advanced_hot_state( - head_block_root, - payload_status, - current_slot, - head_block_state_root, - )? + .get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)? .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?; let initial_slot = state.slot(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1b03b6e10b..e84f9ad983 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1043,6 +1043,13 @@ where assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); + // For Gloas forks, delegate to make_block_with_envelope and discard the envelope. + if self.spec.fork_name_at_slot::(slot).gloas_enabled() { + let (block_contents, _envelope, state) = + Box::pin(self.make_block_with_envelope(state, slot)).await; + return (block_contents, state); + } + complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); @@ -1124,11 +1131,24 @@ where GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); + // Load the parent's payload envelope and status from the cached head. 
+ // TODO(gloas): we may want to pass these as arguments to support cases where we build + // on alternate chains to the head. + let (parent_payload_status, parent_envelope) = { + let head = self.chain.canonical_head.cached_head(); + ( + head.head_payload_status(), + head.snapshot.execution_envelope.clone(), + ) + }; + let (block, pending_state, _consensus_block_value) = self .chain .produce_block_on_state_gloas( state, None, + parent_payload_status, + parent_envelope, slot, randao_reveal, graffiti_settings, @@ -2681,32 +2701,27 @@ where Ok(block_hash) } - /// Process an execution payload envelope for a Gloas block. + /// Verify and process (with fork choice) an execution payload envelope for a Gloas block. pub async fn process_envelope( &self, block_root: Hash256, signed_envelope: SignedExecutionPayloadEnvelope, - pending_state: &mut BeaconState, - ) -> Hash256 { - let state_root = signed_envelope.message.state_root; + state: &BeaconState, + block_state_root: Hash256, + ) { debug!( - slot = %signed_envelope.message.slot, - ?state_root, + slot = %signed_envelope.slot(), "Processing execution payload envelope" ); - let block_state_root = pending_state - .update_tree_hash_cache() - .expect("should compute pending state root"); - state_processing::envelope_processing::process_execution_payload_envelope( - pending_state, - Some(block_state_root), + state_processing::envelope_processing::verify_execution_payload_envelope( + state, &signed_envelope, state_processing::VerifySignatures::True, - state_processing::envelope_processing::VerifyStateRoot::True, + block_state_root, &self.spec, ) - .expect("should process envelope"); + .expect("should verify envelope"); // Notify the EL of the new payload so forkchoiceUpdated can reference it. let block = self @@ -2747,16 +2762,18 @@ where // Store the envelope. 
self.chain .store - .put_payload_envelope(&block_root, signed_envelope) + .put_payload_envelope(&block_root, &signed_envelope) .expect("should store envelope"); - // Store the Full state. + // Update fork choice so it knows the payload was received. self.chain - .store - .put_state(&state_root, pending_state) - .expect("should store full state"); + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .expect("should update fork choice with envelope"); - state_root + // Run fork choice because the envelope could become the head. + self.chain.recompute_head_at_current_slot().await; } /// Builds a `RangeSyncBlock` from a `SignedBeaconBlock` and blobs or data columns retrieved from @@ -2970,7 +2987,8 @@ where BlockError, > { self.set_current_slot(slot); - let (block_contents, new_state) = self.make_block(state, slot).await; + let (block_contents, opt_envelope, new_state) = + self.make_block_with_envelope(state, slot).await; let block_hash = self .process_block( @@ -2979,6 +2997,12 @@ where block_contents.clone(), ) .await?; + + if let Some(envelope) = opt_envelope { + let block_state_root = block_contents.0.state_root(); + self.process_envelope(block_hash.into(), envelope, &new_state, block_state_root) + .await; + } Ok((block_hash, block_contents, new_state)) } diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 26ac02d91b..36bf5c7113 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -302,7 +302,8 @@ mod test { #[test] fn basic_operation() { - let (state, keypairs) = get_state(8); + // >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). + let (state, keypairs) = get_state(32); let store = get_store(); @@ -311,21 +312,14 @@ mod test { check_cache_get(&cache, &keypairs[..]); // Try adding a state with the same number of keypairs. 
- let (state, keypairs) = get_state(8); - cache - .import_new_pubkeys(&state) - .expect("should import pubkeys"); - check_cache_get(&cache, &keypairs[..]); - - // Try adding a state with less keypairs. - let (state, _) = get_state(1); + let (state, keypairs) = get_state(32); cache .import_new_pubkeys(&state) .expect("should import pubkeys"); check_cache_get(&cache, &keypairs[..]); // Try adding a state with more keypairs. - let (state, keypairs) = get_state(12); + let (state, keypairs) = get_state(48); cache .import_new_pubkeys(&state) .expect("should import pubkeys"); @@ -334,7 +328,7 @@ mod test { #[test] fn persistence() { - let (state, keypairs) = get_state(8); + let (state, keypairs) = get_state(32); let store = get_store(); @@ -349,7 +343,7 @@ mod test { check_cache_get(&cache, &keypairs[..]); // Add some more keypairs. - let (state, keypairs) = get_state(12); + let (state, keypairs) = get_state(48); let ops = cache .import_new_pubkeys(&state) .expect("should import pubkeys"); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index bca60d27cd..a3ab959d12 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -10,7 +10,7 @@ use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot}; -pub const VALIDATOR_COUNT: usize = 16; +pub const VALIDATOR_COUNT: usize = 32; /// A cached set of keys. 
static KEYPAIRS: LazyLock> = diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 91bc8e249a..da7f380e36 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1389,13 +1389,18 @@ async fn attestation_to_finalized_block() { let earlier_block_root = earlier_block.canonical_root(); assert_ne!(earlier_block_root, finalized_checkpoint.root); + // For Gloas, `block.state_root()` returns the pending state root, but the cold DB + // may store the full state root. Use `get_cold_state_root` to get the actual stored key. + let cold_state_root = harness + .chain + .store + .get_cold_state_root(earlier_slot) + .expect("should not error getting cold state root") + .expect("cold state root should be present for finalized slot in archive store"); + let mut state = harness .chain - .get_state( - &earlier_block.state_root(), - Some(earlier_slot), - CACHE_STATE_IN_TESTS, - ) + .get_state(&cold_state_root, Some(earlier_slot), CACHE_STATE_IN_TESTS) .expect("should not error getting state") .expect("should find state"); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 2bb60f111a..6646fe0b1e 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -31,8 +31,8 @@ use types::{test_utils::generate_deterministic_keypair, *}; type E = MainnetEthSpec; -// Should ideally be divisible by 3. -const VALIDATOR_COUNT: usize = 24; +// Gloas requires >= 1 validator per slot for PTC committee computation, so >= 32 for MainnetEthSpec. 
+const VALIDATOR_COUNT: usize = 32; const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; const BLOCK_INDICES: &[usize] = &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT_LENGTH - 1]; @@ -77,10 +77,9 @@ async fn get_chain_segment() -> (Vec>, Vec], + harness: &BeaconChainHarness>, +) { + for snapshot in chain_segment { + if let Some(ref envelope) = snapshot.execution_envelope { + harness + .chain + .store + .put_payload_envelope(&snapshot.beacon_block_root, envelope) + .expect("should store envelope"); + } + } +} + +/// Update fork choice with envelope payload status for all blocks in the chain segment. +/// +/// Must be called after the blocks have been imported into fork choice. +fn update_fork_choice_with_envelopes( + chain_segment: &[BeaconSnapshot], + harness: &BeaconChainHarness>, +) { + for snapshot in chain_segment { + if snapshot.execution_envelope.is_some() { + // Call may fail if block was invalid (it will have no fork choice node). + let _ = harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(snapshot.beacon_block_root); + } + } +} + fn junk_signature() -> Signature { let kp = generate_deterministic_keypair(VALIDATOR_COUNT); let message = Hash256::from_slice(&[42; 32]); @@ -303,6 +343,7 @@ fn update_data_column_signed_header( async fn chain_segment_full_segment() { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + store_envelopes_for_chain_segment(&chain_segment, &harness); let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs, harness.chain.clone()) .into_iter() @@ -328,6 +369,7 @@ async fn chain_segment_full_segment() { .into_block_error() .expect("should import chain segment"); + update_fork_choice_with_envelopes(&chain_segment, &harness); harness.chain.recompute_head_at_current_slot().await; assert_eq!( @@ -348,6 +390,7 @@ async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 31, 32, 
33] { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); + store_envelopes_for_chain_segment(&chain_segment, &harness); harness .chain @@ -363,6 +406,7 @@ async fn chain_segment_varying_chunk_size() { .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); } + update_fork_choice_with_envelopes(&chain_segment, &harness); harness.chain.recompute_head_at_current_slot().await; assert_eq!( @@ -514,6 +558,7 @@ async fn assert_invalid_signature( snapshots: &[BeaconSnapshot], item: &str, ) { + store_envelopes_for_chain_segment(chain_segment, harness); let blocks: Vec> = snapshots .iter() .zip(chain_segment_blobs.iter()) @@ -540,10 +585,22 @@ async fn assert_invalid_signature( harness.chain.recompute_head_at_current_slot().await; // Ensure the block will be rejected if imported on its own (without gossip checking). - let ancestor_blocks = chain_segment + // Only include blocks that haven't been imported yet (after the finalized slot) to avoid + // `WouldRevertFinalizedSlot` errors when part 1 already imported and finalized some blocks. + // Use the fork choice finalized checkpoint directly, as the cached head may not reflect + // finalization that occurred during process_chain_segment. 
+ let finalized_slot = harness + .chain + .canonical_head + .fork_choice_read_lock() + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); + let ancestor_blocks: Vec> = chain_segment .iter() .take(block_index) .zip(chain_segment_blobs.iter()) + .filter(|(snapshot, _)| snapshot.beacon_block.slot() > finalized_slot) .map(|(snapshot, blobs)| { build_range_sync_block(snapshot.beacon_block.clone(), blobs, harness.chain.clone()) }) @@ -554,6 +611,7 @@ async fn assert_invalid_signature( .chain .process_chain_segment(ancestor_blocks, NotifyExecutionLayer::Yes) .await; + update_fork_choice_with_envelopes(chain_segment, harness); harness.chain.recompute_head_at_current_slot().await; let process_res = harness @@ -594,6 +652,7 @@ async fn get_invalid_sigs_harness( chain_segment: &[BeaconSnapshot], ) -> BeaconChainHarness> { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); + store_envelopes_for_chain_segment(chain_segment, &harness); harness .chain .slot_clock @@ -1091,6 +1150,21 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); + // Post-Gloas, store the execution payload envelope so that subsequent blocks can look up + // the parent envelope. 
+ if let Some(ref envelope) = snapshot.execution_envelope { + harness + .chain + .store + .put_payload_envelope(&snapshot.beacon_block_root, envelope) + .expect("should store envelope"); + harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(snapshot.beacon_block_root) + .expect("should update fork choice with envelope"); + } if let Some(data_sidecars) = blobs_opt { verify_and_process_gossip_data_sidecars(&harness, data_sidecars).await; } @@ -2040,7 +2114,10 @@ async fn range_sync_block_construction_fails_with_wrong_blob_count() { async fn range_sync_block_rejects_missing_custody_columns() { let spec = test_spec::(); - if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() + || spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() + { return; } @@ -2118,7 +2195,10 @@ async fn range_sync_block_rejects_missing_custody_columns() { async fn rpc_block_allows_construction_past_da_boundary() { let spec = test_spec::(); - if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if !spec.fork_name_at_slot::(Slot::new(0)).fulu_enabled() + || spec.fork_name_at_slot::(Slot::new(0)).gloas_enabled() + { return; } diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 6114bd7f45..5846ccfd7e 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -16,8 +16,8 @@ use types::*; type E = MainnetEthSpec; -// Should ideally be divisible by 3. -const VALIDATOR_COUNT: usize = 24; +// >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). +const VALIDATOR_COUNT: usize = 32; /// A cached set of keys. 
static KEYPAIRS: LazyLock> = @@ -52,7 +52,8 @@ async fn rpc_columns_with_invalid_header_signature() { let spec = Arc::new(test_spec::()); // Only run this test if columns are enabled. - if !spec.is_fulu_scheduled() { + // TODO(gloas): Gloas blocks don't have blob_kzg_commitments — blobs are in the envelope. + if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() { return; } diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 121f8c255d..5305965f0f 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -170,7 +170,10 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { #[tokio::test] async fn data_column_sidecar_event_on_process_rpc_columns() { - if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) { + // Gloas blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if fork_name_from_env().is_none_or(|f| !f.fulu_enabled()) + || fork_name_from_env().is_some_and(|f| f.gloas_enabled()) + { return; }; diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 947024e8c2..38d4f4c47e 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -371,7 +371,7 @@ impl InvalidPayloadRig { /// Simple test of the different import types. #[tokio::test] async fn valid_invalid_syncing() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -388,7 +388,7 @@ async fn valid_invalid_syncing() { /// `latest_valid_hash`. 
#[tokio::test] async fn invalid_payload_invalidates_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -445,7 +445,7 @@ async fn immediate_forkchoice_update_invalid_test( #[tokio::test] async fn immediate_forkchoice_update_payload_invalid() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|latest_valid_hash| Payload::Invalid { @@ -456,7 +456,7 @@ async fn immediate_forkchoice_update_payload_invalid() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_block_hash() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|_| Payload::InvalidBlockHash).await @@ -464,7 +464,7 @@ async fn immediate_forkchoice_update_payload_invalid_block_hash() { #[tokio::test] async fn immediate_forkchoice_update_payload_invalid_terminal_block() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } immediate_forkchoice_update_invalid_test(|_| Payload::Invalid { @@ -476,7 +476,7 @@ async fn immediate_forkchoice_update_payload_invalid_terminal_block() { /// Ensure the client tries to exit when the justified checkpoint is invalidated. 
#[tokio::test] async fn justified_checkpoint_becomes_invalid() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -520,7 +520,7 @@ async fn justified_checkpoint_becomes_invalid() { /// Ensure that a `latest_valid_hash` for a pre-finality block only reverts a single block. #[tokio::test] async fn pre_finalized_latest_valid_hash() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4; @@ -569,7 +569,7 @@ async fn pre_finalized_latest_valid_hash() { /// - Will not validate `latest_valid_root` and its ancestors. #[tokio::test] async fn latest_valid_hash_will_not_validate() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } const LATEST_VALID_SLOT: u64 = 3; @@ -618,7 +618,7 @@ async fn latest_valid_hash_will_not_validate() { /// Check behaviour when the `latest_valid_hash` is a junk value. #[tokio::test] async fn latest_valid_hash_is_junk() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 5; @@ -661,7 +661,7 @@ async fn latest_valid_hash_is_junk() { /// Check that descendants of invalid blocks are also invalidated. 
#[tokio::test] async fn invalidates_all_descendants() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; @@ -764,7 +764,7 @@ async fn invalidates_all_descendants() { /// Check that the head will switch after the canonical branch is invalidated. #[tokio::test] async fn switches_heads() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let num_blocks = E::slots_per_epoch() * 4 + E::slots_per_epoch() / 2; @@ -863,7 +863,7 @@ async fn switches_heads() { #[tokio::test] async fn invalid_during_processing() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -897,7 +897,7 @@ async fn invalid_during_processing() { #[tokio::test] async fn invalid_after_optimistic_sync() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -937,7 +937,7 @@ async fn invalid_after_optimistic_sync() { #[tokio::test] async fn manually_validate_child() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); @@ -957,7 +957,7 @@ async fn manually_validate_child() { #[tokio::test] async fn manually_validate_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut 
rig = InvalidPayloadRig::new().enable_attestations(); @@ -977,7 +977,7 @@ async fn manually_validate_parent() { #[tokio::test] async fn payload_preparation() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1034,13 +1034,14 @@ async fn payload_preparation() { fee_recipient, None, None, + None, ); assert_eq!(rig.previous_payload_attributes(), payload_attributes); } #[tokio::test] async fn invalid_parent() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1107,7 +1108,7 @@ async fn invalid_parent() { #[tokio::test] async fn attesting_to_optimistic_head() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new(); @@ -1320,7 +1321,7 @@ impl InvalidHeadSetup { #[tokio::test] async fn recover_from_invalid_head_by_importing_blocks() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let InvalidHeadSetup { @@ -1362,7 +1363,7 @@ async fn recover_from_invalid_head_by_importing_blocks() { #[tokio::test] async fn recover_from_invalid_head_after_persist_and_reboot() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let InvalidHeadSetup { @@ -1407,7 +1408,7 @@ async fn recover_from_invalid_head_after_persist_and_reboot() { #[tokio::test] async fn weights_after_resetting_optimistic_status() { - if fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled()) { + if 
fork_name_from_env().is_some_and(|f| !f.bellatrix_enabled() || f.gloas_enabled()) { return; } let mut rig = InvalidPayloadRig::new().enable_attestations(); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 1889c1f625..bc7c98041f 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -845,14 +845,13 @@ async fn check_all_base_rewards_for_subset( .state_at_slot(Slot::new(slot - 1), StateSkipConfig::WithoutStateRoots) .unwrap(); - // TODO(gloas): handle payloads? let mut pre_state = BlockReplayer::>::new( parent_state, &harness.spec, ) .no_signature_verification() .minimal_block_root_verification() - .apply_blocks(vec![], vec![], Some(block.slot())) + .apply_blocks(vec![], Some(block.slot())) .unwrap() .into_state(); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c6e13bd160..47bda60eb8 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -27,6 +27,7 @@ use beacon_chain::{ }; use bls::{Keypair, Signature, SignatureBytes}; use fixed_bytes::FixedBytesExtended; +use fork_choice::PayloadStatus; use logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; @@ -53,7 +54,7 @@ use types::test_utils::{SeedableRng, XorShiftRng}; use types::*; // Should ideally be divisible by 3. -pub const LOW_VALIDATOR_COUNT: usize = 24; +pub const LOW_VALIDATOR_COUNT: usize = 32; pub const HIGH_VALIDATOR_COUNT: usize = 64; // When set to true, cache any states fetched from the db. @@ -184,6 +185,10 @@ async fn light_client_bootstrap_test() { // No-op prior to Altair. return; }; + // TODO(EIP-7732): Light client not yet implemented for Gloas. 
+ if spec.is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store_generic(&db_path, StoreConfig::default(), spec.clone()); @@ -239,6 +244,10 @@ async fn light_client_updates_test() { // No-op prior to Altair. return; }; + // TODO(EIP-7732): Light client not yet implemented for Gloas. + if spec.is_gloas_scheduled() { + return; + } let num_final_blocks = E::slots_per_epoch() * 2; let db_path = tempdir().unwrap(); @@ -568,13 +577,12 @@ async fn epoch_boundary_state_attestation_processing() { .get_blinded_block(&block_root) .unwrap() .expect("block exists"); - // Use get_state as the state may be finalized by this point + // Use get_state as the state may be finalized by this point. + let state_root = block.state_root(); let mut epoch_boundary_state = store - .get_state(&block.state_root(), None, CACHE_STATE_IN_TESTS) + .get_state(&state_root, None, CACHE_STATE_IN_TESTS) .expect("no error") - .unwrap_or_else(|| { - panic!("epoch boundary state should exist {:?}", block.state_root()) - }); + .unwrap_or_else(|| panic!("epoch boundary state should exist {:?}", state_root)); let ebs_state_root = epoch_boundary_state.update_tree_hash_cache().unwrap(); let mut ebs_of_ebs = store .get_state(&ebs_state_root, None, CACHE_STATE_IN_TESTS) @@ -673,8 +681,11 @@ async fn forwards_iter_block_and_state_roots_until() { let block_root = block_roots[slot.as_usize()]; assert_eq!(block_root_iter.next().unwrap().unwrap(), (block_root, slot)); + let (iter_state_root, iter_slot) = state_root_iter.next().unwrap().unwrap(); + assert_eq!(iter_slot, slot); + let state_root = state_roots[slot.as_usize()]; - assert_eq!(state_root_iter.next().unwrap().unwrap(), (state_root, slot)); + assert_eq!(iter_state_root, state_root); } }; @@ -708,13 +719,8 @@ async fn block_replayer_hooks() { .add_attested_blocks_at_slots(state.clone(), state_root, &block_slots, &all_validators) .await; - let (blocks, envelopes) = store - .load_blocks_to_replay( - Slot::new(0), - max_slot, 
- end_block_root.into(), - StatePayloadStatus::Pending, - ) + let blocks = store + .load_blocks_to_replay(Slot::new(0), max_slot, end_block_root.into()) .unwrap(); let mut pre_slots = vec![]; @@ -749,7 +755,7 @@ async fn block_replayer_hooks() { post_block_slots.push(block.slot()); Ok(()) })) - .apply_blocks(blocks, envelopes, None) + .apply_blocks(blocks, None) .unwrap() .into_state(); @@ -2871,12 +2877,6 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness .chain .store @@ -2884,8 +2884,21 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { .unwrap() .unwrap(); - // The test premise requires the anchor block to have a payload. - assert!(wss_block.message().execution_payload().is_ok()); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + + // The test premise requires the anchor block to have a payload (or a payload bid in Gloas). + assert!( + wss_block.message().execution_payload().is_ok() + || wss_block + .message() + .body() + .signed_execution_payload_bid() + .is_ok() + ); let wss_blobs_opt = harness .chain @@ -2967,15 +2980,19 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { chain.head_snapshot().beacon_state.slot() ); - let payload_exists = chain - .store - .execution_payload_exists(&wss_block_root) - .unwrap_or(false); + // In Gloas, the execution payload envelope is separate from the block and will be synced + // from the network. We don't check for its existence here. 
+ if !wss_block.fork_name_unchecked().gloas_enabled() { + let payload_exists = chain + .store + .execution_payload_exists(&wss_block_root) + .unwrap_or(false); - assert!( - payload_exists, - "Split block payload must exist in the new node's store after checkpoint sync" - ); + assert!( + payload_exists, + "Split block payload must exist in the new node's store after checkpoint sync" + ); + } } async fn weak_subjectivity_sync_test( @@ -3013,18 +3030,17 @@ async fn weak_subjectivity_sync_test( .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) .unwrap() .unwrap(); - let wss_state_root = harness - .chain - .state_root_at_slot(checkpoint_slot) - .unwrap() - .unwrap(); - let wss_block = harness .chain .store .get_full_block(&wss_block_root) .unwrap() .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); let wss_blobs_opt = harness .chain .get_or_reconstruct_blobs(&wss_block_root) @@ -3101,6 +3117,20 @@ async fn weak_subjectivity_sync_test( .build() .expect("should build"); + // Store the WSS envelope to simulate it arriving from network sync. + // In production, the envelope would be synced from the network after checkpoint sync. + if let Some(envelope) = harness + .chain + .store + .get_payload_envelope(&wss_block.canonical_root()) + .unwrap_or(None) + { + beacon_chain + .store + .put_payload_envelope(&wss_block.canonical_root(), &envelope) + .unwrap(); + } + let beacon_chain = Arc::new(beacon_chain); let wss_block_root = wss_block.canonical_root(); let store_wss_block = harness @@ -3120,6 +3150,21 @@ async fn weak_subjectivity_sync_test( assert_eq!(store_wss_blobs_opt, wss_blobs_opt); } + // Store the WSS block's envelope in the new chain (required for Gloas forward sync). + // The first forward block needs the checkpoint block's envelope to determine the parent's + // Full state. 
+ if let Some(envelope) = harness + .chain + .store + .get_payload_envelope(&wss_block_root) + .unwrap() + { + beacon_chain + .store + .put_payload_envelope(&wss_block_root, &envelope) + .unwrap(); + } + // Apply blocks forward to reach head. let chain_dump = harness.chain.chain_dump().unwrap(); let new_blocks = chain_dump @@ -3154,6 +3199,21 @@ async fn weak_subjectivity_sync_test( ) .await .unwrap(); + + // Store the envelope and apply it to fork choice. + if let Some(envelope) = &snapshot.execution_envelope { + beacon_chain + .store + .put_payload_envelope(&block_root, envelope) + .unwrap(); + // Update fork choice so head selection accounts for Full payload status. + beacon_chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .unwrap(); + } + beacon_chain.recompute_head_at_current_slot().await; // Check that the new block's state can be loaded correctly. @@ -3305,6 +3365,17 @@ async fn weak_subjectivity_sync_test( } assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); + // Store envelopes for all historic blocks (needed for dumping the chain from the new node). + for snapshot in chain_dump.iter() { + let block_root = snapshot.beacon_block_root; + if let Some(envelope) = &snapshot.execution_envelope { + beacon_chain + .store + .put_payload_envelope(&block_root, envelope) + .unwrap(); + } + } + // Sanity check for non-aligned WSS starts, to make sure the WSS block is persisted properly if wss_block_slot != wss_state_slot { let new_node_block_root_at_wss_block = beacon_chain @@ -3374,13 +3445,12 @@ async fn weak_subjectivity_sync_test( assert_eq!(state.canonical_root().unwrap(), state_root); } - // Anchor slot is still set to the slot of the checkpoint block. 
- // Note: since hot tree states the anchor slot is set to the aligned ws state slot - // https://github.com/sigp/lighthouse/pull/6750 - let wss_aligned_slot = if checkpoint_slot % E::slots_per_epoch() == 0 { - checkpoint_slot + // Anchor slot is set to the WSS state slot, which is always epoch-aligned (the state is + // advanced to an epoch boundary during checkpoint sync). + let wss_aligned_slot = if wss_state_slot % E::slots_per_epoch() == 0 { + wss_state_slot } else { - (checkpoint_slot.epoch(E::slots_per_epoch()) + Epoch::new(1)) + (wss_state_slot.epoch(E::slots_per_epoch()) + Epoch::new(1)) .start_slot(E::slots_per_epoch()) }; assert_eq!(store.get_anchor_info().anchor_slot, wss_aligned_slot); @@ -3635,6 +3705,10 @@ async fn test_import_historical_data_columns_batch_no_block_found() { if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { return; }; + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { + return; + } let spec = test_spec::(); let db_path = tempdir().unwrap(); @@ -3745,12 +3819,14 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); - let split_slot = Slot::new(E::slots_per_epoch() * 4); + let finalized_epoch_start_slot = Slot::new(E::slots_per_epoch() * 4); let pre_skips = 1; let post_skips = 1; - // Build the chain up to the intended split slot, with 3 skips before the split. - let slots = (1..=split_slot.as_u64() - pre_skips) + let split_slot = finalized_epoch_start_slot; + + // Build the chain up to the intended finalized epoch slot, with 1 skip before the split. 
+ let slots = (1..=finalized_epoch_start_slot.as_u64() - pre_skips) .map(Slot::new) .collect::>(); @@ -3769,20 +3845,26 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { // // - one that is invalid because it conflicts with finalization (slot <= finalized_slot) // - one that is valid because its slot is not finalized (slot > finalized_slot) + // + // Note: block verification uses finalized_checkpoint.epoch.start_slot() (== + // finalized_epoch_start_slot) for the finalized slot check. let (unadvanced_split_state, unadvanced_split_state_root) = harness.get_current_state_and_root(); let ((invalid_fork_block, _), _) = harness - .make_block(unadvanced_split_state.clone(), split_slot) + .make_block(unadvanced_split_state.clone(), finalized_epoch_start_slot) .await; let ((valid_fork_block, _), _) = harness - .make_block(unadvanced_split_state.clone(), split_slot + 1) + .make_block( + unadvanced_split_state.clone(), + finalized_epoch_start_slot + 1, + ) .await; // Advance the chain so that the intended split slot is finalized. // Do not attest in the epoch boundary slot, to make attestation production later easier (no // equivocations). 
- let finalizing_slot = split_slot + 2 * E::slots_per_epoch(); + let finalizing_slot = finalized_epoch_start_slot + 2 * E::slots_per_epoch(); for _ in 0..pre_skips + post_skips { harness.advance_slot(); } @@ -3834,12 +3916,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { let (split_state_root, mut advanced_split_state) = harness .chain .store - .get_advanced_hot_state( - split.block_root, - StatePayloadStatus::Pending, - split.slot, - split.state_root, - ) + .get_advanced_hot_state(split.block_root, split.slot, split.state_root) .unwrap() .unwrap(); complete_state_advance( @@ -3973,6 +4050,7 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo let num_blocks_produced = E::slots_per_epoch() * 4; let db_path = tempdir().unwrap(); let spec = test_spec::(); + let is_gloas = spec.is_gloas_scheduled(); let chain_config = ChainConfig { archive, @@ -3995,7 +4073,11 @@ async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: boo ) .await; - let min_version = SchemaVersion(28); + let min_version = if is_gloas { + SchemaVersion(29) + } else { + SchemaVersion(28) + }; // Save the slot clock so that the new harness doesn't revert in time. let slot_clock = harness.chain.slot_clock.clone(); @@ -4565,6 +4647,10 @@ async fn fulu_prune_data_columns_happy_case() { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -4620,6 +4706,10 @@ async fn fulu_prune_data_columns_no_finalization() { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). 
+ if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -4839,6 +4929,10 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) { // No-op if PeerDAS not scheduled. return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if store.get_chain_spec().is_gloas_scheduled() { + return; + } let Some(fulu_fork_epoch) = store.get_chain_spec().fulu_fork_epoch else { // No-op prior to Fulu. return; @@ -5156,6 +5250,10 @@ async fn test_custody_column_filtering_regular_node() { if !test_spec::().is_peer_das_scheduled() { return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). + if test_spec::().is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -5200,6 +5298,10 @@ async fn test_custody_column_filtering_supernode() { if !test_spec::().is_peer_das_scheduled() { return; } + // TODO(Gloas): blocks don't have blob_kzg_commitments (blobs are in the execution payload envelope). 
+ if test_spec::().is_gloas_scheduled() { + return; + } let db_path = tempdir().unwrap(); let store = get_store(&db_path); @@ -5515,7 +5617,7 @@ async fn test_gloas_block_and_envelope_storage_generic( let mut state = genesis_state; let mut block_roots = vec![]; - let mut stored_states = vec![(Slot::new(0), StatePayloadStatus::Full, genesis_state_root)]; + let mut stored_states = vec![(Slot::new(0), genesis_state_root)]; for i in 1..=num_slots { let slot = Slot::new(i); @@ -5527,10 +5629,10 @@ async fn test_gloas_block_and_envelope_storage_generic( let state_root = state.canonical_root().unwrap(); store.put_state(&state_root, &state).unwrap(); - stored_states.push((slot, state.payload_status(), state_root)); + stored_states.push((slot, state_root)); } - let (block_contents, envelope, mut pending_state) = + let (block_contents, envelope, mut post_block_state) = harness.make_block_with_envelope(state, slot).await; let block_root = block_contents.0.canonical_root(); @@ -5540,21 +5642,17 @@ async fn test_gloas_block_and_envelope_storage_generic( .await .unwrap(); - let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); - stored_states.push((slot, StatePayloadStatus::Pending, pending_state_root)); + let state_root = post_block_state.update_tree_hash_cache().unwrap(); + stored_states.push((slot, state_root)); // Process the envelope. let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state.clone(); - let envelope_state_root = envelope.message.state_root; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) + harness + .process_envelope(block_root, envelope, &post_block_state, state_root) .await; - assert_eq!(full_state_root, envelope_state_root); - stored_states.push((slot, StatePayloadStatus::Full, full_state_root)); block_roots.push(block_root); - state = full_state; + state = post_block_state; } // Verify block storage. 
@@ -5577,20 +5675,15 @@ async fn test_gloas_block_and_envelope_storage_generic( // Verify state storage. // Iterate in reverse order to frustrate the cache. - for (slot, payload_status, state_root) in stored_states.into_iter().rev() { + for (slot, state_root) in stored_states.into_iter().rev() { println!("{slot}: {state_root:?}"); let Some(mut loaded_state) = store .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() else { - panic!("missing {payload_status:?} state at slot {slot} with root {state_root:?}"); + panic!("missing state at slot {slot} with root {state_root:?}"); }; assert_eq!(loaded_state.slot(), slot); - assert_eq!( - loaded_state.payload_status(), - payload_status, - "slot = {slot}" - ); assert_eq!( loaded_state.canonical_root().unwrap(), state_root, @@ -5600,74 +5693,6 @@ async fn test_gloas_block_and_envelope_storage_generic( check_db_invariants(&harness); } -/// Test that Pending and Full states have the correct payload status through round-trip -/// storage and retrieval. -#[tokio::test] -async fn test_gloas_state_payload_status() { - if !fork_name_from_env().is_some_and(|f| f.gloas_enabled()) { - return; - } - - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - - let num_blocks = 6u64; - let (genesis_state, _genesis_state_root) = harness.get_current_state_and_root(); - let mut state = genesis_state; - - for i in 1..=num_blocks { - let slot = Slot::new(i); - harness.advance_slot(); - - let (block_contents, envelope, pending_state) = - harness.make_block_with_envelope(state, slot).await; - let block_root = block_contents.0.canonical_root(); - - harness - .process_block(slot, block_root, block_contents) - .await - .unwrap(); - - // Verify the pending state has correct payload status. 
- assert_eq!( - pending_state.payload_status(), - StatePayloadStatus::Pending, - "pending state at slot {} should be Pending", - i - ); - - // Process the envelope and verify the full state has correct payload status. - let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) - .await; - - assert_eq!( - full_state.payload_status(), - StatePayloadStatus::Full, - "full state at slot {} should be Full", - i - ); - - // Round-trip: load the full state from DB and check status. - let loaded_full = store - .get_state(&full_state_root, None, CACHE_STATE_IN_TESTS) - .unwrap() - .expect("full state should exist in DB"); - assert_eq!( - loaded_full.payload_status(), - StatePayloadStatus::Full, - "loaded full state at slot {} should be Full after round-trip", - i - ); - - state = full_state; - } - check_db_invariants(&harness); -} - /// Test block replay with and without envelopes. #[tokio::test] async fn test_gloas_block_replay_with_envelopes() { @@ -5704,11 +5729,11 @@ async fn test_gloas_block_replay_with_envelopes() { pending_states.insert(slot, (pending_state_root, pending_state.clone())); let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; - let full_state_root = harness - .process_envelope(block_root, envelope, &mut full_state) + let full_state = pending_state; + harness + .process_envelope(block_root, envelope, &full_state, pending_state_root) .await; - full_states.insert(slot, (full_state_root, full_state.clone())); + full_states.insert(slot, (pending_state_root, full_state.clone())); last_block_root = block_root; state = full_state; @@ -5716,94 +5741,29 @@ async fn test_gloas_block_replay_with_envelopes() { let end_slot = Slot::new(num_blocks); - // Load blocks for Pending replay (no envelopes for the last block). 
- let (blocks_pending, envelopes_pending) = store - .load_blocks_to_replay( - Slot::new(0), - end_slot, - last_block_root, - StatePayloadStatus::Pending, - ) + // Load blocks for replay. + let blocks = store + .load_blocks_to_replay(Slot::new(0), end_slot, last_block_root) .unwrap(); - assert!( - !blocks_pending.is_empty(), - "should have blocks for pending replay" - ); - // For Pending, no envelope for the first block (slot 0) or last block; envelopes for - // intermediate blocks whose payloads are canonical. - let expected_pending_envelopes = blocks_pending.len().saturating_sub(2); + assert!(!blocks.is_empty(), "should have blocks for replay"); + + // Replay blocks and verify against the expected state. + let mut replayed = BlockReplayer::::new(genesis_state, store.get_chain_spec()) + .no_signature_verification() + .minimal_block_root_verification() + .apply_blocks(blocks, None) + .expect("should replay blocks") + .into_state(); + replayed.apply_pending_mutations().unwrap(); + + let (_, mut expected) = pending_states.get(&end_slot).unwrap().clone(); + expected.apply_pending_mutations().unwrap(); + + replayed.drop_all_caches().unwrap(); + expected.drop_all_caches().unwrap(); assert_eq!( - envelopes_pending.len(), - expected_pending_envelopes, - "pending replay should have envelopes for all blocks except the last" - ); - assert!( - blocks_pending - .iter() - .skip(1) - .take(envelopes_pending.len()) - .map(|block| block.slot()) - .eq(envelopes_pending - .iter() - .map(|envelope| envelope.message.slot)), - "block and envelope slots should match" - ); - - // Load blocks for Full replay (envelopes for all blocks including the last). 
- let (blocks_full, envelopes_full) = store - .load_blocks_to_replay( - Slot::new(0), - end_slot, - last_block_root, - StatePayloadStatus::Full, - ) - .unwrap(); - assert_eq!( - envelopes_full.len(), - expected_pending_envelopes + 1, - "full replay should have one more envelope than pending replay" - ); - - // Replay to Pending state and verify. - let mut replayed_pending = - BlockReplayer::::new(genesis_state.clone(), store.get_chain_spec()) - .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(StatePayloadStatus::Pending) - .apply_blocks(blocks_pending, envelopes_pending, None) - .expect("should replay blocks to pending state") - .into_state(); - replayed_pending.apply_pending_mutations().unwrap(); - - let (_, mut expected_pending) = pending_states.get(&end_slot).unwrap().clone(); - expected_pending.apply_pending_mutations().unwrap(); - - replayed_pending.drop_all_caches().unwrap(); - expected_pending.drop_all_caches().unwrap(); - assert_eq!( - replayed_pending, expected_pending, - "replayed pending state should match stored pending state" - ); - - // Replay to Full state and verify. 
- let mut replayed_full = - BlockReplayer::::new(genesis_state, store.get_chain_spec()) - .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(StatePayloadStatus::Full) - .apply_blocks(blocks_full, envelopes_full, None) - .expect("should replay blocks to full state") - .into_state(); - replayed_full.apply_pending_mutations().unwrap(); - - let (_, mut expected_full) = full_states.get(&end_slot).unwrap().clone(); - expected_full.apply_pending_mutations().unwrap(); - - replayed_full.drop_all_caches().unwrap(); - expected_full.drop_all_caches().unwrap(); - assert_eq!( - replayed_full, expected_full, - "replayed full state should match stored full state" + replayed, expected, + "replayed state should match stored state" ); check_db_invariants(&harness); } @@ -5836,40 +5796,43 @@ async fn test_gloas_hot_state_hierarchy() { let slot = Slot::new(i); harness.advance_slot(); - let (block_contents, envelope, pending_state) = + let (block_contents, envelope, mut pending_state) = harness.make_block_with_envelope(state.clone(), slot).await; let block_root = block_contents.0.canonical_root(); - - // Attest to previous block before processing next. - if i > 1 { - let state_root = state.update_tree_hash_cache().unwrap(); - harness.attest_block( - &state, - state_root, - last_block_root.into(), - &block_contents.0, - &some_validators, - ); - } + let signed_block = block_contents.0.clone(); harness .process_block(slot, block_root, block_contents) .await .unwrap(); + // Attest to the current block at its own slot (same-slot attestation). + // In Gloas, same-slot attestations have index=0 and route to Pending in + // fork choice, correctly propagating weight through the Full path. + // Use pending_state (at slot i) so the target root resolves correctly. 
+ let pending_state_root = pending_state.update_tree_hash_cache().unwrap(); + harness.attest_block( + &pending_state, + pending_state_root, + block_root.into(), + &signed_block, + &some_validators, + ); + let envelope = envelope.expect("Gloas block should have envelope"); - let mut full_state = pending_state; + let full_state = pending_state; harness - .process_envelope(block_root, envelope, &mut full_state) + .process_envelope(block_root, envelope, &full_state, pending_state_root) .await; last_block_root = block_root; state = full_state; } - // Verify states can be loaded and have correct payload status. - let _head_state = harness.get_current_state(); - let _head_slot = harness.head_slot(); + // Head should be the block at slot 40 with full payload. + let head = harness.chain.canonical_head.cached_head(); + assert_eq!(head.head_block_root(), last_block_root); + assert_eq!(head.head_payload_status(), PayloadStatus::Full); // States at all slots on the canonical chain should be retrievable. for slot_num in 1..=num_blocks { @@ -5880,7 +5843,7 @@ async fn test_gloas_hot_state_hierarchy() { let mut loaded_state = store .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() - .unwrap(); + .unwrap_or_else(|| panic!("missing state at {slot}/{state_root:?}")); assert_eq!(loaded_state.canonical_root().unwrap(), state_root); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 10c0b429a9..3958ce6c6d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -115,7 +115,18 @@ fn massive_skips() { assert!(state.slot() > 1, "the state should skip at least one slot"); - if state.fork_name_unchecked().fulu_enabled() { + if state.fork_name_unchecked().gloas_enabled() { + // Gloas uses compute_balance_weighted_selection for proposer selection, which + // returns InvalidIndicesCount (not InsufficientValidators) when the active + // validator set is empty. 
+ assert_eq!( + error, + SlotProcessingError::EpochProcessingError(EpochProcessingError::BeaconStateError( + BeaconStateError::InvalidIndicesCount + )), + "should return error indicating that validators have been slashed out" + ) + } else if state.fork_name_unchecked().fulu_enabled() { // post-fulu this is done in per_epoch_processing assert_eq!( error, @@ -1006,9 +1017,12 @@ async fn pseudo_finalize_test_generic( }; // pseudo finalize + // Post-Gloas the finalized state must be Pending (the block's state_root), not Full + // (the envelope's state_root), because the payload of the finalized block is not finalized. + let finalized_state_root = head.beacon_block.message().state_root(); harness .chain - .manually_finalize_state(head.beacon_state_root(), checkpoint) + .manually_finalize_state(finalized_state_root, checkpoint) .unwrap(); let split = harness.chain.store.get_split_info(); diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 521fc4ac97..a37ab6458f 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -117,7 +117,8 @@ async fn missed_blocks_across_epochs() { #[tokio::test] async fn missed_blocks_basic() { - let validator_count = 16; + // >= 32 validators required for Gloas genesis with MainnetEthSpec (32 slots/epoch). 
+ let validator_count = 32; let slots_per_epoch = E::slots_per_epoch(); diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 9c19e94c0e..236340aa29 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -1,11 +1,11 @@ use crate::engines::ForkchoiceState; use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, - ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, ENGINE_GET_CLIENT_VERSION_V1, - ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, - ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, - ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, - ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, + ENGINE_FORKCHOICE_UPDATED_V4, ENGINE_GET_BLOBS_V1, ENGINE_GET_BLOBS_V2, + ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, + ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, + ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -158,7 +158,7 @@ impl ExecutionBlock { } #[superstruct( - variants(V1, V2, V3), + variants(V1, V2, V3, V4), variant_attributes(derive(Clone, Debug, Eq, Hash, PartialEq),), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -171,10 +171,12 @@ pub struct PayloadAttributes { pub prev_randao: Hash256, #[superstruct(getter(copy))] pub suggested_fee_recipient: Address, - #[superstruct(only(V2, V3))] + #[superstruct(only(V2, V3, V4))] pub withdrawals: Vec, - #[superstruct(only(V3), partial_getter(copy))] + 
#[superstruct(only(V3, V4), partial_getter(copy))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(V4), partial_getter(copy))] + pub slot_number: u64, } impl PayloadAttributes { @@ -184,24 +186,35 @@ impl PayloadAttributes { suggested_fee_recipient: Address, withdrawals: Option>, parent_beacon_block_root: Option, + slot_number: Option, ) -> Self { - match withdrawals { - Some(withdrawals) => match parent_beacon_block_root { - Some(parent_beacon_block_root) => PayloadAttributes::V3(PayloadAttributesV3 { + match (withdrawals, parent_beacon_block_root, slot_number) { + (Some(withdrawals), Some(parent_beacon_block_root), Some(slot_number)) => { + PayloadAttributes::V4(PayloadAttributesV4 { timestamp, prev_randao, suggested_fee_recipient, withdrawals, parent_beacon_block_root, - }), - None => PayloadAttributes::V2(PayloadAttributesV2 { + slot_number, + }) + } + (Some(withdrawals), Some(parent_beacon_block_root), None) => { + PayloadAttributes::V3(PayloadAttributesV3 { timestamp, prev_randao, suggested_fee_recipient, withdrawals, - }), - }, - None => PayloadAttributes::V1(PayloadAttributesV1 { + parent_beacon_block_root, + }) + } + (Some(withdrawals), None, _) => PayloadAttributes::V2(PayloadAttributesV2 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + }), + (None, _, _) => PayloadAttributes::V1(PayloadAttributesV1 { timestamp, prev_randao, suggested_fee_recipient, @@ -246,6 +259,21 @@ impl From for SsePayloadAttributes { withdrawals, parent_beacon_block_root, }), + // V4 maps to V3 for SSE (slot_number is not part of the SSE spec) + PayloadAttributes::V4(PayloadAttributesV4 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + slot_number: _, + }) => Self::V3(SsePayloadAttributesV3 { + timestamp, + prev_randao, + suggested_fee_recipient, + withdrawals, + parent_beacon_block_root, + }), } } } @@ -555,6 +583,7 @@ pub struct EngineCapabilities { pub forkchoice_updated_v1: bool, pub 
forkchoice_updated_v2: bool, pub forkchoice_updated_v3: bool, + pub forkchoice_updated_v4: bool, pub get_payload_bodies_by_hash_v1: bool, pub get_payload_bodies_by_range_v1: bool, pub get_payload_v1: bool, @@ -594,6 +623,9 @@ impl EngineCapabilities { if self.forkchoice_updated_v3 { response.push(ENGINE_FORKCHOICE_UPDATED_V3); } + if self.forkchoice_updated_v4 { + response.push(ENGINE_FORKCHOICE_UPDATED_V4); + } if self.get_payload_bodies_by_hash_v1 { response.push(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bcd95d1ae4..dcf8205406 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -48,6 +48,7 @@ pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; pub const ENGINE_FORKCHOICE_UPDATED_V2: &str = "engine_forkchoiceUpdatedV2"; pub const ENGINE_FORKCHOICE_UPDATED_V3: &str = "engine_forkchoiceUpdatedV3"; +pub const ENGINE_FORKCHOICE_UPDATED_V4: &str = "engine_forkchoiceUpdatedV4"; pub const ENGINE_FORKCHOICE_UPDATED_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1"; @@ -84,6 +85,7 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, + ENGINE_FORKCHOICE_UPDATED_V4, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_CLIENT_VERSION_V1, @@ -1132,6 +1134,27 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn forkchoice_updated_v4( + &self, + forkchoice_state: ForkchoiceState, + payload_attributes: Option, + ) -> Result { + let params = json!([ + JsonForkchoiceStateV1::from(forkchoice_state), + payload_attributes.map(JsonPayloadAttributes::from) + ]); + + let response: 
JsonForkchoiceUpdatedV1Response = self + .rpc_request( + ENGINE_FORKCHOICE_UPDATED_V4, + params, + ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_bodies_by_hash_v1( &self, block_hashes: Vec, @@ -1204,6 +1227,7 @@ impl HttpJsonRpc { forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), + forkchoice_updated_v4: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V4), get_payload_bodies_by_hash_v1: capabilities .contains(ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1), get_payload_bodies_by_range_v1: capabilities @@ -1449,6 +1473,16 @@ impl HttpJsonRpc { )) } } + PayloadAttributes::V4(_) => { + if engine_capabilities.forkchoice_updated_v4 { + self.forkchoice_updated_v4(forkchoice_state, maybe_payload_attributes) + .await + } else { + Err(Error::RequiredMethodUnsupported( + "engine_forkchoiceUpdatedV4", + )) + } + } } } else if engine_capabilities.forkchoice_updated_v3 { self.forkchoice_updated_v3(forkchoice_state, maybe_payload_attributes) diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 97c8e8a625..a77861981f 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -107,6 +107,12 @@ pub struct JsonExecutionPayload { #[superstruct(only(Deneb, Electra, Fulu, Gloas))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, + #[superstruct(only(Gloas))] + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub block_access_list: VariableList, + #[superstruct(only(Gloas))] + #[serde(with = "serde_utils::u64_hex_be")] + pub slot_number: u64, } impl From> for JsonExecutionPayloadBellatrix { @@ -252,6 +258,8 @@ impl TryFrom> 
for JsonExecutionPayloadGloas withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + block_access_list: payload.block_access_list, + slot_number: payload.slot_number.into(), }) } } @@ -425,6 +433,8 @@ impl TryFrom> for ExecutionPayloadGloas withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, + block_access_list: payload.block_access_list, + slot_number: payload.slot_number.into(), }) } } @@ -716,7 +726,7 @@ impl<'a> From<&'a JsonWithdrawal> for EncodableJsonWithdrawal<'a> { } #[superstruct( - variants(V1, V2, V3), + variants(V1, V2, V3, V4), variant_attributes( derive(Debug, Clone, PartialEq, Serialize, Deserialize), serde(rename_all = "camelCase") @@ -732,10 +742,13 @@ pub struct JsonPayloadAttributes { pub prev_randao: Hash256, #[serde(with = "serde_utils::address_hex")] pub suggested_fee_recipient: Address, - #[superstruct(only(V2, V3))] + #[superstruct(only(V2, V3, V4))] pub withdrawals: Vec, - #[superstruct(only(V3))] + #[superstruct(only(V3, V4))] pub parent_beacon_block_root: Hash256, + #[superstruct(only(V4))] + #[serde(with = "serde_utils::u64_hex_be")] + pub slot_number: u64, } impl From for JsonPayloadAttributes { @@ -759,6 +772,14 @@ impl From for JsonPayloadAttributes { withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), parent_beacon_block_root: pa.parent_beacon_block_root, }), + PayloadAttributes::V4(pa) => Self::V4(JsonPayloadAttributesV4 { + timestamp: pa.timestamp, + prev_randao: pa.prev_randao, + suggested_fee_recipient: pa.suggested_fee_recipient, + withdrawals: pa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: pa.parent_beacon_block_root, + slot_number: pa.slot_number, + }), } } } @@ -784,6 +805,14 @@ impl From for PayloadAttributes { withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), parent_beacon_block_root: 
jpa.parent_beacon_block_root, }), + JsonPayloadAttributes::V4(jpa) => Self::V4(PayloadAttributesV4 { + timestamp: jpa.timestamp, + prev_randao: jpa.prev_randao, + suggested_fee_recipient: jpa.suggested_fee_recipient, + withdrawals: jpa.withdrawals.into_iter().map(Into::into).collect(), + parent_beacon_block_root: jpa.parent_beacon_block_root, + slot_number: jpa.slot_number, + }), } } } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a66f7a9b55..ace6276b75 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -735,6 +735,9 @@ impl ExecutionBlockGenerator { blob_gas_used: 0, excess_blob_gas: 0, }), + _ => unreachable!(), + }, + PayloadAttributes::V4(pa) => match self.get_fork_at_timestamp(pa.timestamp) { ForkName::Gloas => ExecutionPayload::Gloas(ExecutionPayloadGloas { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, @@ -753,6 +756,8 @@ impl ExecutionBlockGenerator { withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, + block_access_list: VariableList::empty(), + slot_number: pa.slot_number.into(), }), _ => unreachable!(), }, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index e263e5402a..058f1e76da 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -507,7 +507,8 @@ pub async fn handle_rpc( } ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 - | ENGINE_FORKCHOICE_UPDATED_V3 => { + | ENGINE_FORKCHOICE_UPDATED_V3 + | ENGINE_FORKCHOICE_UPDATED_V4 => { let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let payload_attributes = match method { @@ 
-554,6 +555,11 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V3)) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? } + ENGINE_FORKCHOICE_UPDATED_V4 => { + get_param::>(params, 1) + .map(|opt| opt.map(JsonPayloadAttributes::V4)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? + } _ => unreachable!(), }; @@ -607,7 +613,7 @@ pub async fn handle_rpc( )); } } - ForkName::Deneb | ForkName::Electra | ForkName::Fulu | ForkName::Gloas => { + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(( format!("{} called after Deneb fork!", method), @@ -621,6 +627,14 @@ pub async fn handle_rpc( )); } } + ForkName::Gloas => { + if method != ENGINE_FORKCHOICE_UPDATED_V4 { + return Err(( + format!("{} called after Gloas fork! Use V4.", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + } _ => unreachable!(), }; } diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 7b6c4e8310..6ab6cca3f6 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -898,16 +898,24 @@ impl MockBuilder { fee_recipient, expected_withdrawals, None, + None, + ), + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + None, + ), + ForkName::Gloas => PayloadAttributes::new( + timestamp, + *prev_randao, + fee_recipient, + expected_withdrawals, + Some(head_block_root), + Some(slot.as_u64()), ), - ForkName::Deneb | ForkName::Electra | ForkName::Fulu | ForkName::Gloas => { - PayloadAttributes::new( - timestamp, - *prev_randao, - fee_recipient, - expected_withdrawals, - Some(head_block_root), - ) - } ForkName::Base | ForkName::Altair => { return Err("invalid fork".to_string()); } diff --git 
a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 91966ff65e..288416d51e 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -96,8 +96,14 @@ impl MockExecutionLayer { justified_hash: None, finalized_hash: None, }; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + Address::repeat_byte(42), + None, + None, + None, + ); // Insert a proposer to ensure the fork choice updated command works. let slot = Slot::new(0); @@ -124,8 +130,14 @@ impl MockExecutionLayer { chain_health: ChainHealth::Healthy, }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + None, + None, + None, + ); let payload_parameters = PayloadParameters { parent_hash, @@ -171,8 +183,14 @@ impl MockExecutionLayer { chain_health: ChainHealth::Healthy, }; let suggested_fee_recipient = self.el.get_suggested_fee_recipient(validator_index).await; - let payload_attributes = - PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_attributes = PayloadAttributes::new( + timestamp, + prev_randao, + suggested_fee_recipient, + None, + None, + None, + ); let payload_parameters = PayloadParameters { parent_hash, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 47e3c9064c..6d8c30d316 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -47,6 
+47,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { forkchoice_updated_v1: true, forkchoice_updated_v2: true, forkchoice_updated_v3: true, + forkchoice_updated_v4: true, get_payload_bodies_by_hash_v1: true, get_payload_bodies_by_range_v1: true, get_payload_v1: true, diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs index 4a974c9919..382b967b43 100644 --- a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -91,7 +91,7 @@ pub async fn publish_execution_payload_envelope( chain: Arc>, network_tx: &UnboundedSender>, ) -> Result, Rejection> { - let slot = envelope.message.slot; + let slot = envelope.slot(); let beacon_block_root = envelope.message.beacon_block_root; // TODO(gloas): Replace this check once we have gossip validation. @@ -161,9 +161,7 @@ pub(crate) fn get_beacon_execution_payload_envelope( )) })?; - let fork_name = chain - .spec - .fork_name_at_slot::(envelope.message.slot); + let fork_name = chain.spec.fork_name_at_slot::(envelope.slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 8715fc2b1e..9bc1f6ead4 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -66,12 +66,11 @@ pub fn get_state_before_applying_block( }) .map_err(|e| custom_not_found(format!("Parent state is not available! {:?}", e)))?; - // TODO(gloas): handle payloads? 
let replayer = BlockReplayer::new(parent_state, &chain.spec) .no_signature_verification() .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() - .apply_blocks(vec![], vec![], Some(block.slot())) + .apply_blocks(vec![], Some(block.slot())) .map_err(unhandled_error::)?; Ok(replayer.into_state()) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index bf8443929c..2dd4c28040 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -3937,7 +3937,7 @@ impl ApiTester { .cloned() .expect("envelope should exist in pending cache for local building"); assert_eq!(envelope.beacon_block_root, block_root); - assert_eq!(envelope.slot, slot); + assert_eq!(envelope.slot(), slot); } /// Assert envelope fields match the expected block root and slot. @@ -3948,9 +3948,8 @@ impl ApiTester { slot: Slot, ) { assert_eq!(envelope.beacon_block_root, block_root); - assert_eq!(envelope.slot, slot); + assert_eq!(envelope.slot(), slot); assert_eq!(envelope.builder_index, BUILDER_INDEX_SELF_BUILD); - assert_ne!(envelope.state_root, Hash256::ZERO); } /// Sign an execution payload envelope. 
diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index d0f0557223..76c6ba812d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -20,7 +20,6 @@ use beacon_chain::test_utils::{ use beacon_chain::{BeaconChain, WhenSlotSkipped}; use beacon_processor::{work_reprocessing_queue::*, *}; use bls::Signature; -use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use libp2p::gossipsub::MessageAcceptance; use lighthouse_network::rpc::InboundRequestId; @@ -2125,12 +2124,13 @@ fn make_test_payload_envelope( ) -> SignedExecutionPayloadEnvelope { SignedExecutionPayloadEnvelope { message: ExecutionPayloadEnvelope { - payload: ExecutionPayloadGloas::default(), + payload: ExecutionPayloadGloas { + slot_number: slot, + ..ExecutionPayloadGloas::default() + }, execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root, - slot, - state_root: Hash256::zero(), }, signature: Signature::empty(), } @@ -2158,7 +2158,7 @@ async fn test_payload_envelopes_by_range() { let envelope = make_test_payload_envelope(Slot::new(slot), root); rig.chain .store - .put_payload_envelope(&root, envelope) + .put_payload_envelope(&root, &envelope) .unwrap(); expected_roots.push(root); } @@ -2208,7 +2208,7 @@ async fn test_payload_envelopes_by_root() { let envelope = make_test_payload_envelope(Slot::new(1), block_root); rig.chain .store - .put_payload_envelope(&block_root, envelope) + .put_payload_envelope(&block_root, &envelope) .unwrap(); let roots = RuntimeVariableList::new(vec![block_root], 1).unwrap(); @@ -2298,7 +2298,7 @@ async fn test_payload_envelopes_by_range_no_duplicates_with_skip_slots() { let envelope = make_test_payload_envelope(Slot::new(slot), root); rig.chain .store - .put_payload_envelope(&root, envelope) + .put_payload_envelope(&root, &envelope) .unwrap(); } } diff --git 
a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 78dd69e55a..e9b9de76e6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1064,7 +1064,7 @@ impl, Cold: ItemStore> HotColdDB pub fn put_payload_envelope( &self, block_root: &Hash256, - payload_envelope: SignedExecutionPayloadEnvelope, + payload_envelope: &SignedExecutionPayloadEnvelope, ) -> Result<(), Error> { self.hot_db.put_bytes( SignedExecutionPayloadEnvelope::::db_column(), @@ -1133,13 +1133,10 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state( &self, block_root: Hash256, - payload_status: StatePayloadStatus, max_slot: Slot, state_root: Hash256, ) -> Result)>, Error> { - if let Some(cached) = - self.get_advanced_hot_state_from_cache(block_root, payload_status, max_slot) - { + if let Some(cached) = self.get_advanced_hot_state_from_cache(block_root, max_slot) { return Ok(Some(cached)); } @@ -1161,11 +1158,7 @@ impl, Cold: ItemStore> HotColdDB .into()); } - // Split state should always be `Pending`. - let state_root = if block_root == split.block_root - && let StatePayloadStatus::Pending = payload_status - && split.slot <= max_slot - { + let state_root = if block_root == split.block_root && split.slot <= max_slot { split.state_root } else { state_root @@ -1212,12 +1205,11 @@ impl, Cold: ItemStore> HotColdDB pub fn get_advanced_hot_state_from_cache( &self, block_root: Hash256, - payload_status: StatePayloadStatus, max_slot: Slot, ) -> Option<(Hash256, BeaconState)> { self.state_cache .lock() - .get_by_block_root(block_root, payload_status, max_slot) + .get_by_block_root(block_root, max_slot) } /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk. @@ -1857,100 +1849,6 @@ impl, Cold: ItemStore> HotColdDB } } - /// Compute the `StatePayloadStatus` for a stored state based on its summary. 
- /// - /// In future this might become a field of the summary, but this would require a whole DB - /// migration. For now we use an extra read from the DB to determine it. - fn get_hot_state_summary_payload_status( - &self, - summary: &HotStateSummary, - ) -> Result { - // Treat pre-Gloas states as `Pending`. - if !self - .spec - .fork_name_at_slot::(summary.slot) - .gloas_enabled() - { - return Ok(StatePayloadStatus::Pending); - } - - // Treat genesis state as `Pending` (`BeaconBlock` state). - let previous_state_root = summary.previous_state_root; - if previous_state_root.is_zero() { - return Ok(StatePayloadStatus::Pending); - } - - // Load the hot state summary for the previous state. - // - // If it has the same slot as this summary then we know this summary is for a `Full` state - // (payload state), because they are always diffed against their same-slot `Pending` state. - // - // If the previous summary has a different slot AND the latest block is from `summary.slot`, - // then this state *must* be `Pending` (it is the summary for latest block itself). - // - // Otherwise, we are at a skipped slot and must traverse the graph of state summaries - // backwards until we reach a summary for the latest block. This recursion could be quite - // far in the case of a long skip. We could optimise this in future using the - // `diff_base_state` (like in `get_ancestor_state_root`), or by doing a proper DB - // migration. - let previous_state_summary = self - .load_hot_state_summary(&previous_state_root)? - .ok_or(Error::MissingHotStateSummary(previous_state_root))?; - - if previous_state_summary.slot == summary.slot { - Ok(StatePayloadStatus::Full) - } else if summary.slot == summary.latest_block_slot { - Ok(StatePayloadStatus::Pending) - } else { - self.get_hot_state_summary_payload_status(&previous_state_summary) - } - } - - /// Recompute the payload status for a state at `slot` that is stored in the cold DB. 
- /// - /// This function returns an error for any `slot` that is outside the range of slots stored in - /// the freezer DB. - /// - /// For all slots prior to Gloas, it returns `Pending`. - /// - /// For post-Gloas slots the algorithm is: - /// - /// 1. Load the most recently applied block at `slot` (may not be from `slot` in case of a skip) - /// 2. Load the canonical `state_root` at the slot of the block. If this `state_root` matches - /// the one in the block then we know the state at *that* slot is canonically empty (no - /// payload). Conversely, if it is different, we know that the block's slot is full (assuming - /// no database corruption). - /// 3. The payload status of `slot` is the same as the payload status of `block.slot()`, because - /// we only care about whether a beacon block or payload was applied most recently, and - /// `block` is by definition the most-recently-applied block. - /// - /// All of this mucking around could be avoided if we do a schema migration to record the - /// payload status in the database. For now, this is simpler. - fn get_cold_state_payload_status(&self, slot: Slot) -> Result { - // Pre-Gloas states are always `Pending`. - if !self.spec.fork_name_at_slot::(slot).gloas_enabled() { - return Ok(StatePayloadStatus::Pending); - } - - let block_root = self - .get_cold_block_root(slot)? - .ok_or(HotColdDBError::MissingFrozenBlock(slot))?; - - let block = self - .get_blinded_block(&block_root)? - .ok_or(Error::MissingBlock(block_root))?; - - let state_root = self - .get_cold_state_root(block.slot())? 
- .ok_or(HotColdDBError::MissingRestorePointState(block.slot()))?; - - if block.state_root() != state_root { - Ok(StatePayloadStatus::Full) - } else { - Ok(StatePayloadStatus::Pending) - } - } - fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result { if let Some(buffer) = self .state_cache @@ -2046,20 +1944,16 @@ impl, Cold: ItemStore> HotColdDB ) -> Result, Hash256)>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); - if let Some( - summary @ HotStateSummary { - slot, - latest_block_root, - diff_base_state, - .. - }, - ) = self.load_hot_state_summary(state_root)? + if let Some(HotStateSummary { + slot, + latest_block_root, + diff_base_state, + .. + }) = self.load_hot_state_summary(state_root)? { - let payload_status = self.get_hot_state_summary_payload_status(&summary)?; debug!( %slot, ?state_root, - ?payload_status, "Loading hot state" ); let mut state = match self.hot_storage_strategy(slot)? { @@ -2113,7 +2007,6 @@ impl, Cold: ItemStore> HotColdDB base_state, slot, latest_block_root, - payload_status, update_cache, )? } @@ -2131,26 +2024,19 @@ impl, Cold: ItemStore> HotColdDB base_state: BeaconState, slot: Slot, latest_block_root: Hash256, - desired_payload_status: StatePayloadStatus, update_cache: bool, ) -> Result, Error> { - if base_state.slot() == slot && base_state.payload_status() == desired_payload_status { + if base_state.slot() == slot { return Ok(base_state); } - let (blocks, envelopes) = self.load_blocks_to_replay( - base_state.slot(), - slot, - latest_block_root, - desired_payload_status, - )?; + let blocks = self.load_blocks_to_replay(base_state.slot(), slot, latest_block_root)?; let _t = metrics::start_timer(&metrics::STORE_BEACON_REPLAY_HOT_BLOCKS_TIME); // If replaying blocks, and `update_cache` is true, also cache the epoch boundary // state that this state is based on. It may be useful as the basis of more states // in the same epoch. 
let state_cache_hook = |state_root, state: &mut BeaconState| { - // TODO(gloas): prevent caching of the payload_status=Full state? if !update_cache || state.slot() % E::slots_per_epoch() != 0 { return Ok(()); } @@ -2177,16 +2063,12 @@ impl, Cold: ItemStore> HotColdDB debug!( %slot, blocks = ?blocks.iter().map(|block| block.slot()).collect::>(), - envelopes = ?envelopes.iter().map(|e| e.message.slot).collect::>(), - payload_status = ?desired_payload_status, - "Replaying blocks and envelopes" + "Replaying blocks" ); self.replay_blocks( base_state, blocks, - envelopes, - desired_payload_status, slot, no_state_root_iter(), Some(Box::new(state_cache_hook)), @@ -2490,7 +2372,8 @@ impl, Cold: ItemStore> HotColdDB return Ok(base_state); } - let (blocks, envelopes) = self.load_cold_blocks(base_state.slot() + 1, slot)?; + let base_slot = base_state.slot(); + let blocks = self.load_cold_blocks(base_slot + 1, slot)?; // Include state root for base state as it is required by block processing to not // have to hash the state. @@ -2499,16 +2382,7 @@ impl, Cold: ItemStore> HotColdDB self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { Err(Error::StateShouldNotBeRequired(slot)) })?; - let payload_status = self.get_cold_state_payload_status(slot)?; - let state = self.replay_blocks( - base_state, - blocks, - envelopes, - payload_status, - slot, - Some(state_root_iter), - None, - )?; + let state = self.replay_blocks(base_state, blocks, slot, Some(state_root_iter), None)?; debug!( target_slot = %slot, replay_time_ms = metrics::stop_timer_with_duration(replay_timer).as_millis(), @@ -2601,76 +2475,39 @@ impl, Cold: ItemStore> HotColdDB } } - /// Load cold blocks and payload envelopes between `start_slot` and `end_slot` inclusive. - #[allow(clippy::type_complexity)] + /// Load cold blocks between `start_slot` and `end_slot` inclusive. 
pub fn load_cold_blocks( &self, start_slot: Slot, end_slot: Slot, - ) -> Result< - ( - Vec>, - Vec>, - ), - Error, - > { + ) -> Result>, Error> { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_COLD_BLOCKS_TIME); let block_root_iter = self.forwards_block_roots_iterator_until(start_slot, end_slot, || { Err(Error::StateShouldNotBeRequired(end_slot)) })?; - let blocks = process_results(block_root_iter, |iter| { + process_results(block_root_iter, |iter| { iter.map(|(block_root, _slot)| block_root) .dedup() .map(|block_root| { self.get_blinded_block(&block_root)? .ok_or(Error::MissingBlock(block_root)) }) - .collect::, Error>>() - })??; - - // If Gloas is not enabled for any slots in the range, just return `blocks`. - if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() - && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() - { - return Ok((blocks, vec![])); - } - let end_block_root = self - .get_cold_block_root(end_slot)? - .ok_or(HotColdDBError::MissingFrozenBlock(end_slot))?; - let desired_payload_status = self.get_cold_state_payload_status(end_slot)?; - let envelopes = self.load_payload_envelopes_for_blocks( - &blocks, - end_block_root, - desired_payload_status, - )?; - - Ok((blocks, envelopes)) + .collect() + })? } - /// Load the blocks & envelopes between `start_slot` and `end_slot` by backtracking from + /// Load the blocks between `start_slot` and `end_slot` by backtracking from /// `end_block_root`. /// /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// equal to `start_slot`, to reach a state with slot equal to `end_slot`. - /// - /// Payloads are also returned in slot-ascending order, but only payloads forming part of - /// the chain are loaded (payloads for EMPTY slots are omitted). Prior to Gloas, an empty - /// vec of payloads will be returned. 
- #[allow(clippy::type_complexity)] pub fn load_blocks_to_replay( &self, start_slot: Slot, end_slot: Slot, end_block_root: Hash256, - desired_payload_status: StatePayloadStatus, - ) -> Result< - ( - Vec>, - Vec>, - ), - Error, - > { + ) -> Result>, Error> { let _t = metrics::start_timer(&metrics::STORE_BEACON_LOAD_HOT_BLOCKS_TIME); let mut blocks = ParentRootBlockIterator::new(self, end_block_root) .map(|result| result.map(|(_, block)| block)) @@ -2699,70 +2536,17 @@ impl, Cold: ItemStore> HotColdDB }) .collect::, _>>()?; blocks.reverse(); - - // If Gloas is not enabled for any slots in the range, just return `blocks`. - if !self.spec.fork_name_at_slot::(start_slot).gloas_enabled() - && !self.spec.fork_name_at_slot::(end_slot).gloas_enabled() - { - return Ok((blocks, vec![])); - } - - let envelopes = self.load_payload_envelopes_for_blocks( - &blocks, - end_block_root, - desired_payload_status, - )?; - - Ok((blocks, envelopes)) - } - - pub fn load_payload_envelopes_for_blocks( - &self, - blocks: &[SignedBlindedBeaconBlock], - end_block_root: Hash256, - desired_payload_status: StatePayloadStatus, - ) -> Result>, Error> { - let mut envelopes = vec![]; - - for (block, next_block) in blocks.iter().tuple_windows() { - if block.fork_name_unchecked().gloas_enabled() { - // Check next block to see if this block's payload is canonical on this chain. - let block_hash = block.payload_bid_block_hash()?; - if !next_block.is_parent_block_full(block_hash) { - // No payload at this slot (empty), nothing to load. - continue; - } - // Using `parent_root` avoids computation. - let block_root = next_block.parent_root(); - let envelope = self - .get_payload_envelope(&block_root)? - .ok_or(HotColdDBError::MissingExecutionPayloadEnvelope(block_root))?; - envelopes.push(envelope); - } - } - - // Load the payload for the last block if desired. 
- if let StatePayloadStatus::Full = desired_payload_status { - let envelope = self.get_payload_envelope(&end_block_root)?.ok_or( - HotColdDBError::MissingExecutionPayloadEnvelope(end_block_root), - )?; - envelopes.push(envelope); - } - - Ok(envelopes) + Ok(blocks) } /// Replay `blocks` on top of `state` until `target_slot` is reached. /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. - #[allow(clippy::too_many_arguments)] pub fn replay_blocks( &self, state: BeaconState, blocks: Vec>, - envelopes: Vec>, - desired_payload_status: StatePayloadStatus, target_slot: Slot, state_root_iter: Option>>, pre_slot_hook: Option>, @@ -2771,8 +2555,7 @@ impl, Cold: ItemStore> HotColdDB let mut block_replayer = BlockReplayer::new(state, &self.spec) .no_signature_verification() - .minimal_block_root_verification() - .desired_state_payload_status(desired_payload_status); + .minimal_block_root_verification(); let have_state_root_iterator = state_root_iter.is_some(); if let Some(state_root_iter) = state_root_iter { @@ -2784,7 +2567,7 @@ impl, Cold: ItemStore> HotColdDB } block_replayer - .apply_blocks(blocks, envelopes, Some(target_slot)) + .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { if have_state_root_iterator && block_replayer.state_root_miss() { warn!( @@ -3800,6 +3583,7 @@ pub fn migrate_database, Cold: ItemStore>( ) -> Result { debug!( slot = %finalized_state.slot(), + state_root = ?finalized_state_root, "Freezer migration started" ); @@ -4219,12 +4003,8 @@ impl HotStateSummary { // slots where there isn't a skip). let latest_block_root = state.get_latest_block_root(state_root); - // Payload status of the state determines a lot about how it is stored. 
- let payload_status = state.payload_status(); - let get_state_root = |slot| { if slot == state.slot() { - // TODO(gloas): I think we can remove this case Ok::<_, Error>(state_root) } else { Ok::<_, Error>(get_ancestor_state_root(store, state, slot).map_err(|e| { @@ -4247,12 +4027,6 @@ impl HotStateSummary { let previous_state_root = if state.slot() == 0 { // Set to 0x0 for genesis state to prevent any sort of circular reference. Hash256::zero() - } else if let StatePayloadStatus::Full = payload_status - && state.slot() == state.latest_block_header().slot - { - // A Full state at a non-skipped slot builds off the Pending state of the same slot, - // i.e. the state with the same `state_root` as its `BeaconBlock` - state.latest_block_header().state_root } else { get_state_root(state.slot().safe_sub(1_u64)?)? }; diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index e51543c3a2..7aca692ef9 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -67,7 +67,6 @@ where state.build_caches(&self.spec)?; - // TODO(gloas): handle payload envelope replay process_results(block_root_iter, |iter| -> Result<(), Error> { let mut io_batch = vec![]; diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index d016922ade..6d159c9361 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -7,7 +7,7 @@ use lru::LruCache; use std::collections::{BTreeMap, HashMap, HashSet}; use std::num::NonZeroUsize; use tracing::instrument; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, execution::StatePayloadStatus}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot}; /// Fraction of the LRU cache to leave intact during culling. const CULL_EXEMPT_NUMERATOR: usize = 1; @@ -23,10 +23,10 @@ pub struct FinalizedState { state: BeaconState, } -/// Map from (block_root, payload_status) -> slot -> state_root. 
+/// Map from block_root -> slot -> state_root. #[derive(Debug, Default)] pub struct BlockMap { - blocks: HashMap<(Hash256, StatePayloadStatus), SlotMap>, + blocks: HashMap, } /// Map from slot -> state_root. @@ -143,11 +143,8 @@ impl StateCache { return Err(Error::FinalizedStateDecreasingSlot); } - let payload_status = state.payload_status(); - // Add to block map. - self.block_map - .insert(block_root, payload_status, state.slot(), state_root); + self.block_map.insert(block_root, state.slot(), state_root); // Prune block map. let state_roots_to_prune = self.block_map.prune(state.slot()); @@ -270,9 +267,7 @@ impl StateCache { // Record the connection from block root and slot to this state. let slot = state.slot(); - let payload_status = state.payload_status(); - self.block_map - .insert(block_root, payload_status, slot, state_root); + self.block_map.insert(block_root, slot, state_root); Ok(PutStateOutcome::New(deleted_states)) } @@ -321,10 +316,9 @@ impl StateCache { pub fn get_by_block_root( &mut self, block_root: Hash256, - payload_status: StatePayloadStatus, slot: Slot, ) -> Option<(Hash256, BeaconState)> { - let slot_map = self.block_map.blocks.get(&(block_root, payload_status))?; + let slot_map = self.block_map.blocks.get(&block_root)?; // Find the state at `slot`, or failing that the most recent ancestor. 
let state_root = slot_map @@ -345,12 +339,7 @@ impl StateCache { } pub fn delete_block_states(&mut self, block_root: &Hash256) { - let (pending_state_roots, full_state_roots) = - self.block_map.delete_block_states(block_root); - for slot_map in [pending_state_roots, full_state_roots] - .into_iter() - .flatten() - { + if let Some(slot_map) = self.block_map.delete_block_states(block_root) { for state_root in slot_map.slots.values() { self.states.pop(state_root); } @@ -423,14 +412,8 @@ impl StateCache { } impl BlockMap { - fn insert( - &mut self, - block_root: Hash256, - payload_status: StatePayloadStatus, - slot: Slot, - state_root: Hash256, - ) { - let slot_map = self.blocks.entry((block_root, payload_status)).or_default(); + fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) { + let slot_map = self.blocks.entry(block_root).or_default(); slot_map.slots.insert(slot, state_root); } @@ -461,12 +444,8 @@ impl BlockMap { }); } - fn delete_block_states(&mut self, block_root: &Hash256) -> (Option, Option) { - let pending_state_roots = self - .blocks - .remove(&(*block_root, StatePayloadStatus::Pending)); - let full_state_roots = self.blocks.remove(&(*block_root, StatePayloadStatus::Full)); - (pending_state_roots, full_state_roots) + fn delete_block_states(&mut self, block_root: &Hash256) -> Option { + self.blocks.remove(block_root) } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index dd16f46c55..d724156f86 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1093,7 +1093,6 @@ pub struct SseExecutionPayload { pub builder_index: u64, pub block_hash: ExecutionBlockHash, pub block_root: Hash256, - pub state_root: Hash256, pub execution_optimistic: bool, } @@ -1104,7 +1103,6 @@ pub struct SseExecutionPayloadGossip { pub builder_index: u64, pub block_hash: ExecutionBlockHash, pub block_root: Hash256, - pub state_root: Hash256, } #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git 
a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 92fd4c1faf..21415e478a 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -560,9 +560,22 @@ where )?; // Cache some values for the next forkchoiceUpdate call to the execution layer. - let head_hash = self - .get_block(&head_root) - .and_then(|b| b.execution_status.block_hash()); + // For Gloas blocks, `execution_status` is Irrelevant (no embedded payload). + // If the payload envelope was received (Full), use the bid's block_hash as the + // execution chain head. Otherwise fall back to the parent hash (Pending) or None. + // TODO(gloas): this is a bit messy, and we probably need a similar treatment for + // justified/finalized + // Can fix as part of: https://github.com/sigp/lighthouse/issues/8957 + let head_hash = self.get_block(&head_root).and_then(|b| { + b.execution_status + .block_hash() + .or(match head_payload_status { + PayloadStatus::Full => b.execution_payload_block_hash, + PayloadStatus::Pending | PayloadStatus::Empty => { + b.execution_payload_parent_hash + } + }) + }); let justified_root = self.justified_checkpoint().root; let finalized_root = self.finalized_checkpoint().root; let justified_hash = self @@ -804,7 +817,7 @@ where })); } - let attestation_threshold = spec.get_unaggregated_attestation_due(); + let attestation_threshold = spec.get_attestation_due::(block.slot()); // Add proposer score boost if the block is timely. // TODO(gloas): the spec's `update_proposer_boost_root` additionally checks that @@ -1493,6 +1506,14 @@ where } } + /// Returns whether the proposer should extend the execution payload chain of the given block. 
+ pub fn should_extend_payload(&self, block_root: &Hash256) -> Result> { + let proposer_boost_root = self.fc_store.proposer_boost_root(); + self.proto_array + .should_extend_payload::(block_root, proposer_boost_root) + .map_err(Error::ProtoArrayStringError) + } + /// Returns an `ExecutionStatus` if the block is known **and** a descendant of the finalized root. pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { if self.is_finalized_checkpoint_or_descendant(*block_root) { diff --git a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs index 2e792028e5..197e1102a3 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/gloas_payload.rs @@ -109,6 +109,8 @@ pub fn get_gloas_chain_following_test_definition() -> ForkChoiceTestDefinition { pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; + // Block 1 at slot 1: child of genesis. Genesis has execution_payload_block_hash=zero + // (no execution payload at genesis), so all children have parent_payload_status=Empty. ops.push(Operation::ProcessBlock { slot: Slot::new(1), root: get_root(1), @@ -212,8 +214,10 @@ pub fn get_gloas_payload_probe_test_definition() -> ForkChoiceTestDefinition { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, - execution_payload_parent_hash: Some(get_hash(42)), - execution_payload_block_hash: Some(get_hash(0)), + // Genesis has zero execution block hash (no payload at genesis), which + // ensures all children get parent_payload_status=Empty. 
+ execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), spec: Some(gloas_spec()), } } @@ -600,18 +604,20 @@ pub fn get_gloas_interleaved_attestations_test_definition() -> ForkChoiceTestDef /// Test interleaving of blocks, payload validation, and attestations. /// -/// Scenario: -/// - Genesis block (slot 0) -/// - Block 1 (slot 1) extends genesis, Full chain -/// - Block 2 (slot 1) extends genesis, Empty chain -/// - Before payload arrives: payload_received is false for block 1 +/// Scenario (branching at block 1 since genesis has no payload): +/// - Genesis block (slot 0) with zero execution block hash +/// - Block 1 (slot 1) child of genesis (Empty parent status since genesis hash=zero) +/// - Block 2 (slot 2) extends block 1 Full chain (parent_hash matches block 1's block_hash) +/// - Block 3 (slot 2) extends block 1 Empty chain (parent_hash doesn't match) +/// - Before payload arrives: payload_received is false for block 1, only Empty reachable /// - Process execution payload for block 1 → payload_received becomes true -/// - Payload attestations arrive voting block 1's payload as timely + available -/// - Head should follow block 1 because the PTC votes now count (payload_received = true) +/// - Both Full and Empty directions from block 1 become available +/// - With equal weight, tiebreaker prefers Full → Block 2 wins pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTestDefinition { let mut ops = vec![]; - // Block 1 at slot 1: extends genesis Full chain. + // Block 1 at slot 1: child of genesis. Genesis has zero block hash, so + // parent_payload_status = Empty regardless of block 1's execution_payload_parent_hash. 
ops.push(Operation::ProcessBlock { slot: Slot::new(1), root: get_root(1), @@ -622,83 +628,94 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe execution_payload_block_hash: Some(get_hash(1)), }); - // Block 2 at slot 1: extends genesis Empty chain (parent_hash doesn't match genesis EL hash). + // Block 2 at slot 2: Full child of block 1 (parent_hash matches block 1's block_hash). ops.push(Operation::ProcessBlock { - slot: Slot::new(1), + slot: Slot::new(2), root: get_root(2), - parent_root: get_root(0), + parent_root: get_root(1), + justified_checkpoint: get_checkpoint(0), + finalized_checkpoint: get_checkpoint(0), + execution_payload_parent_hash: Some(get_hash(1)), + execution_payload_block_hash: Some(get_hash(2)), + }); + + // Block 3 at slot 2: Empty child of block 1 (parent_hash doesn't match block 1's block_hash). + ops.push(Operation::ProcessBlock { + slot: Slot::new(2), + root: get_root(3), + parent_root: get_root(1), justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), execution_payload_parent_hash: Some(get_hash(99)), - execution_payload_block_hash: Some(get_hash(100)), + execution_payload_block_hash: Some(get_hash(3)), }); - // Both children have parent_payload_status set correctly. + // Verify parent_payload_status is set correctly. ops.push(Operation::AssertParentPayloadStatus { block_root: get_root(1), + expected_status: PayloadStatus::Empty, + }); + ops.push(Operation::AssertParentPayloadStatus { + block_root: get_root(2), expected_status: PayloadStatus::Full, }); ops.push(Operation::AssertParentPayloadStatus { - block_root: get_root(2), + block_root: get_root(3), expected_status: PayloadStatus::Empty, }); - // Per spec `get_forkchoice_store`: genesis starts with payload_received=true - // (anchor block is in `payload_states`). + // Genesis does NOT have payload_received (no payload at genesis). 
ops.push(Operation::AssertPayloadReceived { block_root: get_root(0), - expected: true, + expected: false, }); - // Give one vote to each child so they have equal weight. + // Block 1 does not have payload_received yet. + ops.push(Operation::AssertPayloadReceived { + block_root: get_root(1), + expected: false, + }); + + // Give one vote to each competing child so they have equal weight. ops.push(Operation::ProcessAttestation { validator_index: 0, - block_root: get_root(1), - attestation_slot: Slot::new(1), + block_root: get_root(2), + attestation_slot: Slot::new(2), }); ops.push(Operation::ProcessAttestation { validator_index: 1, - block_root: get_root(2), - attestation_slot: Slot::new(1), + block_root: get_root(3), + attestation_slot: Slot::new(2), }); - // Equal weight, payload_received=true on genesis → tiebreaker uses - // payload_received (not previous slot, equal payload weights) → prefers Full. - // Block 1 (Full) wins because it matches the Full preference. + // Before payload_received on block 1: only Empty direction available. + // Block 3 (Empty child) is reachable, Block 2 (Full child) is not. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(1), + expected_head: get_root(3), current_slot: Slot::new(100), expected_payload_status: None, }); - // ProcessExecutionPayloadEnvelope on genesis is a no-op (already received at init). + // Process execution payload envelope for block 1 → payload_received becomes true. ops.push(Operation::ProcessExecutionPayloadEnvelope { - block_root: get_root(0), + block_root: get_root(1), }); ops.push(Operation::AssertPayloadReceived { - block_root: get_root(0), + block_root: get_root(1), expected: true, }); - // Set PTC votes on genesis as timely + data available (simulates PTC voting). - // This doesn't change the preference since genesis is not the previous slot - // (slot 0 + 1 != current_slot 100). 
- ops.push(Operation::SetPayloadTiebreak { - block_root: get_root(0), - is_timely: true, - is_data_available: true, - }); - - // Still prefers Full via payload_received tiebreaker → Block 1 (Full) wins. + // After payload_received on block 1: both Full and Empty directions available. + // Equal weight, tiebreaker prefers Full → Block 2 (Full child) wins. ops.push(Operation::FindHead { justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), justified_state_balances: vec![1, 1], - expected_head: get_root(1), + expected_head: get_root(2), current_slot: Slot::new(100), expected_payload_status: None, }); @@ -708,8 +725,9 @@ pub fn get_gloas_payload_received_interleaving_test_definition() -> ForkChoiceTe justified_checkpoint: get_checkpoint(0), finalized_checkpoint: get_checkpoint(0), operations: ops, - execution_payload_parent_hash: Some(get_hash(42)), - execution_payload_block_hash: Some(get_hash(0)), + // Genesis has zero execution block hash (no payload at genesis). + execution_payload_parent_hash: Some(ExecutionBlockHash::zero()), + execution_payload_block_hash: Some(ExecutionBlockHash::zero()), spec: Some(gloas_spec()), } } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 4946631f73..4ca7dab69c 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -568,8 +568,10 @@ impl ProtoArray { ProtoNode::V29(v29) => { // Both parent and child are Gloas blocks. The parent is full if the // block hash in the parent node matches the parent block hash in the - // child bid. - if execution_payload_parent_hash == v29.execution_payload_block_hash { + // child bid and the parent block isn't the genesis block. 
+ if v29.execution_payload_block_hash != ExecutionBlockHash::zero() + && execution_payload_parent_hash == v29.execution_payload_block_hash + { PayloadStatus::Full } else { PayloadStatus::Empty @@ -582,18 +584,16 @@ impl ProtoArray { } } } else { - // TODO(gloas): re-assess this assumption - // Parent is missing (genesis or pruned due to finalization). Default to Full - // since this path should only be hit at Gloas genesis. - PayloadStatus::Full + // Parent is missing (genesis or pruned due to finalization). This code path + // should only be hit at Gloas genesis. Default to empty, the genesis block + // has no payload envelope. + PayloadStatus::Empty }; - // Per spec `get_forkchoice_store`: the anchor (genesis) block has - // its payload state initialized (`payload_states = {anchor_root: ...}`). - // Without `payload_received = true` on genesis, the FULL virtual - // child doesn't exist in the spec's `get_node_children`, making all - // Full concrete children of genesis unreachable in `get_head`. - let is_genesis = parent_index.is_none(); + // The spec does something slightly strange where it initialises the payload timeliness + // votes and payload data availability votes for the anchor block to all true, but never + // adds the anchor to `store.payloads`, so it is never considered full. + let is_anchor = parent_index.is_none(); ProtoNode::V29(ProtoNodeV29 { slot: block.slot, @@ -614,26 +614,25 @@ execution_payload_block_hash, execution_payload_parent_hash, // Per spec `get_forkchoice_store`: the anchor block's PTC votes are - // initialized to all-True, ensuring `is_payload_timely` and - // `is_payload_data_available` return true for the anchor. - payload_timeliness_votes: if is_genesis { + // initialized to all-True.
+ payload_timeliness_votes: if is_anchor { all_true_bitvector() } else { BitVector::default() }, - payload_data_availability_votes: if is_genesis { + payload_data_availability_votes: if is_anchor { all_true_bitvector() } else { BitVector::default() }, - payload_received: is_genesis, + payload_received: false, proposer_index, // Spec: `record_block_timeliness` + `get_forkchoice_store`. // Anchor gets [True, True]. Others computed from time_into_slot. - block_timeliness_attestation_threshold: is_genesis + block_timeliness_attestation_threshold: is_anchor || (is_current_slot && time_into_slot < spec.get_attestation_due::(current_slot)), - block_timeliness_ptc_threshold: is_genesis + block_timeliness_ptc_threshold: is_anchor || (is_current_slot && time_into_slot < spec.get_payload_attestation_due()), equivocating_attestation_score: 0, }) @@ -1438,7 +1437,7 @@ impl ProtoArray { } } - fn should_extend_payload( + pub fn should_extend_payload( &self, fc_node: &IndexedForkChoiceNode, proto_node: &ProtoNode, diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 0ecaea3971..577e89baa1 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -17,7 +17,7 @@ use std::{ }; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, - Slot, StatePayloadStatus, + Slot, }; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -110,19 +110,6 @@ pub enum PayloadStatus { Pending = 2, } -impl PayloadStatus { - /// Convert a `PayloadStatus` into the equivalent `StatePayloadStatus`. - /// - /// This maps `Empty` onto `StatePayloadStatus::Pending` because empty and pending fork choice - /// nodes correspond to the exact same state. 
- pub fn as_state_payload_status(self) -> StatePayloadStatus { - match self { - Self::Empty | Self::Pending => StatePayloadStatus::Pending, - Self::Full => StatePayloadStatus::Full, - } - } -} - /// Spec's `ForkChoiceNode` augmented with ProtoNode index. pub struct IndexedForkChoiceNode { pub root: Hash256, @@ -1019,6 +1006,34 @@ impl ProtoArrayForkChoice { }) } + /// Returns whether the proposer should extend the parent's execution payload chain. + /// + /// This checks timeliness, data availability, and proposer boost conditions per the spec. + pub fn should_extend_payload( + &self, + block_root: &Hash256, + proposer_boost_root: Hash256, + ) -> Result { + let block_index = self + .proto_array + .indices + .get(block_root) + .ok_or_else(|| format!("Unknown block root: {block_root:?}"))?; + let proto_node = self + .proto_array + .nodes + .get(*block_index) + .ok_or_else(|| format!("Missing node at index: {block_index}"))?; + let fc_node = IndexedForkChoiceNode { + root: proto_node.root(), + proto_node_index: *block_index, + payload_status: proto_node.get_parent_payload_status(), + }; + self.proto_array + .should_extend_payload::(&fc_node, proto_node, proposer_boost_root) + .map_err(|e| format!("{e:?}")) + } + /// Returns the `block.execution_status` field, if the block is present. 
pub fn get_block_execution_status(&self, block_root: &Hash256) -> Option { let block = self.get_proto_node(block_root)?; diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index f5f06d1cb9..56e667cdd3 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -1,11 +1,6 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, VerifySignatures, - envelope_processing::{ - EnvelopeProcessingError, VerifyStateRoot, process_execution_payload_envelope, - }, - per_block_processing, - per_epoch_processing::EpochProcessingSummary, + VerifyBlockRoot, per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing, }; use itertools::Itertools; @@ -13,7 +8,7 @@ use std::iter::Peekable; use std::marker::PhantomData; use types::{ BeaconState, BeaconStateError, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, - SignedExecutionPayloadEnvelope, Slot, execution::StatePayloadStatus, + Slot, }; pub type PreBlockHook<'a, E, Error> = Box< @@ -29,7 +24,7 @@ pub type PostSlotHook<'a, E, Error> = Box< >; pub type StateRootIterDefault = std::iter::Empty>; -/// Efficiently apply blocks and payloads to a state while configuring various parameters. +/// Efficiently apply blocks to a state while configuring various parameters. /// /// Usage follows a builder pattern. pub struct BlockReplayer< @@ -46,21 +41,8 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, - /// Iterator over state roots for all *block* states. - /// - /// Pre-Gloas, this is all states. Post-Gloas, this is *just* the states corresponding to beacon - /// blocks. For states corresponding to payloads, we read the state root from the payload - /// envelope. - // TODO(gloas): this concept might need adjusting when we implement the cold DB. 
pub(crate) state_root_iter: Option>, state_root_miss: bool, - /// The payload status of the state desired as the end result of block replay. - /// - /// This dictates whether a payload should be applied after applying the last block. - /// - /// Prior to Gloas, this should always be set to `StatePayloadStatus::Pending` to indicate - /// that no envelope needs to be applied. - desired_state_payload_status: StatePayloadStatus, _phantom: PhantomData, } @@ -68,12 +50,7 @@ pub struct BlockReplayer< pub enum BlockReplayError { SlotProcessing(SlotProcessingError), BlockProcessing(BlockProcessingError), - EnvelopeProcessing(EnvelopeProcessingError), BeaconState(BeaconStateError), - /// A payload envelope for this `slot` was required but not provided. - MissingPayloadEnvelope { - slot: Slot, - }, } impl From for BlockReplayError { @@ -88,12 +65,6 @@ impl From for BlockReplayError { } } -impl From for BlockReplayError { - fn from(e: EnvelopeProcessingError) -> Self { - Self::EnvelopeProcessing(e) - } -} - impl From for BlockReplayError { fn from(e: BeaconStateError) -> Self { Self::BeaconState(e) @@ -125,7 +96,6 @@ where post_slot_hook: None, state_root_iter: None, state_root_miss: false, - desired_state_payload_status: StatePayloadStatus::Pending, _phantom: PhantomData, } } @@ -191,14 +161,6 @@ where self } - /// Set the desired payload status of the state reached by replay. - /// - /// This determines whether to apply a payload after applying the last block. - pub fn desired_state_payload_status(mut self, payload_status: StatePayloadStatus) -> Self { - self.desired_state_payload_status = payload_status; - self - } - /// Compute the state root for `self.state` as efficiently as possible. /// /// This function MUST only be called when `self.state` is a post-state, i.e. it MUST not be @@ -246,38 +208,6 @@ where Ok(state_root) } - /// Apply an execution payload envelope to `self.state`. 
- /// - /// The `block_state_root` MUST be the `state_root` of the most recently applied block. - /// - /// Returns the `state_root` of `self.state` after payload application. - fn apply_payload_envelope( - &mut self, - envelope: &SignedExecutionPayloadEnvelope, - block_state_root: Hash256, - ) -> Result { - // TODO(gloas): bulk signature verification could be relevant here? - let verify_payload_signatures = - if let BlockSignatureStrategy::NoVerification = self.block_sig_strategy { - VerifySignatures::False - } else { - VerifySignatures::True - }; - // TODO(gloas): state root verif enabled during initial prototyping - let verify_state_root = VerifyStateRoot::True; - process_execution_payload_envelope( - &mut self.state, - Some(block_state_root), - envelope, - verify_payload_signatures, - verify_state_root, - self.spec, - ) - .map_err(BlockReplayError::from)?; - - Ok(envelope.message.state_root) - } - /// Apply `blocks` atop `self.state`, taking care of slot processing. /// /// If `target_slot` is provided then the state will be advanced through to `target_slot` @@ -285,21 +215,8 @@ where pub fn apply_blocks( mut self, blocks: Vec>>, - payload_envelopes: Vec>, target_slot: Option, ) -> Result { - let mut envelopes_iter = payload_envelopes.into_iter(); - - let mut next_envelope_at_slot = |slot| { - if let Some(envelope) = envelopes_iter.next() - && envelope.message.slot == slot - { - Ok(envelope) - } else { - Err(BlockReplayError::MissingPayloadEnvelope { slot }) - } - }; - for (i, block) in blocks.iter().enumerate() { // Allow one additional block at the start which is only used for its state root. if i == 0 && block.slot() <= self.state.slot() { @@ -307,36 +224,7 @@ where } while self.state.slot() < block.slot() { - let mut state_root = self.get_state_root(&blocks, i)?; - - // Apply the payload for the *previous* block if the bid in the current block - // indicates that the parent is full (and it hasn't already been applied). 
- state_root = if block.fork_name_unchecked().gloas_enabled() - && self.state.slot() == self.state.latest_block_header().slot - && self.state.payload_status() == StatePayloadStatus::Pending - { - let latest_bid_block_hash = self - .state - .latest_execution_payload_bid() - .map_err(BlockReplayError::from)? - .block_hash; - - // Similar to `is_parent_block_full`, but reading the block hash from the - // not-yet-applied `block`. The slot 0 case covers genesis (no block replay reqd). - if self.state.slot() != 0 && block.is_parent_block_full(latest_bid_block_hash) { - let envelope = next_envelope_at_slot(self.state.slot())?; - // State root for the next slot processing is now the envelope's state root. - self.apply_payload_envelope(&envelope, state_root)? - } else { - // Empty payload at this slot, the state root is unchanged from when the - // beacon block was applied. - state_root - } - } else { - // Pre-Gloas or at skipped slots post-Gloas, the state root of the parent state - // is always the output from `self.get_state_root`. - state_root - }; + let state_root = self.get_state_root(&blocks, i)?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; @@ -380,24 +268,9 @@ where } } - // Apply the last payload if desired. - let mut opt_state_root = if let StatePayloadStatus::Full = self.desired_state_payload_status - && let Some(last_block) = blocks.last() - { - let envelope = next_envelope_at_slot(self.state.slot())?; - Some(self.apply_payload_envelope(&envelope, last_block.state_root())?) - } else { - None - }; - if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { - // Read state root from `opt_state_root` if a payload was just applied. - let state_root = if let Some(root) = opt_state_root.take() { - root - } else { - self.get_state_root(&blocks, blocks.len())? 
- }; + let state_root = self.get_state_root(&blocks, blocks.len())?; if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { pre_slot_hook(state_root, &mut self.state)?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index 97953b835f..8ea96390e3 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -1,15 +1,10 @@ -use crate::BlockProcessingError; use crate::VerifySignatures; use crate::per_block_processing::compute_timestamp_at_slot; -use crate::per_block_processing::process_operations::{ - process_consolidation_requests, process_deposit_requests_post_gloas, - process_withdrawal_requests, -}; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::ArithError; use tree_hash::TreeHash; use types::{ - BeaconState, BeaconStateError, BuilderIndex, BuilderPendingPayment, ChainSpec, EthSpec, - ExecutionBlockHash, Hash256, SignedExecutionPayloadEnvelope, Slot, + BeaconState, BeaconStateError, BuilderIndex, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, + SignedExecutionPayloadEnvelope, Slot, }; macro_rules! envelope_verify { @@ -20,29 +15,11 @@ macro_rules! envelope_verify { }; } -/// The strategy to be used when validating the payloads state root. -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(PartialEq, Clone, Copy)] -pub enum VerifyStateRoot { - /// Validate state root. - True, - /// Do not validate state root. Use with caution. - /// This should only be used when first constructing the payload envelope. 
- False, -} - -impl VerifyStateRoot { - pub fn is_true(self) -> bool { - self == VerifyStateRoot::True - } -} - #[derive(Debug, Clone)] pub enum EnvelopeProcessingError { /// Bad Signature BadSignature, BeaconStateError(BeaconStateError), - BlockProcessingError(BlockProcessingError), ArithError(ArithError), /// Envelope doesn't match latest beacon block header LatestBlockHeaderMismatch { @@ -89,15 +66,11 @@ pub enum EnvelopeProcessingError { state: u64, envelope: u64, }, - // Invalid state root - InvalidStateRoot { - state: Hash256, + // The execution requests root doesn't match the committed bid + ExecutionRequestsRootMismatch { + committed_bid: Hash256, envelope: Hash256, }, - // BitFieldError - BitFieldError(ssz::BitfieldError), - // Some kind of error calculating the builder payment index - BuilderPaymentIndexOutOfBounds(usize), /// The envelope was deemed invalid by the execution engine. ExecutionInvalid, } @@ -108,50 +81,44 @@ impl From for EnvelopeProcessingError { } } -impl From for EnvelopeProcessingError { - fn from(e: BlockProcessingError) -> Self { - EnvelopeProcessingError::BlockProcessingError(e) - } -} - impl From for EnvelopeProcessingError { fn from(e: ArithError) -> Self { EnvelopeProcessingError::ArithError(e) } } -/// Processes a `SignedExecutionPayloadEnvelope` +/// Verifies a `SignedExecutionPayloadEnvelope` against the beacon state. /// -/// This function does all the state modifications inside `process_execution_payload()` -pub fn process_execution_payload_envelope( - state: &mut BeaconState, - parent_state_root: Option, +/// This function performs pure verification with no state mutation. The execution requests +/// from the envelope are deferred to be processed in the next block via +/// `process_parent_execution_payload`. +/// +/// `block_state_root` should be the post-block state root (used to fill in the block header +/// for beacon_block_root verification). 
If the latest_block_header already has +its state_root set, the provided value is unused. +pub fn verify_execution_payload_envelope( + state: &BeaconState, signed_envelope: &SignedExecutionPayloadEnvelope, verify_signatures: VerifySignatures, - verify_state_root: VerifyStateRoot, + block_state_root: Hash256, spec: &ChainSpec, ) -> Result<(), EnvelopeProcessingError> { - if verify_signatures.is_true() { - // Verify Signed Envelope Signature - if !signed_envelope.verify_signature_with_state(state, spec)? { - return Err(EnvelopeProcessingError::BadSignature); - } + if verify_signatures.is_true() && !signed_envelope.verify_signature_with_state(state, spec)? { + return Err(EnvelopeProcessingError::BadSignature); } let envelope = &signed_envelope.message; let payload = &envelope.payload; - let execution_requests = &envelope.execution_requests; - // Cache latest block header state root - if state.latest_block_header().state_root == Hash256::default() { - let previous_state_root = parent_state_root - .map(Ok) - .unwrap_or_else(|| state.canonical_root())?; - state.latest_block_header_mut().state_root = previous_state_root; + // Verify consistency with the beacon block. + // Use a copy of the header with state_root filled in, matching the spec's approach. + let mut header = state.latest_block_header().clone(); + if header.state_root == Hash256::default() { + // The caller must provide the post-block state root so we can compute + // the block header root without mutating state.
+ header.state_root = block_state_root; } - - // Verify consistency with the beacon block - let latest_block_header_root = state.latest_block_header().tree_hash_root(); + let latest_block_header_root = header.tree_hash_root(); envelope_verify!( envelope.beacon_block_root == latest_block_header_root, EnvelopeProcessingError::LatestBlockHeaderMismatch { @@ -160,9 +127,9 @@ pub fn process_execution_payload_envelope( } ); envelope_verify!( - envelope.slot == state.slot(), + envelope.slot() == state.slot(), EnvelopeProcessingError::SlotMismatch { - envelope_slot: envelope.slot, + envelope_slot: envelope.slot(), parent_state_slot: state.slot(), } ); @@ -238,59 +205,17 @@ pub fn process_execution_payload_envelope( } ); + // Verify execution requests root matches committed bid + let execution_requests_root = envelope.execution_requests.tree_hash_root(); + envelope_verify!( + execution_requests_root == committed_bid.execution_requests_root, + EnvelopeProcessingError::ExecutionRequestsRootMismatch { + committed_bid: committed_bid.execution_requests_root, + envelope: execution_requests_root, + } + ); + // TODO(gloas): newPayload happens here in the spec, ensure we wire that up correctly - process_deposit_requests_post_gloas(state, &execution_requests.deposits, spec)?; - process_withdrawal_requests(state, &execution_requests.withdrawals, spec)?; - process_consolidation_requests(state, &execution_requests.consolidations, spec)?; - - // Queue the builder payment - let payment_index = E::slots_per_epoch() - .safe_add(state.slot().as_u64().safe_rem(E::slots_per_epoch())?)? - as usize; - let payment_mut = state - .builder_pending_payments_mut()? - .get_mut(payment_index) - .ok_or(EnvelopeProcessingError::BuilderPaymentIndexOutOfBounds( - payment_index, - ))?; - - // We have re-ordered the blanking out of the pending payment to avoid a double-lookup. 
- // This is semantically equivalent to the ordering used by the spec because we have taken a - // clone of the payment prior to doing the write. - let payment_withdrawal = payment_mut.withdrawal.clone(); - *payment_mut = BuilderPendingPayment::default(); - - let amount = payment_withdrawal.amount; - if amount > 0 { - state - .builder_pending_withdrawals_mut()? - .push(payment_withdrawal) - .map_err(|e| EnvelopeProcessingError::BeaconStateError(e.into()))?; - } - - // Cache the execution payload hash - let availability_index = state - .slot() - .as_usize() - .safe_rem(E::slots_per_historical_root())?; - state - .execution_payload_availability_mut()? - .set(availability_index, true) - .map_err(EnvelopeProcessingError::BitFieldError)?; - *state.latest_block_hash_mut()? = payload.block_hash; - - if verify_state_root.is_true() { - // Verify the state root - let state_root = state.canonical_root()?; - envelope_verify!( - envelope.state_root == state_root, - EnvelopeProcessingError::InvalidStateRoot { - state: state_root, - envelope: envelope.state_root, - } - ); - } - Ok(()) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 861fccb374..9dfbc87b48 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -167,9 +167,21 @@ pub fn initialize_beacon_state_from_eth1( // Remove intermediate Fulu fork from `state.fork`. state.fork_mut().previous_version = spec.gloas_fork_version; - // Override latest execution payload header. - // Here's where we *would* clone the header but there is no header here so.. - // TODO(EIP7732): check this + // The genesis block's bid must have block_hash = 0x00 per spec (empty payload). + // Retain the EL genesis hash in latest_block_hash and parent_block_hash so the + // first post-genesis proposer can build on the correct EL head. 
+ let el_genesis_hash = state.latest_execution_payload_bid()?.block_hash; + let bid = state.latest_execution_payload_bid_mut()?; + bid.parent_block_hash = el_genesis_hash; + bid.block_hash = ExecutionBlockHash::default(); + + // Update latest_block_header to reflect the Gloas genesis block body which contains + // the EL genesis hash in the signed_execution_payload_bid. This is needed because + // BeaconState::new() created the header from BeaconBlock::empty() which has zero bid + // fields, but the spec requires the genesis block's bid to contain the EL block hash + // and the tree hash root of empty ExecutionRequests. + let block = genesis_block(&state, spec)?; + state.latest_block_header_mut().body_root = block.body_root(); } // Now that we have our validators, initialize the caches (including the committees) @@ -181,6 +193,28 @@ pub fn initialize_beacon_state_from_eth1( Ok(state) } +/// Create an unsigned genesis `BeaconBlock` whose body matches the genesis state. +/// +/// For Gloas, the block's `signed_execution_payload_bid` is populated from the state's +/// `latest_execution_payload_bid` so that the body root is consistent with +/// `state.latest_block_header.body_root`. +/// +/// The returned block has `state_root == Hash256::ZERO`; callers that need the real +/// state root should set it themselves. +pub fn genesis_block( + genesis_state: &BeaconState, + spec: &ChainSpec, +) -> Result, BeaconStateError> { + let mut block = BeaconBlock::empty(spec); + if let Ok(block) = block.as_gloas_mut() { + let state_bid = genesis_state.latest_execution_payload_bid()?; + let bid = &mut block.body.signed_execution_payload_bid.message; + bid.block_hash = state_bid.block_hash; + bid.execution_requests_root = state_bid.execution_requests_root; + } + Ok(block) +} + /// Determine whether a candidate genesis state is suitable for starting the chain. 
pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 210e0437be..71ad394ee6 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -120,7 +120,7 @@ pub fn per_block_processing>( let block = signed_block.message(); // Verify that the `SignedBeaconBlock` instantiation matches the fork at `signed_block.slot()`. - signed_block + let fork_name = signed_block .fork_name(spec) .map_err(BlockProcessingError::InconsistentBlockFork)?; @@ -129,6 +129,11 @@ pub fn per_block_processing>( .fork_name(spec) .map_err(BlockProcessingError::InconsistentStateFork)?; + // Process deferred execution requests from the parent's envelope. + if fork_name.gloas_enabled() { + process_parent_execution_payload(state, block, spec)?; + } + // Build epoch cache if it hasn't already been built, or if it is no longer valid initialize_epoch_cache(state, spec)?; initialize_progressive_balances_cache(state, spec)?; @@ -531,6 +536,139 @@ pub fn compute_timestamp_at_slot( .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis)) } +/// Process the parent block's deferred execution payload effects. +/// +/// This implements the spec's `process_parent_execution_payload` function, which validates +/// the parent execution requests and delegates to `apply_parent_execution_payload` if the +/// parent block was full. This is called at the beginning of block processing, before +/// `process_block_header`. +/// +/// `process_parent_execution_payload` must be called before `process_execution_payload_bid` +/// (which overwrites `state.latest_execution_payload_bid`). 
+pub fn process_parent_execution_payload>( + state: &mut BeaconState, + block: BeaconBlockRef<'_, E, Payload>, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + let bid_parent_block_hash = block + .body() + .signed_execution_payload_bid()? + .message + .parent_block_hash; + let parent_bid = state.latest_execution_payload_bid()?.clone(); + let requests = block.body().parent_execution_requests()?; + + let is_genesis_block = parent_bid.block_hash == ExecutionBlockHash::zero(); + let is_parent_block_empty = bid_parent_block_hash != parent_bid.block_hash; + + if is_genesis_block || is_parent_block_empty { + // Parent was EMPTY -- no execution requests expected + block_verify!( + *requests == ExecutionRequests::default(), + BlockProcessingError::NonEmptyParentExecutionRequests + ); + return Ok(()); + } + + // Parent was FULL -- verify the bid commitment and apply the payload + let requests_root = requests.tree_hash_root(); + block_verify!( + requests_root == parent_bid.execution_requests_root, + BlockProcessingError::ExecutionRequestsRootMismatch { + expected: parent_bid.execution_requests_root, + found: requests_root, + } + ); + + apply_parent_execution_payload(state, &parent_bid, requests, spec) +} + +/// Apply the parent execution payload's deferred effects to the state. +/// +/// This implements the spec's `apply_parent_execution_payload` function: +/// 1. Processes deposits, withdrawals, and consolidations from execution requests +/// 2. Queues the builder pending payment from the parent's committed bid +/// 3. 
Updates `execution_payload_availability` and `latest_block_hash` +pub fn apply_parent_execution_payload( + state: &mut BeaconState, + parent_bid: &ExecutionPayloadBid, + requests: &ExecutionRequests, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + let parent_slot = parent_bid.slot; + let parent_epoch = parent_slot.epoch(E::slots_per_epoch()); + + // Process execution requests from the parent's payload + process_operations::process_deposit_requests_post_gloas(state, &requests.deposits, spec)?; + process_operations::process_withdrawal_requests(state, &requests.withdrawals, spec)?; + process_operations::process_consolidation_requests(state, &requests.consolidations, spec)?; + + // Queue the builder payment + if parent_epoch == state.current_epoch() { + let payment_index = E::slots_per_epoch() + .safe_add(parent_slot.as_u64().safe_rem(E::slots_per_epoch())?)? + as usize; + settle_builder_payment(state, payment_index)?; + } else if parent_epoch == state.previous_epoch() { + let payment_index = parent_slot.as_u64().safe_rem(E::slots_per_epoch())? as usize; + settle_builder_payment(state, payment_index)?; + } else if parent_bid.value > 0 { + // Parent is older than previous epoch -- payment entry has already been + // settled or evicted by process_builder_pending_payments at epoch boundaries. + // Append the withdrawal directly from the bid. + state + .builder_pending_withdrawals_mut()? + .push(BuilderPendingWithdrawal { + fee_recipient: parent_bid.fee_recipient, + amount: parent_bid.value, + builder_index: parent_bid.builder_index, + }) + .map_err(|e| BlockProcessingError::BeaconStateError(e.into()))?; + } + + // Update execution payload availability for the parent slot + let availability_index = parent_slot + .as_usize() + .safe_rem(E::slots_per_historical_root())?; + state + .execution_payload_availability_mut()? 
+ .set(availability_index, true) + .map_err(BlockProcessingError::BitfieldError)?; + + // Update latest_block_hash to the parent bid's block_hash + *state.latest_block_hash_mut()? = parent_bid.block_hash; + + Ok(()) +} + +/// Spec: `settle_builder_payment`. +/// +/// Moves a pending payment from `builder_pending_payments[payment_index]` into +/// `builder_pending_withdrawals`, then clears the slot. +pub fn settle_builder_payment( + state: &mut BeaconState, + payment_index: usize, +) -> Result<(), BlockProcessingError> { + let payment_mut = state + .builder_pending_payments_mut()? + .get_mut(payment_index) + .ok_or(BlockProcessingError::BuilderPaymentIndexOutOfBounds( + payment_index, + ))?; + + let withdrawal = payment_mut.withdrawal.clone(); + *payment_mut = BuilderPendingPayment::default(); + + if withdrawal.amount > 0 { + state + .builder_pending_withdrawals_mut()? + .push(withdrawal) + .map_err(|e| BlockProcessingError::BeaconStateError(e.into()))?; + } + + Ok(()) +} + pub fn process_execution_payload_bid>( state: &mut BeaconState, block: BeaconBlockRef<'_, E, Payload>, diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 71083378db..93d668c8c9 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -108,6 +108,13 @@ pub enum BlockProcessingError { }, /// Builder payment index out of bounds (Gloas) BuilderPaymentIndexOutOfBounds(usize), + /// The parent execution requests root doesn't match the committed bid + ExecutionRequestsRootMismatch { + expected: Hash256, + found: Hash256, + }, + /// Parent was not full but non-empty execution requests were provided + NonEmptyParentExecutionRequests, } impl From for BlockProcessingError { diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 
0203b33e61..96610c2010 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1014,7 +1014,7 @@ async fn block_replayer_peeking_state_roots() { let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) .state_root_iter(state_root_iter.into_iter()) .no_signature_verification() - .apply_blocks(vec![target_block], vec![], None) + .apply_blocks(vec![target_block], None) .unwrap(); assert_eq!( diff --git a/consensus/state_processing/src/per_block_processing/withdrawals.rs b/consensus/state_processing/src/per_block_processing/withdrawals.rs index 72c3339b10..3b14e904c4 100644 --- a/consensus/state_processing/src/per_block_processing/withdrawals.rs +++ b/consensus/state_processing/src/per_block_processing/withdrawals.rs @@ -9,8 +9,8 @@ use safe_arith::{SafeArith, SafeArithIter}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, - ExpectedWithdrawals, ExpectedWithdrawalsCapella, ExpectedWithdrawalsElectra, - ExpectedWithdrawalsGloas, Validator, Withdrawal, Withdrawals, + ExecutionBlockHash, ExpectedWithdrawals, ExpectedWithdrawalsCapella, + ExpectedWithdrawalsElectra, ExpectedWithdrawalsGloas, Validator, Withdrawal, Withdrawals, }; /// Compute the next batch of withdrawals which should be included in a block. @@ -494,7 +494,11 @@ pub mod gloas { state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - if !state.is_parent_block_full() { + // Return early if the parent block is empty. + let is_genesis_block = *state.latest_block_hash()? == ExecutionBlockHash::default(); + let is_parent_block_empty = + *state.latest_block_hash()? 
!= state.latest_execution_payload_bid()?.block_hash; + if is_genesis_block || is_parent_block_empty { return Ok(()); } diff --git a/consensus/state_processing/src/upgrade/gloas.rs b/consensus/state_processing/src/upgrade/gloas.rs index b39ee6048f..84cdbf22c2 100644 --- a/consensus/state_processing/src/upgrade/gloas.rs +++ b/consensus/state_processing/src/upgrade/gloas.rs @@ -7,10 +7,12 @@ use ssz_types::BitVector; use ssz_types::FixedVector; use std::collections::HashSet; use std::mem; +use tree_hash::TreeHash; use typenum::Unsigned; use types::{ BeaconState, BeaconStateError as Error, BeaconStateGloas, BuilderPendingPayment, ChainSpec, - DepositData, EthSpec, ExecutionPayloadBid, Fork, is_builder_withdrawal_credential, + DepositData, EthSpec, ExecutionPayloadBid, ExecutionRequests, Fork, + is_builder_withdrawal_credential, }; /// Transform a `Fulu` state into a `Gloas` state. @@ -78,6 +80,7 @@ pub fn upgrade_state_to_gloas( // Execution Bid latest_execution_payload_bid: ExecutionPayloadBid { block_hash: pre.latest_execution_payload_header.block_hash, + execution_requests_root: ExecutionRequests::::default().tree_hash_root(), ..Default::default() }, // Capella diff --git a/consensus/types/src/block/beacon_block.rs b/consensus/types/src/block/beacon_block.rs index 5634d842b6..3360728eaa 100644 --- a/consensus/types/src/block/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -716,6 +716,7 @@ impl> EmptyBlock for BeaconBlockGloa voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), bls_to_execution_changes: VariableList::empty(), + parent_execution_requests: ExecutionRequests::default(), signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), payload_attestations: VariableList::empty(), _phantom: PhantomData, diff --git a/consensus/types/src/block/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs index fd5d976c9b..cd3f4dcaba 100644 --- a/consensus/types/src/block/beacon_block_body.rs +++ 
b/consensus/types/src/block/beacon_block_body.rs @@ -170,6 +170,8 @@ pub struct BeaconBlockBody = FullPay pub signed_execution_payload_bid: SignedExecutionPayloadBid, #[superstruct(only(Gloas))] pub payload_attestations: VariableList, E::MaxPayloadAttestations>, + #[superstruct(only(Gloas))] + pub parent_execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair, Gloas))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] @@ -564,6 +566,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom, @@ -580,6 +583,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom: PhantomData, @@ -898,6 +902,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom, @@ -915,6 +920,7 @@ impl From>> voluntary_exits, sync_aggregate, bls_to_execution_changes, + parent_execution_requests, signed_execution_payload_bid, payload_attestations, _phantom: PhantomData, diff --git a/consensus/types/src/block/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs index dd6f52426a..23b01415c8 100644 --- a/consensus/types/src/block/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -394,13 +394,15 @@ impl> SignedBeaconBlock /// `block_hash` from the parent beacon block's bid. If the parent beacon state is available /// this can alternatively be fetched from `state.latest_payload_bid`. /// - /// This function returns `false` for all blocks prior to Gloas. + /// This function returns `false` for all blocks prior to Gloas and for the zero + /// `parent_block_hash`. 
pub fn is_parent_block_full(&self, parent_block_hash: ExecutionBlockHash) -> bool { let Ok(signed_payload_bid) = self.message().body().signed_execution_payload_bid() else { // Prior to Gloas. return false; }; - signed_payload_bid.message.parent_block_hash == parent_block_hash + parent_block_hash != ExecutionBlockHash::zero() + && signed_payload_bid.message.parent_block_hash == parent_block_hash } } diff --git a/consensus/types/src/execution/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs index d99b8785fa..c84a46874d 100644 --- a/consensus/types/src/execution/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -10,7 +10,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; use crate::{ - core::{Address, EthSpec, ExecutionBlockHash, Hash256}, + core::{Address, EthSpec, ExecutionBlockHash, Hash256, Slot}, fork::{ForkName, ForkVersionDecode}, state::BeaconStateError, test_utils::TestRandom, @@ -109,6 +109,12 @@ pub struct ExecutionPayload { #[superstruct(only(Deneb, Electra, Fulu, Gloas), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, + /// EIP-7928: Block access list + #[superstruct(only(Gloas))] + #[serde(with = "ssz_types::serde_utils::hex_var_list")] + pub block_access_list: VariableList, + #[superstruct(only(Gloas), partial_getter(copy))] + pub slot_number: Slot, } impl<'a, E: EthSpec> ExecutionPayloadRef<'a, E> { diff --git a/consensus/types/src/execution/execution_payload_bid.rs b/consensus/types/src/execution/execution_payload_bid.rs index 5c8771993e..b2438681c1 100644 --- a/consensus/types/src/execution/execution_payload_bid.rs +++ b/consensus/types/src/execution/execution_payload_bid.rs @@ -37,6 +37,7 @@ pub struct ExecutionPayloadBid { #[serde(with = "serde_utils::quoted_u64")] pub execution_payment: u64, pub blob_kzg_commitments: KzgCommitments, + pub execution_requests_root: Hash256, } impl SignedRoot for ExecutionPayloadBid {} diff --git 
a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs index 169331a884..028423d681 100644 --- a/consensus/types/src/execution/execution_payload_envelope.rs +++ b/consensus/types/src/execution/execution_payload_envelope.rs @@ -20,8 +20,6 @@ pub struct ExecutionPayloadEnvelope { #[serde(with = "serde_utils::quoted_u64")] pub builder_index: u64, pub beacon_block_root: Hash256, - pub slot: Slot, - pub state_root: Hash256, } impl ExecutionPayloadEnvelope { @@ -32,8 +30,6 @@ impl ExecutionPayloadEnvelope { execution_requests: ExecutionRequests::default(), builder_index: 0, beacon_block_root: Hash256::zero(), - slot: Slot::new(0), - state_root: Hash256::zero(), } } @@ -60,6 +56,10 @@ impl ExecutionPayloadEnvelope { + (E::max_consolidation_requests_per_payload() * ::ssz_fixed_len()) } + + pub fn slot(&self) -> Slot { + self.payload.slot_number + } } impl SignedRoot for ExecutionPayloadEnvelope {} diff --git a/consensus/types/src/execution/mod.rs b/consensus/types/src/execution/mod.rs index 591be32b24..a3d4ed8730 100644 --- a/consensus/types/src/execution/mod.rs +++ b/consensus/types/src/execution/mod.rs @@ -12,7 +12,6 @@ mod payload; mod signed_bls_to_execution_change; mod signed_execution_payload_bid; mod signed_execution_payload_envelope; -mod state_payload_status; pub use bls_to_execution_change::BlsToExecutionChange; pub use eth1_data::Eth1Data; @@ -42,4 +41,3 @@ pub use payload::{ pub use signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use signed_execution_payload_bid::SignedExecutionPayloadBid; pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; -pub use state_payload_status::StatePayloadStatus; diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs index 76fa841680..522c8b3f54 100644 --- a/consensus/types/src/execution/signed_execution_payload_envelope.rs +++ 
b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -42,7 +42,7 @@ impl SignedExecutionPayloadEnvelope { } pub fn slot(&self) -> Slot { - self.message.slot + self.message.slot() } pub fn epoch(&self) -> Epoch { diff --git a/consensus/types/src/execution/state_payload_status.rs b/consensus/types/src/execution/state_payload_status.rs deleted file mode 100644 index 1661be6060..0000000000 --- a/consensus/types/src/execution/state_payload_status.rs +++ /dev/null @@ -1,18 +0,0 @@ -use serde::{Deserialize, Serialize}; - -/// Payload status as it applies to a `BeaconState` post-Gloas. -/// -/// A state can either be a post-state for a block (in which case we call it `Pending`) or a -/// payload envelope (`Full`). When handling states it is often necessary to know which of these -/// two variants is required. -/// -/// Note that states at skipped slots could be either `Pending` or `Full`, depending on whether -/// the payload for the most-recently applied block was also applied. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum StatePayloadStatus { - /// For states produced by `process_block` executed on a `BeaconBlock`. - Pending, - /// For states produced by `process_execution_payload` on a `ExecutionPayloadEnvelope`. 
- Full, -} diff --git a/consensus/types/src/state/beacon_state.rs b/consensus/types/src/state/beacon_state.rs index 8bef8816e5..7e2b3096a8 100644 --- a/consensus/types/src/state/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -37,7 +37,7 @@ use crate::{ execution::{ Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, StatePayloadStatus, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }, fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, light_client::consts::{ @@ -571,9 +571,10 @@ where )] #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, + #[test_random(default)] #[superstruct(only(Gloas))] #[metastruct(exclude_from(tree_lists))] - pub latest_execution_payload_bid: ExecutionPayloadBid, + pub latest_block_hash: ExecutionBlockHash, #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] @@ -657,10 +658,9 @@ where pub builder_pending_withdrawals: List, - #[test_random(default)] #[superstruct(only(Gloas))] #[metastruct(exclude_from(tree_lists))] - pub latest_block_hash: ExecutionBlockHash, + pub latest_execution_payload_bid: ExecutionPayloadBid, #[compare_fields(as_iter)] #[test_random(default)] @@ -1273,24 +1273,6 @@ impl BeaconState { } } - /// Determine the payload status of this state. - /// - /// Prior to Gloas this is always `Pending`. - /// - /// Post-Gloas, the definition of the `StatePayloadStatus` is: - /// - /// - `Full` if this state is the result of envelope processing. - /// - `Pending` if this state is the result of block processing. 
- pub fn payload_status(&self) -> StatePayloadStatus { - if !self.fork_name_unchecked().gloas_enabled() { - StatePayloadStatus::Pending - } else if self.is_parent_block_full() { - StatePayloadStatus::Full - } else { - StatePayloadStatus::Pending - } - } - /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. /// /// Spec v0.12.1 @@ -2507,22 +2489,6 @@ impl BeaconState { } } - /// Return true if the parent block was full (both beacon block and execution payload were present). - pub fn is_parent_block_full(&self) -> bool { - match self { - BeaconState::Base(_) | BeaconState::Altair(_) => false, - // TODO(EIP-7732): check the implications of this when we get to forkchoice modifications - BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) - | BeaconState::Fulu(_) => true, - BeaconState::Gloas(state) => { - state.latest_execution_payload_bid.block_hash == state.latest_block_hash - } - } - } - /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. 
diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index ab24ea35a0..facc8208d9 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.4 +CONSENSUS_SPECS_TEST_VERSION ?= v1.7.0-alpha.5 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 2daafada31..5a54e150db 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -49,8 +49,6 @@ excluded_paths = [ "tests/.*/eip7805", # Heze fork is not implemented "tests/.*/heze/.*", - # TODO(gloas): remove these ignores as Gloas consensus is implemented - "tests/.*/gloas/fork_choice/.*", # Ignore MatrixEntry SSZ tests for now. "tests/.*/.*/ssz_static/MatrixEntry/.*", # TODO: partial data column not implemented yet @@ -77,7 +75,9 @@ excluded_paths = [ # We don't need these manifest files at the moment. 
"tests/.*/manifest.yaml", # TODO: gossip condition tests not implemented yet - "tests/.*/.*/networking/.*" + "tests/.*/.*/networking/.*", + # TODO: fast confirmation rule not merged yet + "tests/.*/.*/fast_confirmation", ] diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 5e9dc001c7..2af205ee47 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -19,9 +19,13 @@ use beacon_chain::{ custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, EphemeralHarnessType}, }; -use execution_layer::{PayloadStatusV1, json_structures::JsonPayloadStatusV1Status}; +use execution_layer::{ + PayloadStatusV1, PayloadStatusV1Status, json_structures::JsonPayloadStatusV1Status, +}; use serde::Deserialize; use ssz_derive::Decode; +use state_processing::VerifySignatures; +use state_processing::envelope_processing::verify_execution_payload_envelope; use state_processing::state_advance::complete_state_advance; use std::future::Future; use std::sync::Arc; @@ -995,38 +999,95 @@ impl Tester { valid: bool, ) -> Result<(), Error> { let block_root = signed_envelope.message.beacon_block_root; + let block_hash = signed_envelope.message.payload.block_hash; + let store = &self.harness.chain.store; + let spec = &self.harness.chain.spec; - // Store the envelope in the database so that child blocks extending - // the FULL path can load the parent's post-payload state. + // Simulate the EL: pre-configure the mock execution engine to return VALID + // for envelopes the test expects to be valid. Invalid envelopes are left + // unconfigured so the mock EE's default (SYNCING) rejects them. 
+ let el = self.harness.mock_execution_layer.as_ref().unwrap(); if valid { - self.harness - .chain - .store - .put_payload_envelope(&block_root, signed_envelope.clone()) + el.server.set_new_payload_status( + block_hash, + PayloadStatusV1 { + status: JsonPayloadStatusV1Status::Valid.into(), + latest_valid_hash: Some(block_hash), + validation_error: None, + }, + ); + } + + // Attempt to verify the envelope against the block's post-state. + let verification_result = (|| { + let block = store + .get_blinded_block(&block_root) + .map_err(|e| Error::InternalError(format!("Failed to load block: {e:?}")))? + .ok_or_else(|| { + Error::InternalError(format!("Block not found for root {block_root:?}")) + })?; + let block_state_root = block.state_root(); + + let state = store + .get_hot_state(&block_state_root, CACHE_STATE_IN_TESTS) + .map_err(|e| Error::InternalError(format!("Failed to load state: {e:?}")))? + .ok_or_else(|| { + Error::InternalError(format!("State not found for root {block_state_root:?}")) + })?; + + verify_execution_payload_envelope( + &state, + signed_envelope, + VerifySignatures::True, + block_state_root, + spec, + ) + .map_err(|e| { + Error::InternalError(format!("Failed to process execution payload: {e:?}")) + })?; + + // Check the mock EE's response for this block hash (simulates newPayload). + let ee_valid = el + .server + .ctx + .get_new_payload_status(&block_hash) + .and_then(|r| r.ok()) + .is_some_and(|s| s.status == PayloadStatusV1Status::Valid); + if !ee_valid { + return Err(Error::InternalError(format!( + "Mock EE rejected payload with block hash {block_hash:?}", + ))); + } + + Ok(()) + })(); + + if valid { + verification_result?; + + // Store the envelope so that child blocks can load the parent's payload. 
+ store + .put_payload_envelope(&block_root, signed_envelope) .map_err(|e| { Error::InternalError(format!( "Failed to store payload envelope for {block_root:?}: {e:?}", )) })?; - } - let result = self - .harness - .chain - .canonical_head - .fork_choice_write_lock() - .on_valid_payload_envelope_received(block_root); - - if valid { - result.map_err(|e| { - Error::InternalError(format!( - "on_execution_payload for block root {} failed: {:?}", - block_root, e - )) - })?; - } else if result.is_ok() { + self.harness + .chain + .canonical_head + .fork_choice_write_lock() + .on_valid_payload_envelope_received(block_root) + .map_err(|e| { + Error::InternalError(format!( + "on_execution_payload for block root {} failed: {:?}", + block_root, e + )) + })?; + } else if verification_result.is_ok() { return Err(Error::DidntFail(format!( - "on_execution_payload for block root {} should have failed", + "on_execution_payload envelope for block root {} should have failed", block_root ))); } diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 1399815763..f90b6f2a6e 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use serde::Deserialize; use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; -use state_processing::envelope_processing::VerifyStateRoot; +use state_processing::envelope_processing::verify_execution_payload_envelope; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_block_processing::process_operations::{ process_consolidation_requests, process_deposit_requests_post_gloas, @@ -13,7 +13,7 @@ use state_processing::per_block_processing::process_operations::{ }; use state_processing::{ ConsensusContext, - envelope_processing::{EnvelopeProcessingError, 
process_execution_payload_envelope}, + envelope_processing::EnvelopeProcessingError, per_block_processing::{ VerifyBlockRoot, VerifySignatures, errors::BlockProcessingError, @@ -23,7 +23,7 @@ use state_processing::{ process_bls_to_execution_changes, process_deposits, process_exits, process_payload_attestation, process_proposer_slashings, }, - process_sync_aggregate, withdrawals, + process_parent_execution_payload, process_sync_aggregate, withdrawals, }, }; use std::fmt::Debug; @@ -59,6 +59,12 @@ pub struct ExecutionPayloadBidBlock { block: BeaconBlock, } +/// Newtype for testing parent execution payload processing. +#[derive(Debug, Clone, Deserialize)] +pub struct ParentExecutionPayloadBlock { + block: BeaconBlock, +} + #[derive(Debug, Clone)] pub struct Operations> { metadata: Metadata, @@ -441,8 +447,10 @@ impl Operation for SignedExecutionPayloadEnvelope { "signed_envelope.ssz_snappy".into() } - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name.gloas_enabled() + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + // TODO(gloas): re-enable this test when enabled upstream + // fork_name.gloas_enabled() + false } fn decode(path: &Path, _: ForkName, _spec: &ChainSpec) -> Result { @@ -460,12 +468,12 @@ impl Operation for SignedExecutionPayloadEnvelope { .as_ref() .is_some_and(|e| e.execution_valid); if valid { - process_execution_payload_envelope( + let block_state_root = state.update_tree_hash_cache()?; + verify_execution_payload_envelope( state, - None, self, VerifySignatures::True, - VerifyStateRoot::True, + block_state_root, spec, ) } else { @@ -505,6 +513,36 @@ impl Operation for ExecutionPayloadBidBlock { } } +impl Operation for ParentExecutionPayloadBlock { + type Error = BlockProcessingError; + + fn handler_name() -> String { + "parent_execution_payload".into() + } + + fn filename() -> String { + "block.ssz_snappy".into() + } + + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name.gloas_enabled() + } + + fn decode(path: 
&Path, _fork_name: ForkName, spec: &ChainSpec) -> Result { + ssz_decode_file_with(path, |bytes| BeaconBlock::from_ssz_bytes(bytes, spec)) + .map(|block| ParentExecutionPayloadBlock { block }) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + _: &Operations, + ) -> Result<(), BlockProcessingError> { + process_parent_execution_payload(state, self.block.to_ref(), spec) + } +} + impl Operation for WithdrawalsPayload { type Error = BlockProcessingError; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 4373d6b7d1..96798c910c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -723,8 +723,12 @@ impl Handler for ForkChoiceHandler { return false; } - // on_execution_payload tests exist only for Gloas. - if self.handler_name == "on_execution_payload" && !fork_name.gloas_enabled() { + // on_execution_payload_envelope and get_parent_payload_status tests exist only for + // Gloas and later. + if (self.handler_name == "on_execution_payload_envelope" + || self.handler_name == "get_parent_payload_status") + && !fork_name.gloas_enabled() + { return false; } diff --git a/testing/ef_tests/src/lib.rs b/testing/ef_tests/src/lib.rs index 5587bbed41..0ffedc7eb8 100644 --- a/testing/ef_tests/src/lib.rs +++ b/testing/ef_tests/src/lib.rs @@ -2,10 +2,10 @@ pub use case_result::CaseResult; pub use cases::{ BuilderPendingPayments, Case, EffectiveBalanceUpdates, Eth1DataReset, ExecutionPayloadBidBlock, FeatureName, HistoricalRootsUpdate, HistoricalSummariesUpdate, InactivityUpdates, - JustificationAndFinalization, ParticipationFlagUpdates, ParticipationRecordUpdates, - PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, PtcWindow, RandaoMixesReset, - RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, SyncCommitteeUpdates, - WithdrawalsPayload, + JustificationAndFinalization, ParentExecutionPayloadBlock, ParticipationFlagUpdates, + ParticipationRecordUpdates, 
PendingBalanceDeposits, PendingConsolidations, ProposerLookahead, + PtcWindow, RandaoMixesReset, RegistryUpdates, RewardsAndPenalties, Slashings, SlashingsReset, + SyncCommitteeUpdates, WithdrawalsPayload, }; pub use decode::log_file_access; pub use error::Error; diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 62eb2dd038..79a02d7e80 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -99,6 +99,12 @@ fn operations_execution_payload_bid() { OperationsHandler::>::default().run(); } +#[test] +fn operations_parent_execution_payload() { + OperationsHandler::>::default().run(); + OperationsHandler::>::default().run(); +} + #[test] fn operations_payload_attestation() { OperationsHandler::>::default().run(); @@ -1039,9 +1045,15 @@ fn fork_choice_deposit_with_reorg() { } #[test] -fn fork_choice_on_execution_payload() { - ForkChoiceHandler::::new("on_execution_payload").run(); - ForkChoiceHandler::::new("on_execution_payload").run(); +fn fork_choice_on_execution_payload_envelope() { + ForkChoiceHandler::::new("on_execution_payload_envelope").run(); + ForkChoiceHandler::::new("on_execution_payload_envelope").run(); +} + +#[test] +fn fork_choice_get_parent_payload_status() { + ForkChoiceHandler::::new("get_parent_payload_status").run(); + ForkChoiceHandler::::new("get_parent_payload_status").run(); } #[test] diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 6bf4a1aa52..05170d907c 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -315,6 +315,7 @@ impl TestRig { Address::repeat_byte(42), Some(vec![]), None, + None, ), ) .await; @@ -359,6 +360,7 @@ impl TestRig { suggested_fee_recipient, Some(vec![]), None, + None, ); let payload_parameters = PayloadParameters { @@ -517,6 +519,7 @@ impl TestRig { suggested_fee_recipient, Some(vec![]), None, + None, ); let 
payload_parameters = PayloadParameters { @@ -577,6 +580,7 @@ impl TestRig { Address::repeat_byte(42), Some(vec![]), None, + None, ); let slot = Slot::new(42); let head_block_root = Hash256::repeat_byte(100); diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index 76f7a86aab..c5bcd88eb1 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1432,7 +1432,7 @@ impl ValidatorStore for LighthouseValidatorS ) -> Result, Error> { let signing_context = self.signing_context( Domain::BeaconBuilder, - envelope.slot.epoch(E::slots_per_epoch()), + envelope.slot().epoch(E::slots_per_epoch()), ); // Execution payload envelope signing is not slashable, bypass doppelganger protection.