From eec0700f94ee551484b6a39d596900ff53a8aba9 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Feb 2026 18:09:35 -0800 Subject: [PATCH] Gloas local block building MVP (#8754) The flow for local block building is 1. Create execution payload and bid 2. Construct beacon block 3. Sign beacon block and publish 4. Sign execution payload and publish This PR adds the beacon block v4 flow , GET payload envelope and POST payload envelope (local block building only). The spec for these endpoints can be found here: https://github.com/ethereum/beacon-APIs/pull/552 and is subject to change. We needed a way to store the unsigned execution payload envelope associated to the execution payload bid that was included in the block. I introduced a new cache that stores these unsigned execution payload envelopes. the GET payload envelope queries this cache directly so that a proposer, after publishing a block, can fetch the payload envelope + sign and publish it. I kept payload signing and publishing within the validators block service to keep things simple for now. The idea was to build out a block production MVP for devnet 0, try not to affect any non gloas code paths and build things out in such a way that it will be easy to deprecate pre-gloas code paths later on (for example block production v2 and v3). We will eventually need to track which beacon node was queried for the block so that we can later query it for the payload. But thats not needed for the devnet. 
Co-Authored-By: Eitan Seri- Levi Co-Authored-By: Michael Sproul Co-Authored-By: Jimmy Chen Co-Authored-By: Eitan Seri-Levi --- beacon_node/beacon_chain/src/beacon_chain.rs | 257 +---- .../src/block_production/gloas.rs | 890 ++++++++++++++++++ .../beacon_chain/src/block_production/mod.rs | 223 +++++ beacon_node/beacon_chain/src/builder.rs | 1 + beacon_node/beacon_chain/src/errors.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 2 + .../src/pending_payload_envelopes.rs | 151 +++ beacon_node/client/src/notifier.rs | 10 + beacon_node/execution_layer/src/lib.rs | 60 +- .../src/test_utils/mock_builder.rs | 2 +- .../src/beacon/execution_payload_envelope.rs | 116 +++ beacon_node/http_api/src/beacon/mod.rs | 1 + beacon_node/http_api/src/beacon/states.rs | 1 - beacon_node/http_api/src/lib.rs | 63 +- beacon_node/http_api/src/produce_block.rs | 84 +- .../validator/execution_payload_envelope.rs | 110 +++ beacon_node/http_api/src/validator/mod.rs | 41 +- beacon_node/http_api/tests/tests.rs | 247 ++++- common/eth2/src/lib.rs | 274 +++++- common/eth2/src/types.rs | 39 +- .../src/envelope_processing.rs | 38 +- consensus/types/src/builder/builder_bid.rs | 2 +- .../types/src/core/application_domain.rs | 1 + consensus/types/src/core/chain_spec.rs | 4 +- .../validator/validator_registration_data.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 10 +- .../lighthouse_validator_store/src/lib.rs | 45 +- validator_client/signing_method/src/lib.rs | 5 + .../signing_method/src/web3signer.rs | 4 + .../validator_services/src/block_service.rs | 305 ++++-- validator_client/validator_store/src/lib.rs | 12 +- 31 files changed, 2656 insertions(+), 346 deletions(-) create mode 100644 beacon_node/beacon_chain/src/block_production/gloas.rs create mode 100644 beacon_node/beacon_chain/src/block_production/mod.rs create mode 100644 beacon_node/beacon_chain/src/pending_payload_envelopes.rs create mode 100644 beacon_node/http_api/src/beacon/execution_payload_envelope.rs create mode 100644 
beacon_node/http_api/src/validator/execution_payload_envelope.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 4ae7871758..d0f5297f1b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -31,7 +31,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; use crate::fetch_blobs::EngineGetBlobsOutput; -use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; +use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiSettings}; use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ @@ -56,6 +56,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_data_sidecars::ObservedDataSidecars; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_slashable::ObservedSlashable; +use crate::pending_payload_envelopes::PendingPayloadEnvelopes; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_custody::persist_custody_context; use crate::persisted_fork_choice::PersistedForkChoice; @@ -235,7 +236,7 @@ pub struct PrePayloadAttributes { /// /// The parent block number is not part of the payload attributes sent to the EL, but *is* /// sent to builders via SSE. - pub parent_block_number: u64, + pub parent_block_number: Option, /// The block root of the block being built upon (same block as fcU `headBlockHash`). pub parent_beacon_block_root: Hash256, } @@ -419,6 +420,9 @@ pub struct BeaconChain { RwLock, T::EthSpec>>, /// Maintains a record of slashable message seen over the gossip network or RPC. 
pub observed_slashable: RwLock>, + /// Cache of pending execution payload envelopes for local block building. + /// Envelopes are stored here during block production and eventually published. + pub pending_payload_envelopes: RwLock>, /// Maintains a record of which validators have submitted voluntary exits. pub observed_voluntary_exits: Mutex>, /// Maintains a record of which validators we've seen proposer slashings for. @@ -4504,55 +4508,6 @@ impl BeaconChain { Ok(()) } - /// If configured, wait for the fork choice run at the start of the slot to complete. - #[instrument(level = "debug", skip_all)] - fn wait_for_fork_choice_before_block_production( - self: &Arc, - slot: Slot, - ) -> Result<(), BlockProductionError> { - if let Some(rx) = &self.fork_choice_signal_rx { - let current_slot = self - .slot() - .map_err(|_| BlockProductionError::UnableToReadSlot)?; - - let timeout = Duration::from_millis(self.config.fork_choice_before_proposal_timeout_ms); - - if slot == current_slot || slot == current_slot + 1 { - match rx.wait_for_fork_choice(slot, timeout) { - ForkChoiceWaitResult::Success(fc_slot) => { - debug!( - %slot, - fork_choice_slot = %fc_slot, - "Fork choice successfully updated before block production" - ); - } - ForkChoiceWaitResult::Behind(fc_slot) => { - warn!( - fork_choice_slot = %fc_slot, - %slot, - message = "this block may be orphaned", - "Fork choice notifier out of sync with block production" - ); - } - ForkChoiceWaitResult::TimeOut => { - warn!( - message = "this block may be orphaned", - "Timed out waiting for fork choice before proposal" - ); - } - } - } else { - error!( - %slot, - %current_slot, - message = "check clock sync, this block may be orphaned", - "Producing block at incorrect slot" - ); - } - } - Ok(()) - } - pub async fn produce_block_with_verification( self: &Arc, randao_reveal: Signature, @@ -4599,165 +4554,6 @@ impl BeaconChain { .await } - /// Load a beacon state from the database for block production. 
This is a long-running process - /// that should not be performed in an `async` context. - fn load_state_for_block_production( - self: &Arc, - slot: Slot, - ) -> Result<(BeaconState, Option), BlockProductionError> { - let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); - self.wait_for_fork_choice_before_block_production(slot)?; - drop(fork_choice_timer); - - let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); - - // Atomically read some values from the head whilst avoiding holding cached head `Arc` any - // longer than necessary. - let (head_slot, head_block_root, head_state_root) = { - let head = self.canonical_head.cached_head(); - ( - head.head_slot(), - head.head_block_root(), - head.head_state_root(), - ) - }; - let (state, state_root_opt) = if head_slot < slot { - // Attempt an aggressive re-org if configured and the conditions are right. - if let Some((re_org_state, re_org_state_root)) = - self.get_state_for_re_org(slot, head_slot, head_block_root) - { - info!( - %slot, - head_to_reorg = %head_block_root, - "Proposing block to re-org current head" - ); - (re_org_state, Some(re_org_state_root)) - } else { - // Fetch the head state advanced through to `slot`, which should be present in the - // state cache thanks to the state advance timer. - let (state_root, state) = self - .store - .get_advanced_hot_state(head_block_root, slot, head_state_root) - .map_err(BlockProductionError::FailedToLoadState)? 
- .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; - (state, Some(state_root)) - } - } else { - warn!( - message = "this block is more likely to be orphaned", - %slot, - "Producing block that conflicts with head" - ); - let state = self - .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) - .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - - (state, None) - }; - - drop(state_load_timer); - - Ok((state, state_root_opt)) - } - - /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. - /// - /// This function will return `None` if proposer re-orgs are disabled. - #[instrument(skip_all, level = "debug")] - fn get_state_for_re_org( - &self, - slot: Slot, - head_slot: Slot, - canonical_head: Hash256, - ) -> Option<(BeaconState, Hash256)> { - let re_org_head_threshold = self.config.re_org_head_threshold?; - let re_org_parent_threshold = self.config.re_org_parent_threshold?; - - if self.spec.proposer_score_boost.is_none() { - warn!( - reason = "this network does not have proposer boost enabled", - "Ignoring proposer re-org configuration" - ); - return None; - } - - let slot_delay = self - .slot_clock - .seconds_from_current_slot_start() - .or_else(|| { - warn!(error = "unable to read slot clock", "Not attempting re-org"); - None - })?; - - // Attempt a proposer re-org if: - // - // 1. It seems we have time to propagate and still receive the proposer boost. - // 2. The current head block was seen late. - // 3. The `get_proposer_head` conditions from fork choice pass. 
- let proposing_on_time = - slot_delay < self.config.re_org_cutoff(self.spec.get_slot_duration()); - if !proposing_on_time { - debug!(reason = "not proposing on time", "Not attempting re-org"); - return None; - } - - let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot); - if !head_late { - debug!(reason = "head not late", "Not attempting re-org"); - return None; - } - - // Is the current head weak and appropriate for re-orging? - let proposer_head_timer = - metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES); - let proposer_head = self - .canonical_head - .fork_choice_read_lock() - .get_proposer_head( - slot, - canonical_head, - re_org_head_threshold, - re_org_parent_threshold, - &self.config.re_org_disallowed_offsets, - self.config.re_org_max_epochs_since_finalization, - ) - .map_err(|e| match e { - ProposerHeadError::DoNotReOrg(reason) => { - debug!( - %reason, - "Not attempting re-org" - ); - } - ProposerHeadError::Error(e) => { - warn!( - error = ?e, - "Not attempting re-org" - ); - } - }) - .ok()?; - drop(proposer_head_timer); - let re_org_parent_block = proposer_head.parent_node.root; - - let (state_root, state) = self - .store - .get_advanced_hot_state_from_cache(re_org_parent_block, slot) - .or_else(|| { - warn!(reason = "no state in cache", "Not attempting re-org"); - None - })?; - - info!( - weak_head = ?canonical_head, - parent = ?re_org_parent_block, - head_weight = proposer_head.head_node.weight, - threshold_weight = proposer_head.re_org_head_weight_threshold, - "Attempting re-org due to weak head" - ); - - Some((state, state_root)) - } - /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. /// /// The `proposer_head` may be the head block of `cached_head` or its parent. An error will @@ -4840,15 +4636,25 @@ impl BeaconChain { return Ok(None); }; - // Get the `prev_randao` and parent block number. 
- let head_block_number = cached_head.head_block_number()?; - let (prev_randao, parent_block_number) = if proposer_head == head_parent_block_root { - ( - cached_head.parent_random()?, - head_block_number.saturating_sub(1), - ) + // TODO(gloas) not sure what to do here see this issue + // https://github.com/sigp/lighthouse/issues/8817 + let (prev_randao, parent_block_number) = if self + .spec + .fork_name_at_slot::(proposal_slot) + .gloas_enabled() + { + (cached_head.head_random()?, None) } else { - (cached_head.head_random()?, head_block_number) + // Get the `prev_randao` and parent block number. + let head_block_number = cached_head.head_block_number()?; + if proposer_head == head_parent_block_root { + ( + cached_head.parent_random()?, + Some(head_block_number.saturating_sub(1)), + ) + } else { + (cached_head.head_random()?, Some(head_block_number)) + } }; Ok(Some(PrePayloadAttributes { @@ -5093,7 +4899,11 @@ impl BeaconChain { } /// Check if the block with `block_root` was observed after the attestation deadline of `slot`. - fn block_observed_after_attestation_deadline(&self, block_root: Hash256, slot: Slot) -> bool { + pub(crate) fn block_observed_after_attestation_deadline( + &self, + block_root: Hash256, + slot: Slot, + ) -> bool { let block_delays = self.block_times_cache.read().get_block_delays( block_root, self.slot_clock @@ -5860,7 +5670,11 @@ impl BeaconChain { execution_payload_value, ) } - BeaconState::Gloas(_) => return Err(BlockProductionError::GloasNotImplemented), + BeaconState::Gloas(_) => { + return Err(BlockProductionError::GloasNotImplemented( + "Attempting to produce gloas beacn block via non gloas code path".to_owned(), + )); + } }; let block = SignedBeaconBlock::from_block( @@ -6198,13 +6012,14 @@ impl BeaconChain { // Push a server-sent event (probably to a block builder or relay). 
if let Some(event_handler) = &self.event_handler && event_handler.has_payload_attributes_subscribers() + && let Some(parent_block_number) = pre_payload_attributes.parent_block_number { event_handler.register(EventKind::PayloadAttributes(ForkVersionedResponse { data: SseExtendedPayloadAttributes { proposal_slot: prepare_slot, proposer_index: proposer, parent_block_root: head_root, - parent_block_number: pre_payload_attributes.parent_block_number, + parent_block_number, parent_block_hash: forkchoice_update_params.head_hash.unwrap_or_default(), payload_attributes: payload_attributes.into(), }, diff --git a/beacon_node/beacon_chain/src/block_production/gloas.rs b/beacon_node/beacon_chain/src/block_production/gloas.rs new file mode 100644 index 0000000000..025cf21a73 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_production/gloas.rs @@ -0,0 +1,890 @@ +use std::collections::HashMap; +use std::marker::PhantomData; +use std::sync::Arc; + +use bls::Signature; +use execution_layer::{ + BlockProposalContentsGloas, BuilderParams, PayloadAttributes, PayloadParameters, +}; +use operation_pool::CompactAttestationRef; +use ssz::Encode; +use state_processing::common::get_attesting_indices_from_state; +use state_processing::envelope_processing::{VerifyStateRoot, process_execution_payload_envelope}; +use state_processing::epoch_cache::initialize_epoch_cache; +use state_processing::per_block_processing::{ + compute_timestamp_at_slot, get_expected_withdrawals, verify_attestation_for_block_inclusion, +}; +use state_processing::{ + BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, +}; +use state_processing::{VerifyOperation, state_advance::complete_state_advance}; +use task_executor::JoinHandle; +use tracing::{Instrument, Span, debug, debug_span, error, instrument, trace, warn}; +use tree_hash::TreeHash; +use types::consts::gloas::BUILDER_INDEX_SELF_BUILD; +use types::{ + Address, Attestation, AttestationElectra, AttesterSlashing, 
AttesterSlashingElectra, + BeaconBlock, BeaconBlockBodyGloas, BeaconBlockGloas, BeaconState, BeaconStateError, + BuilderIndex, Deposit, Eth1Data, EthSpec, ExecutionBlockHash, ExecutionPayloadBid, + ExecutionPayloadEnvelope, ExecutionPayloadGloas, ExecutionRequests, FullPayload, Graffiti, + Hash256, PayloadAttestation, ProposerSlashing, RelativeEpoch, SignedBeaconBlock, + SignedBlsToExecutionChange, SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope, + SignedVoluntaryExit, Slot, SyncAggregate, Withdrawal, Withdrawals, +}; + +use crate::{ + BeaconChain, BeaconChainError, BeaconChainTypes, BlockProductionError, + ProduceBlockVerification, graffiti_calculator::GraffitiSettings, metrics, +}; + +pub const BID_VALUE_SELF_BUILD: u64 = 0; +pub const EXECUTION_PAYMENT_TRUSTLESS_BUILD: u64 = 0; + +type ConsensusBlockValue = u64; +type BlockProductionResult = (BeaconBlock>, ConsensusBlockValue); + +pub type PreparePayloadResult = Result, BlockProductionError>; +pub type PreparePayloadHandle = JoinHandle>>; + +pub struct PartialBeaconBlock { + slot: Slot, + proposer_index: u64, + parent_root: Hash256, + randao_reveal: Signature, + eth1_data: Eth1Data, + graffiti: Graffiti, + proposer_slashings: Vec, + attester_slashings: Vec>, + attestations: Vec>, + payload_attestations: Vec>, + deposits: Vec, + voluntary_exits: Vec, + sync_aggregate: Option>, + bls_to_execution_changes: Vec, +} + +/// Data needed to construct an ExecutionPayloadEnvelope. +/// The envelope requires the beacon_block_root which can only be computed after the block exists. 
+pub struct ExecutionPayloadData { + pub payload: ExecutionPayloadGloas, + pub execution_requests: ExecutionRequests, + pub builder_index: BuilderIndex, + pub slot: Slot, +} + +impl BeaconChain { + pub async fn produce_block_with_verification_gloas( + self: &Arc, + randao_reveal: Signature, + slot: Slot, + graffiti_settings: GraffitiSettings, + verification: ProduceBlockVerification, + _builder_boost_factor: Option, + ) -> Result, BlockProductionError> { + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + // Part 1/2 (blocking) + // + // Load the parent state from disk. + let chain = self.clone(); + let span = Span::current(); + let (state, state_root_opt) = self + .task_executor + .spawn_blocking_handle( + move || { + let _guard = + debug_span!(parent: span, "load_state_for_block_production").entered(); + chain.load_state_for_block_production(slot) + }, + "load_state_for_block_production", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/2 (async, with some blocking components) + // + // Produce the block upon the state + self.produce_block_on_state_gloas( + state, + state_root_opt, + slot, + randao_reveal, + graffiti_settings, + verification, + ) + .await + } + + // TODO(gloas) need to implement builder boost factor logic + #[instrument(level = "debug", skip_all)] + pub async fn produce_block_on_state_gloas( + self: &Arc, + state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + graffiti_settings: GraffitiSettings, + verification: ProduceBlockVerification, + ) -> Result, BlockProductionError> { + // Part 1/3 (blocking) + // + // Perform the state advance and block-packing functions. 
+ let chain = self.clone(); + let graffiti = self + .graffiti_calculator + .get_graffiti(graffiti_settings) + .await; + let span = Span::current(); + let (partial_beacon_block, state) = self + .task_executor + .spawn_blocking_handle( + move || { + let _guard = + debug_span!(parent: span, "produce_partial_beacon_block_gloas").entered(); + chain.produce_partial_beacon_block_gloas( + state, + state_root_opt, + produce_at_slot, + randao_reveal, + graffiti, + ) + }, + "produce_partial_beacon_block_gloas", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)??; + + // Part 2/3 (async) + // + // Produce the execution payload bid. + // TODO(gloas) this is strictly for building local bids + // We'll need to build out trustless/trusted bid paths. + let (execution_payload_bid, state, payload_data) = self + .clone() + .produce_execution_payload_bid( + state, + produce_at_slot, + BID_VALUE_SELF_BUILD, + BUILDER_INDEX_SELF_BUILD, + ) + .await?; + + // Part 3/3 (blocking) + // + // Complete the block with the execution payload bid. + let chain = self.clone(); + let span = Span::current(); + self.task_executor + .spawn_blocking_handle( + move || { + let _guard = + debug_span!(parent: span, "complete_partial_beacon_block_gloas").entered(); + chain.complete_partial_beacon_block_gloas( + partial_beacon_block, + execution_payload_bid, + payload_data, + state, + verification, + ) + }, + "complete_partial_beacon_block_gloas", + ) + .ok_or(BlockProductionError::ShuttingDown)? + .await + .map_err(BlockProductionError::TokioJoin)? + } + + #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] + fn produce_partial_beacon_block_gloas( + self: &Arc, + mut state: BeaconState, + state_root_opt: Option, + produce_at_slot: Slot, + randao_reveal: Signature, + graffiti: Graffiti, + ) -> Result<(PartialBeaconBlock, BeaconState), BlockProductionError> + { + // It is invalid to try to produce a block using a state from a future slot. 
+ if state.slot() > produce_at_slot { + return Err(BlockProductionError::StateSlotTooHigh { + produce_at_slot, + state_slot: state.slot(), + }); + } + + let slot_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_SLOT_PROCESS_TIMES); + + // Ensure the state has performed a complete transition into the required slot. + complete_state_advance(&mut state, state_root_opt, produce_at_slot, &self.spec)?; + + drop(slot_timer); + + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + state.apply_pending_mutations()?; + + let parent_root = if state.slot() > 0 { + *state + .get_block_root(state.slot() - 1) + .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)? + } else { + state.latest_block_header().canonical_root() + }; + + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + + let slashings_and_exits_span = debug_span!("get_slashings_and_exits").entered(); + let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = + self.op_pool.get_slashings_and_exits(&state, &self.spec); + + drop(slashings_and_exits_span); + + let eth1_data = state.eth1_data().clone(); + + let deposits = vec![]; + + let bls_changes_span = debug_span!("get_bls_to_execution_changes").entered(); + let bls_to_execution_changes = self + .op_pool + .get_bls_to_execution_changes(&state, &self.spec); + drop(bls_changes_span); + + // Iterate through the naive aggregation pool and ensure all the attestations from there + // are included in the operation pool. 
+ { + let _guard = debug_span!("import_naive_aggregation_pool").entered(); + let _unagg_import_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES); + for attestation in self.naive_aggregation_pool.read().iter() { + let import = |attestation: &Attestation| { + let attesting_indices = + get_attesting_indices_from_state(&state, attestation.to_ref())?; + self.op_pool + .insert_attestation(attestation.clone(), attesting_indices) + }; + if let Err(e) = import(attestation) { + // Don't stop block production if there's an error, just create a log. + error!( + reason = ?e, + "Attestation did not transfer to op pool" + ); + } + } + }; + + let mut attestations = { + let _guard = debug_span!("pack_attestations").entered(); + let _attestation_packing_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); + + // Epoch cache and total balance cache are required for op pool packing. + state.build_total_active_balance_cache(&self.spec)?; + initialize_epoch_cache(&mut state, &self.spec)?; + + let mut prev_filter_cache = HashMap::new(); + let prev_attestation_filter = |att: &CompactAttestationRef| { + self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) + }; + let mut curr_filter_cache = HashMap::new(); + let curr_attestation_filter = |att: &CompactAttestationRef| { + self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) + }; + + self.op_pool + .get_attestations( + &state, + prev_attestation_filter, + curr_attestation_filter, + &self.spec, + ) + .map_err(BlockProductionError::OpPoolError)? + }; + + // If paranoid mode is enabled re-check the signatures of every included message. + // This will be a lot slower but guards against bugs in block production and can be + // quickly rolled out without a release. 
+ if self.config.paranoid_block_proposal { + let mut tmp_ctxt = ConsensusContext::new(state.slot()); + attestations.retain(|att| { + verify_attestation_for_block_inclusion( + &state, + att.to_ref(), + &mut tmp_ctxt, + VerifySignatures::True, + &self.spec, + ) + .map_err(|e| { + warn!( + err = ?e, + block_slot = %state.slot(), + attestation = ?att, + "Attempted to include an invalid attestation" + ); + }) + .is_ok() + }); + + proposer_slashings.retain(|slashing| { + slashing + .clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + err = ?e, + block_slot = %state.slot(), + ?slashing, + "Attempted to include an invalid proposer slashing" + ); + }) + .is_ok() + }); + + attester_slashings.retain(|slashing| { + slashing + .clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + err = ?e, + block_slot = %state.slot(), + ?slashing, + "Attempted to include an invalid attester slashing" + ); + }) + .is_ok() + }); + + voluntary_exits.retain(|exit| { + exit.clone() + .validate(&state, &self.spec) + .map_err(|e| { + warn!( + err = ?e, + block_slot = %state.slot(), + ?exit, + "Attempted to include an invalid proposer slashing" + ); + }) + .is_ok() + }); + + // TODO(gloas) verifiy payload attestation signature here as well + } + + let attester_slashings = attester_slashings + .into_iter() + .filter_map(|a| match a { + AttesterSlashing::Base(_) => None, + AttesterSlashing::Electra(a) => Some(a), + }) + .collect::>(); + + let attestations = attestations + .into_iter() + .filter_map(|a| match a { + Attestation::Base(_) => None, + Attestation::Electra(a) => Some(a), + }) + .collect::>(); + + let slot = state.slot(); + + let sync_aggregate = if matches!(&state, BeaconState::Base(_)) { + None + } else { + let sync_aggregate = self + .op_pool + .get_sync_aggregate(&state) + .map_err(BlockProductionError::OpPoolError)? 
+ .unwrap_or_else(|| { + warn!( + slot = %state.slot(), + "Producing block with no sync contributions" + ); + SyncAggregate::new() + }); + Some(sync_aggregate) + }; + + Ok(( + PartialBeaconBlock { + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + // TODO(gloas) need to implement payload attestations + payload_attestations: vec![], + bls_to_execution_changes, + }, + state, + )) + } + + #[allow(clippy::type_complexity)] + fn complete_partial_beacon_block_gloas( + &self, + partial_beacon_block: PartialBeaconBlock, + signed_execution_payload_bid: SignedExecutionPayloadBid, + payload_data: Option>, + mut state: BeaconState, + verification: ProduceBlockVerification, + ) -> Result<(BeaconBlock>, u64), BlockProductionError> { + let PartialBeaconBlock { + slot, + proposer_index, + parent_root, + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + payload_attestations, + bls_to_execution_changes, + } = partial_beacon_block; + + let beacon_block = match &state { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) + | BeaconState::Electra(_) + | BeaconState::Fulu(_) => { + return Err(BlockProductionError::InvalidBlockVariant( + "Cannot construct a block pre-Gloas".to_owned(), + )); + } + BeaconState::Gloas(_) => BeaconBlock::Gloas(BeaconBlockGloas { + slot, + proposer_index, + parent_root, + state_root: Hash256::ZERO, + body: BeaconBlockBodyGloas { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations + .try_into() + 
.map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + signed_execution_payload_bid, + payload_attestations: payload_attestations + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + _phantom: PhantomData::>, + }, + }), + }; + + let signed_beacon_block = SignedBeaconBlock::from_block( + beacon_block, + // The block is not signed here, that is the task of a validator client. + Signature::empty(), + ); + + let block_size = signed_beacon_block.ssz_bytes_len(); + debug!(%block_size, "Produced block on state"); + + metrics::observe(&metrics::BLOCK_SIZE, block_size as f64); + + if block_size > self.config.max_network_size { + return Err(BlockProductionError::BlockTooLarge(block_size)); + } + + let process_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_PROCESS_TIMES); + let signature_strategy = match verification { + ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao, + ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification, + }; + + // Use a context without block root or proposer index so that both are checked. 
+ let mut ctxt = ConsensusContext::new(signed_beacon_block.slot()); + + let consensus_block_value = self + .compute_beacon_block_reward(signed_beacon_block.message(), &mut state) + .map(|reward| reward.total) + .unwrap_or(0); + + state_processing::per_block_processing( + &mut state, + &signed_beacon_block, + signature_strategy, + VerifyBlockRoot::True, + &mut ctxt, + &self.spec, + )?; + drop(process_timer); + + let state_root_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_ROOT_TIMES); + + let state_root = state.update_tree_hash_cache()?; + + drop(state_root_timer); + + let (mut block, _) = signed_beacon_block.deconstruct(); + *block.state_root_mut() = state_root; + + // Construct and cache the ExecutionPayloadEnvelope if we have payload data. + // For local building, we always have payload data. + // For trustless building, the builder will provide the envelope separately. + if let Some(payload_data) = payload_data { + let beacon_block_root = block.tree_hash_root(); + let execution_payload_envelope = ExecutionPayloadEnvelope { + payload: payload_data.payload, + execution_requests: payload_data.execution_requests, + builder_index: payload_data.builder_index, + beacon_block_root, + slot: payload_data.slot, + state_root: Hash256::ZERO, + }; + + let mut signed_envelope = SignedExecutionPayloadEnvelope { + message: execution_payload_envelope, + signature: Signature::empty(), + }; + + // TODO(gloas) add better error variant + // We skip state root verification here because the relevant state root + // cant be calculated until after the new block has been constructed. 
+ process_execution_payload_envelope( + &mut state, + None, + &signed_envelope, + VerifySignatures::False, + VerifyStateRoot::False, + &self.spec, + ) + .map_err(|_| { + BlockProductionError::GloasNotImplemented( + "process_execution_payload_envelope failed".to_owned(), + ) + })?; + + signed_envelope.message.state_root = state.update_tree_hash_cache()?; + + // Cache the envelope for later retrieval by the validator for signing and publishing. + let envelope_slot = payload_data.slot; + // TODO(gloas) might be safer to cache by root instead of by slot. + // We should revisit this once this code path + beacon api spec matures + self.pending_payload_envelopes + .write() + .insert(envelope_slot, signed_envelope.message); + + debug!( + %beacon_block_root, + slot = %envelope_slot, + "Cached pending execution payload envelope" + ); + } + + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); + + trace!( + parent = ?block.parent_root(), + attestations = block.body().attestations_len(), + slot = %block.slot(), + "Produced beacon block" + ); + + Ok((block, consensus_block_value)) + } + + // TODO(gloas) introduce `ProposerPreferences` so we can build out trustless + // bid building. Right now this only works for local building. + /// Produce an `ExecutionPayloadBid` for some `slot` upon the given `state`. + /// This function assumes we've already advanced `state`. + /// + /// Returns the signed bid, the state, and optionally the payload data needed to construct + /// the `ExecutionPayloadEnvelope` after the beacon block is created. + /// + /// For local building, payload data is always returned (`Some`). + /// For trustless building, the builder provides the envelope separately, so `None` is returned. 
+ #[allow(clippy::type_complexity)] + #[instrument(level = "debug", skip_all)] + pub async fn produce_execution_payload_bid( + self: Arc, + mut state: BeaconState, + produce_at_slot: Slot, + bid_value: u64, + builder_index: BuilderIndex, + ) -> Result< + ( + SignedExecutionPayloadBid, + BeaconState, + Option>, + ), + BlockProductionError, + > { + // TODO(gloas) For non local building, add sanity check on value + // The builder MUST have enough excess balance to fulfill this bid (i.e. `value`) and all pending payments. + + // TODO(gloas) add metrics for execution payload bid production + + let parent_root = if state.slot() > 0 { + *state + .get_block_root(state.slot() - 1) + .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)? + } else { + state.latest_block_header().canonical_root() + }; + + let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64; + + let pubkey = state + .validators() + .get(proposer_index as usize) + .map(|v| v.pubkey) + .ok_or(BlockProductionError::BeaconChain(Box::new( + BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), + )))?; + + let builder_params = BuilderParams { + pubkey, + slot: state.slot(), + chain_health: self + .is_healthy(&parent_root) + .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?, + }; + + // TODO(gloas) this should be BlockProductionVersion::V4 + // V3 is okay for now as long as we're not connected to a builder + // TODO(gloas) add builder boost factor + let prepare_payload_handle = get_execution_payload_gloas( + self.clone(), + &state, + parent_root, + proposer_index, + builder_params, + )?; + + let block_proposal_contents = prepare_payload_handle + .await + .map_err(BlockProductionError::TokioJoin)? 
+ .ok_or(BlockProductionError::ShuttingDown)??; + + let BlockProposalContentsGloas { + payload, + payload_value: _, + execution_requests, + blob_kzg_commitments, + blobs_and_proofs: _, + } = block_proposal_contents; + + let state_root = state.update_tree_hash_cache()?; + + // TODO(gloas) since we are defaulting to local building, execution payment is 0 + // execution payment should only be set to > 0 for trusted building. + let bid = ExecutionPayloadBid:: { + parent_block_hash: state.latest_block_hash()?.to_owned(), + parent_block_root: state.get_latest_block_root(state_root), + block_hash: payload.block_hash, + prev_randao: payload.prev_randao, + fee_recipient: Address::ZERO, + gas_limit: payload.gas_limit, + builder_index, + slot: produce_at_slot, + value: bid_value, + execution_payment: EXECUTION_PAYMENT_TRUSTLESS_BUILD, + blob_kzg_commitments, + }; + + // Store payload data for envelope construction after block is created + let payload_data = ExecutionPayloadData { + payload, + execution_requests, + builder_index, + slot: produce_at_slot, + }; + + // TODO(gloas) this is only local building + // we'll need to implement builder signature for the trustless path + Ok(( + SignedExecutionPayloadBid { + message: bid, + // TODO(gloas) return better error variant here + signature: Signature::infinity().map_err(|_| { + BlockProductionError::GloasNotImplemented( + "Failed to generate infinity signature".to_owned(), + ) + })?, + }, + state, + // Local building always returns payload data. + // Trustless building would return None here. + Some(payload_data), + )) + } +} + +/// Gets an execution payload for inclusion in a block. +/// +/// ## Errors +/// +/// Will return an error when using a pre-Gloas `state`. Ensure to only run this function +/// after the Gloas fork. 
+/// +/// ## Specification +/// +/// Equivalent to the `get_execution_payload` function in the Validator Guide: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal +fn get_execution_payload_gloas( + chain: Arc>, + state: &BeaconState, + parent_beacon_block_root: Hash256, + proposer_index: u64, + builder_params: BuilderParams, +) -> Result, BlockProductionError> { + // Compute all required values from the `state` now to avoid needing to pass it into a spawned + // task. + let spec = &chain.spec; + let current_epoch = state.current_epoch(); + let timestamp = + compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; + let random = *state.get_randao_mix(current_epoch)?; + + let latest_execution_block_hash = *state.latest_block_hash()?; + let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit; + + let withdrawals = + Withdrawals::::from(get_expected_withdrawals(state, spec)?).into(); + + // Spawn a task to obtain the execution payload from the EL via a series of async calls. The + // `join_handle` can be used to await the result of the function. + let join_handle = chain + .task_executor + .clone() + .spawn_handle( + async move { + prepare_execution_payload::( + &chain, + timestamp, + random, + proposer_index, + latest_execution_block_hash, + latest_gas_limit, + builder_params, + withdrawals, + parent_beacon_block_root, + ) + .await + } + .instrument(debug_span!("prepare_execution_payload")), + "prepare_execution_payload", + ) + .ok_or(BlockProductionError::ShuttingDown)?; + + Ok(join_handle) +} + +/// Prepares an execution payload for inclusion in a block. +/// +/// ## Errors +/// +/// Will return an error when using a pre-Gloas fork `state`. Ensure to only run this function +/// after the Gloas fork. 
+/// +/// ## Specification +/// +/// Equivalent to the `prepare_execution_payload` function in the Validator Guide: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal +#[allow(clippy::too_many_arguments)] +async fn prepare_execution_payload( + chain: &Arc>, + timestamp: u64, + random: Hash256, + proposer_index: u64, + parent_block_hash: ExecutionBlockHash, + parent_gas_limit: u64, + builder_params: BuilderParams, + withdrawals: Vec, + parent_beacon_block_root: Hash256, +) -> Result, BlockProductionError> +where + T: BeaconChainTypes, +{ + let spec = &chain.spec; + let fork = spec.fork_name_at_slot::(builder_params.slot); + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BlockProductionError::ExecutionLayerMissing)?; + + // Try to obtain the fork choice update parameters from the cached head. + // + // Use a blocking task to interact with the `canonical_head` lock otherwise we risk blocking the + // core `tokio` executor. 
+ let inner_chain = chain.clone(); + let forkchoice_update_params = chain + .spawn_blocking_handle( + move || { + inner_chain + .canonical_head + .cached_head() + .forkchoice_update_parameters() + }, + "prepare_execution_payload_forkchoice_update_params", + ) + .instrument(debug_span!("forkchoice_update_params")) + .await + .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?; + + let suggested_fee_recipient = execution_layer + .get_suggested_fee_recipient(proposer_index) + .await; + let payload_attributes = PayloadAttributes::new( + timestamp, + random, + suggested_fee_recipient, + Some(withdrawals), + Some(parent_beacon_block_root), + ); + + let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; + let payload_parameters = PayloadParameters { + parent_hash: parent_block_hash, + parent_gas_limit, + proposer_gas_limit: target_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + + let block_contents = execution_layer + .get_payload_gloas(payload_parameters) + .await + .map_err(BlockProductionError::GetPayloadFailed)?; + + Ok(block_contents) +} diff --git a/beacon_node/beacon_chain/src/block_production/mod.rs b/beacon_node/beacon_chain/src/block_production/mod.rs new file mode 100644 index 0000000000..76c8b77e93 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_production/mod.rs @@ -0,0 +1,223 @@ +use std::{sync::Arc, time::Duration}; + +use proto_array::ProposerHeadError; +use slot_clock::SlotClock; +use tracing::{debug, error, info, instrument, warn}; +use types::{BeaconState, Hash256, Slot}; + +use crate::{ + BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig, + fork_choice_signal::ForkChoiceWaitResult, metrics, +}; + +mod gloas; + +impl BeaconChain { + /// Load a beacon state from the database for block production. This is a long-running process + /// that should not be performed in an `async` context. 
+ pub(crate) fn load_state_for_block_production( + self: &Arc, + slot: Slot, + ) -> Result<(BeaconState, Option), BlockProductionError> { + let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES); + self.wait_for_fork_choice_before_block_production(slot)?; + drop(fork_choice_timer); + + let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); + + // Atomically read some values from the head whilst avoiding holding cached head `Arc` any + // longer than necessary. + let (head_slot, head_block_root, head_state_root) = { + let head = self.canonical_head.cached_head(); + ( + head.head_slot(), + head.head_block_root(), + head.head_state_root(), + ) + }; + let (state, state_root_opt) = if head_slot < slot { + // Attempt an aggressive re-org if configured and the conditions are right. + if let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) + { + info!( + %slot, + head_to_reorg = %head_block_root, + "Proposing block to re-org current head" + ); + (re_org_state, Some(re_org_state_root)) + } else { + // Fetch the head state advanced through to `slot`, which should be present in the + // state cache thanks to the state advance timer. + let (state_root, state) = self + .store + .get_advanced_hot_state(head_block_root, slot, head_state_root) + .map_err(BlockProductionError::FailedToLoadState)? + .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; + (state, Some(state_root)) + } + } else { + warn!( + message = "this block is more likely to be orphaned", + %slot, + "Producing block that conflicts with head" + ); + let state = self + .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) + .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; + + (state, None) + }; + + drop(state_load_timer); + + Ok((state, state_root_opt)) + } + + /// If configured, wait for the fork choice run at the start of the slot to complete. 
+ #[instrument(level = "debug", skip_all)] + fn wait_for_fork_choice_before_block_production( + self: &Arc, + slot: Slot, + ) -> Result<(), BlockProductionError> { + if let Some(rx) = &self.fork_choice_signal_rx { + let current_slot = self + .slot() + .map_err(|_| BlockProductionError::UnableToReadSlot)?; + + let timeout = Duration::from_millis(self.config.fork_choice_before_proposal_timeout_ms); + + if slot == current_slot || slot == current_slot + 1 { + match rx.wait_for_fork_choice(slot, timeout) { + ForkChoiceWaitResult::Success(fc_slot) => { + debug!( + %slot, + fork_choice_slot = %fc_slot, + "Fork choice successfully updated before block production" + ); + } + ForkChoiceWaitResult::Behind(fc_slot) => { + warn!( + fork_choice_slot = %fc_slot, + %slot, + message = "this block may be orphaned", + "Fork choice notifier out of sync with block production" + ); + } + ForkChoiceWaitResult::TimeOut => { + warn!( + message = "this block may be orphaned", + "Timed out waiting for fork choice before proposal" + ); + } + } + } else { + error!( + %slot, + %current_slot, + message = "check clock sync, this block may be orphaned", + "Producing block at incorrect slot" + ); + } + } + Ok(()) + } + + /// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable. + /// + /// This function will return `None` if proposer re-orgs are disabled. 
+ #[instrument(skip_all, level = "debug")] + fn get_state_for_re_org( + &self, + slot: Slot, + head_slot: Slot, + canonical_head: Hash256, + ) -> Option<(BeaconState, Hash256)> { + let re_org_head_threshold = self.config.re_org_head_threshold?; + let re_org_parent_threshold = self.config.re_org_parent_threshold?; + + if self.spec.proposer_score_boost.is_none() { + warn!( + reason = "this network does not have proposer boost enabled", + "Ignoring proposer re-org configuration" + ); + return None; + } + + let slot_delay = self + .slot_clock + .seconds_from_current_slot_start() + .or_else(|| { + warn!(error = "unable to read slot clock", "Not attempting re-org"); + None + })?; + + // Attempt a proposer re-org if: + // + // 1. It seems we have time to propagate and still receive the proposer boost. + // 2. The current head block was seen late. + // 3. The `get_proposer_head` conditions from fork choice pass. + let proposing_on_time = + slot_delay < self.config.re_org_cutoff(self.spec.get_slot_duration()); + if !proposing_on_time { + debug!(reason = "not proposing on time", "Not attempting re-org"); + return None; + } + + let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot); + if !head_late { + debug!(reason = "head not late", "Not attempting re-org"); + return None; + } + + // Is the current head weak and appropriate for re-orging? 
+ let proposer_head_timer = + metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES); + let proposer_head = self + .canonical_head + .fork_choice_read_lock() + .get_proposer_head( + slot, + canonical_head, + re_org_head_threshold, + re_org_parent_threshold, + &self.config.re_org_disallowed_offsets, + self.config.re_org_max_epochs_since_finalization, + ) + .map_err(|e| match e { + ProposerHeadError::DoNotReOrg(reason) => { + debug!( + %reason, + "Not attempting re-org" + ); + } + ProposerHeadError::Error(e) => { + warn!( + error = ?e, + "Not attempting re-org" + ); + } + }) + .ok()?; + drop(proposer_head_timer); + let re_org_parent_block = proposer_head.parent_node.root; + + let (state_root, state) = self + .store + .get_advanced_hot_state_from_cache(re_org_parent_block, slot) + .or_else(|| { + warn!(reason = "no state in cache", "Not attempting re-org"); + None + })?; + + info!( + weak_head = ?canonical_head, + parent = ?re_org_parent_block, + head_weight = proposer_head.head_node.weight, + threshold_weight = proposer_head.re_org_head_weight_threshold, + "Attempting re-org due to weak head" + ); + + Some((state, state_root)) + } +} diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f673519f5f..4c82c93ba3 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1036,6 +1036,7 @@ where observed_column_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_blob_sidecars: RwLock::new(ObservedDataSidecars::new(self.spec.clone())), observed_slashable: <_>::default(), + pending_payload_envelopes: <_>::default(), observed_voluntary_exits: <_>::default(), observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 816e75fd24..bcccc0ec12 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ 
b/beacon_node/beacon_chain/src/errors.rs @@ -319,7 +319,7 @@ pub enum BlockProductionError { MissingExecutionRequests, SszTypesError(ssz_types::Error), // TODO(gloas): Remove this once Gloas is implemented - GloasNotImplemented, + GloasNotImplemented(String), } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e77739e2d5..3b03395a66 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -9,6 +9,7 @@ pub mod beacon_proposer_cache; mod beacon_snapshot; pub mod bellatrix_readiness; pub mod blob_verification; +mod block_production; pub mod block_reward; mod block_times_cache; mod block_verification; @@ -42,6 +43,7 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; +pub mod pending_payload_envelopes; pub mod persisted_beacon_chain; pub mod persisted_custody; mod persisted_fork_choice; diff --git a/beacon_node/beacon_chain/src/pending_payload_envelopes.rs b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs new file mode 100644 index 0000000000..336ab5323f --- /dev/null +++ b/beacon_node/beacon_chain/src/pending_payload_envelopes.rs @@ -0,0 +1,151 @@ +//! Provides the `PendingPayloadEnvelopes` cache for storing execution payload envelopes +//! that have been produced during local block production. +//! +//! For local building, the envelope is created during block production. +//! This cache holds the envelopes temporarily until the validator fetches, signs, +//! and publishes the payload. + +use std::collections::HashMap; +use types::{EthSpec, ExecutionPayloadEnvelope, Slot}; + +/// Cache for pending execution payload envelopes awaiting publishing. +/// +/// Envelopes are keyed by slot and pruned based on slot age. +/// This cache is only used for local building. 
+pub struct PendingPayloadEnvelopes { + /// Maximum number of slots to keep envelopes before pruning. + max_slot_age: u64, + /// The envelopes, keyed by slot. + envelopes: HashMap>, +} + +impl Default for PendingPayloadEnvelopes { + fn default() -> Self { + Self::new(Self::DEFAULT_MAX_SLOT_AGE) + } +} + +impl PendingPayloadEnvelopes { + /// Default maximum slot age before pruning (2 slots). + pub const DEFAULT_MAX_SLOT_AGE: u64 = 2; + + /// Create a new cache with the specified maximum slot age. + pub fn new(max_slot_age: u64) -> Self { + Self { + max_slot_age, + envelopes: HashMap::new(), + } + } + + /// Insert a pending envelope into the cache. + pub fn insert(&mut self, slot: Slot, envelope: ExecutionPayloadEnvelope) { + // TODO(gloas): we may want to check for duplicates here, which shouldn't be allowed + self.envelopes.insert(slot, envelope); + } + + /// Get a pending envelope by slot. + pub fn get(&self, slot: Slot) -> Option<&ExecutionPayloadEnvelope> { + self.envelopes.get(&slot) + } + + /// Remove and return a pending envelope by slot. + pub fn remove(&mut self, slot: Slot) -> Option> { + self.envelopes.remove(&slot) + } + + /// Check if an envelope exists for the given slot. + pub fn contains(&self, slot: Slot) -> bool { + self.envelopes.contains_key(&slot) + } + + /// Prune envelopes older than `current_slot - max_slot_age`. + /// + /// This removes stale envelopes from blocks that were never published. + // TODO(gloas) implement pruning + pub fn prune(&mut self, current_slot: Slot) { + let min_slot = current_slot.saturating_sub(self.max_slot_age); + self.envelopes.retain(|slot, _| *slot >= min_slot); + } + + /// Returns the number of pending envelopes in the cache. + pub fn len(&self) -> usize { + self.envelopes.len() + } + + /// Returns true if the cache is empty. 
+ pub fn is_empty(&self) -> bool { + self.envelopes.is_empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{ExecutionPayloadGloas, ExecutionRequests, Hash256, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn make_envelope(slot: Slot) -> ExecutionPayloadEnvelope { + ExecutionPayloadEnvelope { + payload: ExecutionPayloadGloas::default(), + execution_requests: ExecutionRequests::default(), + builder_index: 0, + beacon_block_root: Hash256::ZERO, + slot, + state_root: Hash256::ZERO, + } + } + + #[test] + fn insert_and_get() { + let mut cache = PendingPayloadEnvelopes::::default(); + let slot = Slot::new(1); + let envelope = make_envelope(slot); + + assert!(!cache.contains(slot)); + assert_eq!(cache.len(), 0); + + cache.insert(slot, envelope.clone()); + + assert!(cache.contains(slot)); + assert_eq!(cache.len(), 1); + assert_eq!(cache.get(slot), Some(&envelope)); + } + + #[test] + fn remove() { + let mut cache = PendingPayloadEnvelopes::::default(); + let slot = Slot::new(1); + let envelope = make_envelope(slot); + + cache.insert(slot, envelope.clone()); + assert!(cache.contains(slot)); + + let removed = cache.remove(slot); + assert_eq!(removed, Some(envelope)); + assert!(!cache.contains(slot)); + assert_eq!(cache.len(), 0); + } + + #[test] + fn prune_old_envelopes() { + let mut cache = PendingPayloadEnvelopes::::new(2); + + // Insert envelope at slot 5 + let slot_1 = Slot::new(5); + cache.insert(slot_1, make_envelope(slot_1)); + + // Insert envelope at slot 10 + let slot_2 = Slot::new(10); + cache.insert(slot_2, make_envelope(slot_2)); + + assert_eq!(cache.len(), 2); + + // Prune at slot 10 with max_slot_age=2, should keep slots >= 8 + cache.prune(Slot::new(10)); + + assert_eq!(cache.len(), 1); + assert!(!cache.contains(slot_1)); // slot 5 < 8, pruned + assert!(cache.contains(slot_2)); // slot 10 >= 8, kept + } +} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 3f01622c35..21a5abeb6c 100644 --- 
a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -431,6 +431,16 @@ async fn bellatrix_readiness_logging( current_slot: Slot, beacon_chain: &BeaconChain, ) { + // There is no execution payload in gloas blocks, so this will trigger + // bellatrix readiness logging in gloas if we dont skip the check below + if beacon_chain + .spec + .fork_name_at_slot::(current_slot) + .gloas_enabled() + { + return; + } + let merge_completed = beacon_chain .canonical_head .cached_head() diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 33b83aab09..ad2486a4ad 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -48,7 +48,6 @@ use tree_hash::TreeHash; use types::builder::BuilderBid; use types::execution::BlockProductionVersion; use types::kzg_ext::KzgCommitments; -use types::new_non_zero_usize; use types::{ AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, ExecutionRequests, KzgProofs, SignedBlindedBeaconBlock, @@ -58,6 +57,7 @@ use types::{ ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, ProposerPreparationData, Slot, }; +use types::{ExecutionPayloadGloas, new_non_zero_usize}; mod block_hash; mod engine_api; @@ -168,6 +168,7 @@ pub enum Error { BeaconStateError(BeaconStateError), PayloadTypeMismatch, VerifyingVersionedHashes(versioned_hashes::Error), + Unexpected(String), } impl From for Error { @@ -204,6 +205,26 @@ pub enum BlockProposalContentsType { Blinded(BlockProposalContents>), } +pub struct BlockProposalContentsGloas { + pub payload: ExecutionPayloadGloas, + pub payload_value: Uint256, + pub blob_kzg_commitments: KzgCommitments, + pub blobs_and_proofs: (BlobsList, KzgProofs), + pub execution_requests: ExecutionRequests, +} + +impl From> for BlockProposalContentsGloas { + fn from(response: GetPayloadResponseGloas) -> Self { + Self { + payload: response.execution_payload, + payload_value: response.block_value, + 
blob_kzg_commitments: response.blobs_bundle.commitments, + blobs_and_proofs: (response.blobs_bundle.blobs, response.blobs_bundle.proofs), + execution_requests: response.requests, + } + } +} + pub enum BlockProposalContents> { Payload { payload: Payload, @@ -884,6 +905,43 @@ impl ExecutionLayer { .and_then(|entry| entry.gas_limit) } + /// Maps to the `engine_getPayload` JSON-RPC call for post-Gloas payload construction. + /// + /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing + /// payload id for the given parameters. + /// + /// ## Fallback Behavior + /// + /// The result will be returned from the first node that returns successfully. No more nodes + /// will be contacted. + pub async fn get_payload_gloas( + &self, + payload_parameters: PayloadParameters<'_>, + ) -> Result, Error> { + let payload_response_type = self.get_full_payload_caching(payload_parameters).await?; + let GetPayloadResponseType::Full(payload_response) = payload_response_type else { + return Err(Error::Unexpected( + "get_payload_gloas should never return a blinded payload".to_owned(), + )); + }; + let GetPayloadResponse::Gloas(payload_response) = payload_response else { + return Err(Error::Unexpected( + "get_payload_gloas should always return a gloas `GetPayloadResponse` variant" + .to_owned(), + )); + }; + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME, + &[metrics::SUCCESS], + ); + metrics::inc_counter_vec( + &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE, + &[metrics::LOCAL], + ); + + Ok(payload_response.into()) + } + /// Maps to the `engine_getPayload` JSON-RPC call. 
/// /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 464879288b..7b6c4e8310 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -245,7 +245,7 @@ impl BidStuff for BuilderBid { } fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature { - let domain = spec.get_builder_domain(); + let domain = spec.get_builder_application_domain(); let message = self.signing_root(domain); sk.sign(message) } diff --git a/beacon_node/http_api/src/beacon/execution_payload_envelope.rs b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs new file mode 100644 index 0000000000..81f2ea41ea --- /dev/null +++ b/beacon_node/http_api/src/beacon/execution_payload_envelope.rs @@ -0,0 +1,116 @@ +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{ChainFilter, EthV1Filter, NetworkTxFilter, ResponseFilter, TaskSpawnerFilter}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use bytes::Bytes; +use eth2::{CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use ssz::Decode; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tracing::{info, warn}; +use types::SignedExecutionPayloadEnvelope; +use warp::{Filter, Rejection, Reply, reply::Response}; + +// POST beacon/execution_payload_envelope (SSZ) +pub(crate) fn post_beacon_execution_payload_envelope_ssz( + eth_v1: EthV1Filter, + task_spawner_filter: TaskSpawnerFilter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("beacon")) + .and(warp::path("execution_payload_envelope")) + .and(warp::path::end()) + .and(warp::header::exact( + CONTENT_TYPE_HEADER, + SSZ_CONTENT_TYPE_HEADER, + )) + 
.and(warp::body::bytes()) + .and(task_spawner_filter) + .and(chain_filter) + .and(network_tx_filter) + .then( + |body_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let envelope = + SignedExecutionPayloadEnvelope::::from_ssz_bytes(&body_bytes) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_execution_payload_envelope(envelope, chain, &network_tx).await + }) + }, + ) + .boxed() +} + +// POST beacon/execution_payload_envelope +pub(crate) fn post_beacon_execution_payload_envelope( + eth_v1: EthV1Filter, + task_spawner_filter: TaskSpawnerFilter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("beacon")) + .and(warp::path("execution_payload_envelope")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .then( + |envelope: SignedExecutionPayloadEnvelope, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_execution_payload_envelope(envelope, chain, &network_tx).await + }) + }, + ) + .boxed() +} +/// Publishes a signed execution payload envelope to the network. +pub async fn publish_execution_payload_envelope( + envelope: SignedExecutionPayloadEnvelope, + chain: Arc>, + network_tx: &UnboundedSender>, +) -> Result { + let slot = envelope.message.slot; + let beacon_block_root = envelope.message.beacon_block_root; + + // TODO(gloas): Replace this check once we have gossip validation. + if !chain.spec.is_gloas_scheduled() { + return Err(warp_utils::reject::custom_bad_request( + "Execution payload envelopes are not supported before the Gloas fork".into(), + )); + } + + // TODO(gloas): We should probably add validation here i.e. 
BroadcastValidation::Gossip + info!( + %slot, + %beacon_block_root, + builder_index = envelope.message.builder_index, + "Publishing signed execution payload envelope to network" + ); + + // Publish to the network + crate::utils::publish_pubsub_message( + network_tx, + PubsubMessage::ExecutionPayload(Box::new(envelope)), + ) + .map_err(|_| { + warn!(%slot, "Failed to publish execution payload envelope to network"); + warp_utils::reject::custom_server_error( + "Unable to publish execution payload envelope to network".into(), + ) + })?; + + Ok(warp::reply().into_response()) +} diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs index df5e6eee5c..9ec1c476f6 100644 --- a/beacon_node/http_api/src/beacon/mod.rs +++ b/beacon_node/http_api/src/beacon/mod.rs @@ -1,2 +1,3 @@ +pub mod execution_payload_envelope; pub mod pool; pub mod states; diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs index 828efb86a7..50be7211d8 100644 --- a/beacon_node/http_api/src/beacon/states.rs +++ b/beacon_node/http_api/src/beacon/states.rs @@ -28,7 +28,6 @@ pub fn get_beacon_state_pending_consolidations( beacon_states_path: BeaconStatesPath, ) -> ResponseFilter { beacon_states_path - .clone() .and(warp::path("pending_consolidations")) .and(warp::path::end()) .then( diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 6824eab4fd..92a1ad934d 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -36,6 +36,9 @@ mod validator_inclusion; mod validators; mod version; +use crate::beacon::execution_payload_envelope::{ + post_beacon_execution_payload_envelope, post_beacon_execution_payload_envelope_ssz, +}; use crate::beacon::pool::*; use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; use crate::utils::{AnyVersionFilter, EthV1Filter}; @@ -92,6 +95,7 @@ use types::{ BeaconStateError, Checkpoint, ConfigAndPreset, Epoch, 
EthSpec, ForkName, Hash256, SignedBlindedBeaconBlock, Slot, }; +use validator::execution_payload_envelope::get_validator_execution_payload_envelope; use version::{ ResponseIncludesVersion, V1, V2, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, @@ -1486,6 +1490,22 @@ pub fn serve( let post_beacon_pool_bls_to_execution_changes = post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); + // POST beacon/execution_payload_envelope + let post_beacon_execution_payload_envelope = post_beacon_execution_payload_envelope( + eth_v1.clone(), + task_spawner_filter.clone(), + chain_filter.clone(), + network_tx_filter.clone(), + ); + + // POST beacon/execution_payload_envelope (SSZ) + let post_beacon_execution_payload_envelope_ssz = post_beacon_execution_payload_envelope_ssz( + eth_v1.clone(), + task_spawner_filter.clone(), + chain_filter.clone(), + network_tx_filter.clone(), + ); + let beacon_rewards_path = eth_v1 .clone() .and(warp::path("beacon")) @@ -2444,7 +2464,7 @@ pub fn serve( // GET validator/duties/proposer/{epoch} let get_validator_duties_proposer = get_validator_duties_proposer( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2452,7 +2472,7 @@ pub fn serve( // GET validator/blocks/{slot} let get_validator_blocks = get_validator_blocks( - any_version.clone().clone(), + any_version.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2460,7 +2480,15 @@ pub fn serve( // GET validator/blinded_blocks/{slot} let get_validator_blinded_blocks = get_validator_blinded_blocks( - eth_v1.clone().clone(), + eth_v1.clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); + + // GET validator/execution_payload_envelope/{slot}/{builder_index} + let get_validator_execution_payload_envelope = 
get_validator_execution_payload_envelope( + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2468,7 +2496,7 @@ pub fn serve( // GET validator/attestation_data?slot,committee_index let get_validator_attestation_data = get_validator_attestation_data( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2476,7 +2504,7 @@ pub fn serve( // GET validator/aggregate_attestation?attestation_data_root,slot let get_validator_aggregate_attestation = get_validator_aggregate_attestation( - any_version.clone().clone(), + any_version.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2484,7 +2512,7 @@ pub fn serve( // POST validator/duties/attester/{epoch} let post_validator_duties_attester = post_validator_duties_attester( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2492,7 +2520,7 @@ pub fn serve( // POST validator/duties/sync/{epoch} let post_validator_duties_sync = post_validator_duties_sync( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2500,7 +2528,7 @@ pub fn serve( // GET validator/sync_committee_contribution let get_validator_sync_committee_contribution = get_validator_sync_committee_contribution( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), not_while_syncing_filter.clone(), task_spawner_filter.clone(), @@ -2508,7 +2536,7 @@ pub fn serve( // POST validator/aggregate_and_proofs let post_validator_aggregate_and_proofs = post_validator_aggregate_and_proofs( - any_version.clone().clone(), + any_version.clone(), chain_filter.clone(), network_tx_filter.clone(), not_while_syncing_filter.clone(), @@ -2516,7 +2544,7 @@ pub fn serve( ); let post_validator_contribution_and_proofs = 
post_validator_contribution_and_proofs( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), network_tx_filter.clone(), not_while_syncing_filter.clone(), @@ -2526,7 +2554,7 @@ pub fn serve( // POST validator/beacon_committee_subscriptions let post_validator_beacon_committee_subscriptions = post_validator_beacon_committee_subscriptions( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), validator_subscription_tx_filter.clone(), task_spawner_filter.clone(), @@ -2534,7 +2562,7 @@ pub fn serve( // POST validator/prepare_beacon_proposer let post_validator_prepare_beacon_proposer = post_validator_prepare_beacon_proposer( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), network_tx_filter.clone(), not_while_syncing_filter.clone(), @@ -2543,13 +2571,13 @@ pub fn serve( // POST validator/register_validator let post_validator_register_validator = post_validator_register_validator( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), task_spawner_filter.clone(), ); // POST validator/sync_committee_subscriptions let post_validator_sync_committee_subscriptions = post_validator_sync_committee_subscriptions( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), validator_subscription_tx_filter.clone(), task_spawner_filter.clone(), @@ -2557,7 +2585,7 @@ pub fn serve( // POST validator/liveness/{epoch} let post_validator_liveness_epoch = post_validator_liveness_epoch( - eth_v1.clone().clone(), + eth_v1.clone(), chain_filter.clone(), task_spawner_filter.clone(), ); @@ -3336,6 +3364,7 @@ pub fn serve( .uor(get_validator_duties_proposer) .uor(get_validator_blocks) .uor(get_validator_blinded_blocks) + .uor(get_validator_execution_payload_envelope) .uor(get_validator_attestation_data) .uor(get_validator_aggregate_attestation) .uor(get_validator_sync_committee_contribution) @@ -3374,7 +3403,8 @@ pub fn serve( post_beacon_blocks_ssz .uor(post_beacon_blocks_v2_ssz) .uor(post_beacon_blinded_blocks_ssz) - 
.uor(post_beacon_blinded_blocks_v2_ssz), + .uor(post_beacon_blinded_blocks_v2_ssz) + .uor(post_beacon_execution_payload_envelope_ssz), ) .uor(post_beacon_blocks) .uor(post_beacon_blinded_blocks) @@ -3386,6 +3416,7 @@ pub fn serve( .uor(post_beacon_pool_voluntary_exits) .uor(post_beacon_pool_sync_committees) .uor(post_beacon_pool_bls_to_execution_changes) + .uor(post_beacon_execution_payload_envelope) .uor(post_beacon_state_validators) .uor(post_beacon_state_validator_balances) .uor(post_beacon_state_validator_identities) diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 6a549c91ef..607221686f 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -10,8 +10,8 @@ use beacon_chain::graffiti_calculator::GraffitiSettings; use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; -use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; +use eth2::{beacon_response::ForkVersionedResponse, types::ProduceBlockV4Metadata}; use ssz::Encode; use std::sync::Arc; use tracing::instrument; @@ -43,6 +43,49 @@ pub fn get_randao_verification( Ok(randao_verification) } +#[instrument( + name = "lh_produce_block_v4", + skip_all, + fields(%slot) +)] +pub async fn produce_block_v4( + accept_header: Option, + chain: Arc>, + slot: Slot, + query: api_types::ValidatorBlocksQuery, +) -> Result, warp::Rejection> { + let randao_reveal = query.randao_reveal.decompress().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not a valid BLS signature: {:?}", + e + )) + })?; + + let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + let builder_boost_factor = if query.builder_boost_factor == Some(DEFAULT_BOOST_FACTOR) { + None + } else { + query.builder_boost_factor + }; + + let graffiti_settings = 
GraffitiSettings::new(query.graffiti, query.graffiti_policy); + + let (block, consensus_block_value) = chain + .produce_block_with_verification_gloas( + randao_reveal, + slot, + graffiti_settings, + randao_verification, + builder_boost_factor, + ) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) + })?; + + build_response_v4::(block, consensus_block_value, accept_header, &chain.spec) +} + #[instrument( name = "lh_produce_block_v3", skip_all, @@ -87,6 +130,45 @@ pub async fn produce_block_v3( build_response_v3(chain, block_response_type, accept_header) } +pub fn build_response_v4( + block: BeaconBlock>, + consensus_block_value: u64, + accept_header: Option, + spec: &ChainSpec, +) -> Result, warp::Rejection> { + let fork_name = block + .to_ref() + .fork_name(spec) + .map_err(inconsistent_fork_rejection)?; + let consensus_block_value_wei = + Uint256::from(consensus_block_value) * Uint256::from(1_000_000_000u64); + + let metadata = ProduceBlockV4Metadata { + consensus_version: fork_name, + consensus_block_value: consensus_block_value_wei, + }; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map(|res: Response| add_consensus_version_header(res, fork_name)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value_wei)) + .map_err(|e| -> warp::Rejection { + warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) + }), + _ => Ok(warp::reply::json(&ForkVersionedResponse { + version: fork_name, + metadata, + data: block, + }) + .into_response()) + .map(|res| add_consensus_version_header(res, fork_name)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value_wei)), + } +} + pub fn build_response_v3( chain: Arc>, block_response: BeaconBlockResponseWrapper, diff --git 
a/beacon_node/http_api/src/validator/execution_payload_envelope.rs b/beacon_node/http_api/src/validator/execution_payload_envelope.rs new file mode 100644 index 0000000000..c40b375e49 --- /dev/null +++ b/beacon_node/http_api/src/validator/execution_payload_envelope.rs @@ -0,0 +1,110 @@ +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{ + ChainFilter, EthV1Filter, NotWhileSyncingFilter, ResponseFilter, TaskSpawnerFilter, +}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::beacon_response::{EmptyMetadata, ForkVersionedResponse}; +use eth2::types::Accept; +use ssz::Encode; +use std::sync::Arc; +use tracing::debug; +use types::Slot; +use warp::http::Response; +use warp::{Filter, Rejection}; + +// GET validator/execution_payload_envelope/{slot}/{builder_index} +pub fn get_validator_execution_payload_envelope( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("execution_payload_envelope")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid builder_index".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + .and(not_while_syncing_filter) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |slot: Slot, + // TODO(gloas) we're only doing local building + // we'll need to implement builder index logic + // eventually. 
+ _builder_index: u64, + accept_header: Option, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + debug!(?slot, "Execution payload envelope request from HTTP API"); + + not_synced_filter?; + + // Get the envelope from the pending cache (local building only) + let envelope = chain + .pending_payload_envelopes + .read() + .get(slot) + .cloned() + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "Execution payload envelope not available for slot {slot}" + )) + })?; + + let fork_name = chain.spec.fork_name_at_slot::(slot); + + match accept_header { + Some(Accept::Ssz) => Response::builder() + .status(200) + .header("Content-Type", "application/octet-stream") + .header("Eth-Consensus-Version", fork_name.to_string()) + .body(envelope.as_ssz_bytes().into()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to build SSZ response: {e}" + )) + }), + _ => { + let json_response = ForkVersionedResponse { + version: fork_name, + metadata: EmptyMetadata {}, + data: envelope, + }; + Response::builder() + .status(200) + .header("Content-Type", "application/json") + .header("Eth-Consensus-Version", fork_name.to_string()) + .body( + serde_json::to_string(&json_response) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to serialize response: {e}" + )) + })? 
+ .into(), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Failed to build JSON response: {e}" + )) + }) + } + } + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs index 0704c52095..df237d9f9b 100644 --- a/beacon_node/http_api/src/validator/mod.rs +++ b/beacon_node/http_api/src/validator/mod.rs @@ -1,4 +1,6 @@ -use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::produce_block::{ + produce_blinded_block_v2, produce_block_v2, produce_block_v3, produce_block_v4, +}; use crate::task_spawner::{Priority, TaskSpawner}; use crate::utils::{ AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, @@ -31,6 +33,8 @@ use types::{ use warp::{Filter, Rejection, Reply}; use warp_utils::reject::convert_rejection; +pub mod execution_payload_envelope; + /// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator /// index and then ensures that the validator exists in the given `state`. pub fn pubkey_to_validator_index( @@ -316,7 +320,11 @@ pub fn get_validator_blocks( not_synced_filter?; - if endpoint_version == V3 { + // Use V4 block production for Gloas fork + let fork_name = chain.spec.fork_name_at_slot::(slot); + if fork_name.gloas_enabled() { + produce_block_v4(accept_header, chain, slot, query).await + } else if endpoint_version == V3 { produce_block_v3(accept_header, chain, slot, query).await } else { produce_block_v2(accept_header, chain, slot, query).await @@ -662,15 +670,26 @@ pub fn post_validator_prepare_beacon_proposer( ) .await; - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + // TODO(gloas): verify this is correct. 
We skip proposer preparation for + // GLOAS because the execution payload is no longer embedded in the beacon + // block (it's in the payload envelope), so the head block's + // execution_payload() is unavailable. + let next_slot = current_slot + 1; + if !chain + .spec + .fork_name_at_slot::(next_slot) + .gloas_enabled() + { + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + } if chain.spec.is_peer_das_scheduled() { let (finalized_beacon_state, _, _) = diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 6787d1ab9e..7e3eb8b980 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -6,14 +6,15 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, }; -use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; use eth2::{ BeaconNodeHttpClient, Error, Error::ServerMessage, Timeouts, mixin::{RequestAccept, ResponseForkName, ResponseOptional}, types::{ - BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, StateId as CoreStateId, *, + BlockId as CoreBlockId, ForkChoiceNode, ProduceBlockV3Response, ProduceBlockV4Metadata, + StateId as CoreStateId, *, }, }; use execution_layer::expected_gas_limit; @@ -47,7 +48,8 @@ use tree_hash::TreeHash; use types::ApplicationDomain; use types::{ Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, - SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, + SignedExecutionPayloadEnvelope, SignedRoot, SingleAttestation, Slot, + attestation::AttestationBase, consts::gloas::BUILDER_INDEX_SELF_BUILD, }; type E = MainnetEthSpec; @@ -3726,6 +3728,229 @@ impl ApiTester { self } + /// Get the 
proposer secret key and randao reveal for the given slot. + async fn proposer_setup( + &self, + slot: Slot, + epoch: Epoch, + fork: &Fork, + genesis_validators_root: Hash256, + ) -> (SecretKey, SignatureBytes) { + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::Randao, fork, genesis_validators_root); + let message = epoch.signing_root(domain); + sk.sign(message).into() + }; + + (sk, randao_reveal) + } + + /// Assert block metadata and verify the envelope cache. + fn assert_v4_block_metadata( + &self, + block: &BeaconBlock, + metadata: &ProduceBlockV4Metadata, + slot: Slot, + ) { + assert_eq!( + metadata.consensus_version, + block.to_ref().fork_name(&self.chain.spec).unwrap() + ); + assert!(!metadata.consensus_block_value.is_zero()); + + let block_root = block.tree_hash_root(); + let envelope = self + .chain + .pending_payload_envelopes + .read() + .get(slot) + .cloned() + .expect("envelope should exist in pending cache for local building"); + assert_eq!(envelope.beacon_block_root, block_root); + assert_eq!(envelope.slot, slot); + } + + /// Assert envelope fields match the expected block root and slot. + fn assert_envelope_fields( + &self, + envelope: &ExecutionPayloadEnvelope, + block_root: Hash256, + slot: Slot, + ) { + assert_eq!(envelope.beacon_block_root, block_root); + assert_eq!(envelope.slot, slot); + assert_eq!(envelope.builder_index, BUILDER_INDEX_SELF_BUILD); + assert_ne!(envelope.state_root, Hash256::ZERO); + } + + /// Sign an execution payload envelope. 
+ fn sign_envelope( + &self, + envelope: ExecutionPayloadEnvelope, + sk: &SecretKey, + epoch: Epoch, + fork: &Fork, + genesis_validators_root: Hash256, + ) -> SignedExecutionPayloadEnvelope { + let domain = + self.chain + .spec + .get_domain(epoch, Domain::BeaconBuilder, fork, genesis_validators_root); + let signing_root = envelope.signing_root(domain); + let signature = sk.sign(signing_root); + + SignedExecutionPayloadEnvelope { + message: envelope, + signature, + } + } + + /// Test V4 block production (JSON). Only runs if Gloas is scheduled. + pub async fn test_block_production_v4(self) -> Self { + if !self.chain.spec.is_gloas_scheduled() { + return self; + } + + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let fork_name = self.chain.spec.fork_name_at_slot::(slot); + + if !fork_name.gloas_enabled() { + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + continue; + } + + let (sk, randao_reveal) = self + .proposer_setup(slot, epoch, &fork, genesis_validators_root) + .await; + + let (response, metadata) = self + .client + .get_validator_blocks_v4::(slot, &randao_reveal, None, None, None) + .await + .unwrap(); + let block = response.data; + + self.assert_v4_block_metadata(&block, &metadata, slot); + + let envelope = self + .client + .get_validator_execution_payload_envelope::(slot, BUILDER_INDEX_SELF_BUILD) + .await + .unwrap() + .data; + + self.assert_envelope_fields(&envelope, block.tree_hash_root(), slot); + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_request = + PublishBlockRequest::try_from(Arc::new(signed_block.clone())).unwrap(); + self.client + .post_beacon_blocks_v2(&signed_block_request, None) + .await + .unwrap(); + assert_eq!(self.chain.head_beacon_block(), 
Arc::new(signed_block)); + + let signed_envelope = + self.sign_envelope(envelope, &sk, epoch, &fork, genesis_validators_root); + self.client + .post_beacon_execution_payload_envelope(&signed_envelope, fork_name) + .await + .unwrap(); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + + /// Test V4 block production (SSZ). Only runs if Gloas is scheduled. + pub async fn test_block_production_v4_ssz(self) -> Self { + if !self.chain.spec.is_gloas_scheduled() { + return self; + } + + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let fork_name = self.chain.spec.fork_name_at_slot::(slot); + + if !fork_name.gloas_enabled() { + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + continue; + } + + let (sk, randao_reveal) = self + .proposer_setup(slot, epoch, &fork, genesis_validators_root) + .await; + + let (block, metadata) = self + .client + .get_validator_blocks_v4_ssz::(slot, &randao_reveal, None, None, None) + .await + .unwrap(); + + self.assert_v4_block_metadata(&block, &metadata, slot); + + let envelope = self + .client + .get_validator_execution_payload_envelope_ssz::(slot, BUILDER_INDEX_SELF_BUILD) + .await + .unwrap(); + + self.assert_envelope_fields(&envelope, block.tree_hash_root(), slot); + + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block_request = + PublishBlockRequest::try_from(Arc::new(signed_block.clone())).unwrap(); + self.client + .post_beacon_blocks_v2_ssz(&signed_block_request, None) + .await + .unwrap(); + assert_eq!(self.chain.head_beacon_block(), Arc::new(signed_block)); + + let signed_envelope = + self.sign_envelope(envelope, &sk, epoch, &fork, genesis_validators_root); + self.client + .post_beacon_execution_payload_envelope_ssz(&signed_envelope, fork_name) + 
.await + .unwrap(); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + + self + } + pub async fn test_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); @@ -7459,6 +7684,22 @@ async fn block_production_v3_ssz_with_skip_slots() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_v4() { + ApiTester::new_with_hard_forks() + .await + .test_block_production_v4() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_v4_ssz() { + ApiTester::new_with_hard_forks() + .await + .test_block_production_v4_ssz() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_full_payload_premerge() { ApiTester::new().await.test_blinded_block_production().await; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index cdf63a3c67..a5b4f9afdd 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -42,7 +42,7 @@ use reqwest::{ #[cfg(feature = "events")] use reqwest_eventsource::{Event, RequestBuilderExt}; use serde::{Serialize, de::DeserializeOwned}; -use ssz::Encode; +use ssz::{Decode, Encode}; use std::fmt; use std::future::Future; use std::time::Duration; @@ -50,6 +50,7 @@ use std::time::Duration; pub const V1: EndpointVersion = EndpointVersion(1); pub const V2: EndpointVersion = EndpointVersion(2); pub const V3: EndpointVersion = EndpointVersion(3); +pub const V4: EndpointVersion = EndpointVersion(4); pub const CONSENSUS_VERSION_HEADER: &str = "Eth-Consensus-Version"; pub const EXECUTION_PAYLOAD_BLINDED_HEADER: &str = "Eth-Execution-Payload-Blinded"; @@ -2406,6 +2407,277 @@ impl BeaconNodeHttpClient { opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) } + /// returns `GET v4/validator/blocks/{slot}` URL path + pub async fn get_validator_blocks_v4_path( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: 
Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, + graffiti_policy: Option, + ) -> Result { + let mut path = self.eth_path(V4)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("blocks") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + if skip_randao_verification == SkipRandaoVerification::Yes { + path.query_pairs_mut() + .append_pair("skip_randao_verification", ""); + } + + if let Some(builder_booster_factor) = builder_booster_factor { + path.query_pairs_mut() + .append_pair("builder_boost_factor", &builder_booster_factor.to_string()); + } + + if let Some(GraffitiPolicy::AppendClientVersions) = graffiti_policy { + path.query_pairs_mut() + .append_pair("graffiti_policy", "AppendClientVersions"); + } + + Ok(path) + } + + /// `GET v4/validator/blocks/{slot}` + pub async fn get_validator_blocks_v4( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + builder_booster_factor: Option, + graffiti_policy: Option, + ) -> Result< + ( + ForkVersionedResponse, ProduceBlockV4Metadata>, + ProduceBlockV4Metadata, + ), + Error, + > { + self.get_validator_blocks_v4_modular( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + builder_booster_factor, + graffiti_policy, + ) + .await + } + + /// `GET v4/validator/blocks/{slot}` + pub async fn get_validator_blocks_v4_modular( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, + graffiti_policy: Option, + ) -> Result< + ( + ForkVersionedResponse, ProduceBlockV4Metadata>, + ProduceBlockV4Metadata, + ), + Error, + > { + let path = self + 
.get_validator_blocks_v4_path( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + builder_booster_factor, + graffiti_policy, + ) + .await?; + + let opt_result = self + .get_response_with_response_headers( + path, + Accept::Json, + self.timeouts.get_validator_block, + |response, headers| async move { + let header_metadata = ProduceBlockV4Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + let block_response = response + .json::, ProduceBlockV4Metadata>>() + .await?; + Ok((block_response, header_metadata)) + }, + ) + .await?; + + opt_result.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) + } + + /// `GET v4/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v4_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + builder_booster_factor: Option, + graffiti_policy: Option, + ) -> Result<(BeaconBlock, ProduceBlockV4Metadata), Error> { + self.get_validator_blocks_v4_modular_ssz::( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + builder_booster_factor, + graffiti_policy, + ) + .await + } + + /// `GET v4/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v4_modular_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + builder_booster_factor: Option, + graffiti_policy: Option, + ) -> Result<(BeaconBlock, ProduceBlockV4Metadata), Error> { + let path = self + .get_validator_blocks_v4_path( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + builder_booster_factor, + graffiti_policy, + ) + .await?; + + let opt_response = self + .get_response_with_response_headers( + path, + Accept::Ssz, + self.timeouts.get_validator_block, + |response, headers| async move { + let metadata = ProduceBlockV4Metadata::try_from(&headers) + .map_err(Error::InvalidHeaders)?; + let response_bytes = response.bytes().await?; + + let block = 
BeaconBlock::from_ssz_bytes_for_fork( + &response_bytes, + metadata.consensus_version, + ) + .map_err(Error::InvalidSsz)?; + + Ok((block, metadata)) + }, + ) + .await?; + + opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND)) + } + + /// `GET v1/validator/execution_payload_envelope/{slot}/{builder_index}` + pub async fn get_validator_execution_payload_envelope( + &self, + slot: Slot, + builder_index: u64, + ) -> Result>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("execution_payload_envelope") + .push(&slot.to_string()) + .push(&builder_index.to_string()); + + self.get(path).await + } + + /// `GET v1/validator/execution_payload_envelope/{slot}/{builder_index}` in SSZ format + pub async fn get_validator_execution_payload_envelope_ssz( + &self, + slot: Slot, + builder_index: u64, + ) -> Result, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("execution_payload_envelope") + .push(&slot.to_string()) + .push(&builder_index.to_string()); + + let opt_response = self + .get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block) + .await?; + + let response_bytes = opt_response.ok_or(Error::StatusCode(StatusCode::NOT_FOUND))?; + + ExecutionPayloadEnvelope::from_ssz_bytes(&response_bytes).map_err(Error::InvalidSsz) + } + + /// `POST v1/beacon/execution_payload_envelope` + pub async fn post_beacon_execution_payload_envelope( + &self, + envelope: &SignedExecutionPayloadEnvelope, + fork_name: ForkName, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("execution_payload_envelope"); + + self.post_generic_with_consensus_version( + path, + envelope, + Some(self.timeouts.proposal), + fork_name, + ) + .await?; + + Ok(()) + } + + /// `POST v1/beacon/execution_payload_envelope` in SSZ format + pub async fn post_beacon_execution_payload_envelope_ssz( + &self, + envelope: &SignedExecutionPayloadEnvelope, + fork_name: ForkName, + ) -> Result<(), Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("execution_payload_envelope"); + + self.post_generic_with_consensus_version_and_ssz_body( + path, + envelope.as_ssz_bytes(), + Some(self.timeouts.proposal), + fork_name, + ) + .await?; + + Ok(()) + } + /// `GET v2/validator/blocks/{slot}` in ssz format pub async fn get_validator_blocks_ssz( &self, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 0c9c0b95f0..f8376d430c 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1723,7 +1723,7 @@ pub type JsonProduceBlockV3Response = pub enum FullBlockContents { /// This is a full deneb variant with block and blobs. BlockContents(BlockContents), - /// This variant is for all pre-deneb full blocks. + /// This variant is for all pre-deneb full blocks or post-gloas beacon blocks. Block(BeaconBlock), } @@ -1751,6 +1751,20 @@ pub struct ProduceBlockV3Metadata { pub consensus_block_value: Uint256, } +/// Metadata about a `produce_block_v4` response which is returned in the body & headers. +#[derive(Debug, Deserialize, Serialize)] +pub struct ProduceBlockV4Metadata { + // The consensus version is serialized & deserialized by `ForkVersionedResponse`.
+ #[serde( + skip_serializing, + skip_deserializing, + default = "dummy_consensus_version" + )] + pub consensus_version: ForkName, + #[serde(with = "serde_utils::u256_dec")] + pub consensus_block_value: Uint256, +} + impl FullBlockContents { pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { match blob_data { @@ -1907,6 +1921,27 @@ impl TryFrom<&HeaderMap> for ProduceBlockV3Metadata { } } +impl TryFrom<&HeaderMap> for ProduceBlockV4Metadata { + type Error = String; + + fn try_from(headers: &HeaderMap) -> Result { + let consensus_version = parse_required_header(headers, CONSENSUS_VERSION_HEADER, |s| { + s.parse::() + .map_err(|e| format!("invalid {CONSENSUS_VERSION_HEADER}: {e:?}")) + })?; + let consensus_block_value = + parse_required_header(headers, CONSENSUS_BLOCK_VALUE_HEADER, |s| { + Uint256::from_str_radix(s, 10) + .map_err(|e| format!("invalid {CONSENSUS_BLOCK_VALUE_HEADER}: {e:?}")) + })?; + + Ok(ProduceBlockV4Metadata { + consensus_version, + consensus_block_value, + }) + } +} + /// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. #[derive(Clone, Debug, PartialEq, Encode, Serialize)] #[serde(untagged)] @@ -1954,7 +1989,7 @@ impl PublishBlockRequest { /// SSZ decode with fork variant determined by `fork_name`. pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { - if fork_name.deneb_enabled() { + if fork_name.deneb_enabled() && !fork_name.gloas_enabled() { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; builder.register_type::>()?; diff --git a/consensus/state_processing/src/envelope_processing.rs b/consensus/state_processing/src/envelope_processing.rs index d46728dbbc..c2cfeae5d3 100644 --- a/consensus/state_processing/src/envelope_processing.rs +++ b/consensus/state_processing/src/envelope_processing.rs @@ -20,6 +20,23 @@ macro_rules! envelope_verify { }; } +/// The strategy to be used when validating the payload's state root.
+#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +#[derive(PartialEq, Clone, Copy)] +pub enum VerifyStateRoot { + /// Validate state root. + True, + /// Do not validate state root. Use with caution. + /// This should only be used when first constructing the payload envelope. + False, +} + +impl VerifyStateRoot { + pub fn is_true(self) -> bool { + self == VerifyStateRoot::True + } +} + #[derive(Debug, Clone)] pub enum EnvelopeProcessingError { /// Bad Signature @@ -111,6 +128,7 @@ pub fn process_execution_payload_envelope( parent_state_root: Option, signed_envelope: &SignedExecutionPayloadEnvelope, verify_signatures: VerifySignatures, + verify_state_root: VerifyStateRoot, spec: &ChainSpec, ) -> Result<(), EnvelopeProcessingError> { if verify_signatures.is_true() { @@ -264,15 +282,17 @@ pub fn process_execution_payload_envelope( .map_err(EnvelopeProcessingError::BitFieldError)?; *state.latest_block_hash_mut()? = payload.block_hash; - // Verify the state root - let state_root = state.canonical_root()?; - envelope_verify!( - envelope.state_root == state_root, - EnvelopeProcessingError::InvalidStateRoot { - state: state_root, - envelope: envelope.state_root, - } - ); + if verify_state_root.is_true() { + // Verify the state root + let state_root = state.canonical_root()?; + envelope_verify!( + envelope.state_root == state_root, + EnvelopeProcessingError::InvalidStateRoot { + state: state_root, + envelope: envelope.state_root, + } + ); + } Ok(()) } diff --git a/consensus/types/src/builder/builder_bid.rs b/consensus/types/src/builder/builder_bid.rs index 1018fadb64..e706b01283 100644 --- a/consensus/types/src/builder/builder_bid.rs +++ b/consensus/types/src/builder/builder_bid.rs @@ -196,7 +196,7 @@ impl SignedBuilderBid { .pubkey() .decompress() .map(|pubkey| { - let domain = spec.get_builder_domain(); + let domain = spec.get_builder_application_domain(); let message = self.message.signing_root(domain); self.signature.verify(&pubkey, message) }) diff 
--git a/consensus/types/src/core/application_domain.rs b/consensus/types/src/core/application_domain.rs index 5e33f2dfd5..ff55a91034 100644 --- a/consensus/types/src/core/application_domain.rs +++ b/consensus/types/src/core/application_domain.rs @@ -4,6 +4,7 @@ pub const APPLICATION_DOMAIN_BUILDER: u32 = 16777216; #[derive(Debug, PartialEq, Clone, Copy)] pub enum ApplicationDomain { + /// NOTE: This domain is only used for out-of-protocol block building, DO NOT use it for Gloas/ePBS. Builder, } diff --git a/consensus/types/src/core/chain_spec.rs b/consensus/types/src/core/chain_spec.rs index 6d25e3baf4..adf87dee94 100644 --- a/consensus/types/src/core/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -549,7 +549,9 @@ impl ChainSpec { // This should be updated to include the current fork and the genesis validators root, but discussion is ongoing: // // https://github.com/ethereum/builder-specs/issues/14 - pub fn get_builder_domain(&self) -> Hash256 { + // + // NOTE: This domain is only used for out-of-protocol block building, DO NOT use it for Gloas/ePBS. 
+ pub fn get_builder_application_domain(&self) -> Hash256 { self.compute_domain( Domain::ApplicationMask(ApplicationDomain::Builder), self.genesis_fork_version, diff --git a/consensus/types/src/validator/validator_registration_data.rs b/consensus/types/src/validator/validator_registration_data.rs index a0a1df7dc5..df2293cbae 100644 --- a/consensus/types/src/validator/validator_registration_data.rs +++ b/consensus/types/src/validator/validator_registration_data.rs @@ -31,7 +31,7 @@ impl SignedValidatorRegistrationData { .pubkey .decompress() .map(|pubkey| { - let domain = spec.get_builder_domain(); + let domain = spec.get_builder_application_domain(); let message = self.message.signing_root(domain); self.signature.verify(&pubkey, message) }) diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 2f08727045..59d2bef24e 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -5,6 +5,7 @@ use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yam use serde::Deserialize; use ssz::Decode; use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache; +use state_processing::envelope_processing::VerifyStateRoot; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::per_block_processing::process_operations::{ process_consolidation_requests, process_deposit_requests_post_gloas, @@ -458,7 +459,14 @@ impl Operation for SignedExecutionPayloadEnvelope { .as_ref() .is_some_and(|e| e.execution_valid); if valid { - process_execution_payload_envelope(state, None, self, VerifySignatures::True, spec) + process_execution_payload_envelope( + state, + None, + self, + VerifySignatures::True, + VerifyStateRoot::True, + spec, + ) } else { Err(EnvelopeProcessingError::ExecutionInvalid) } diff --git a/validator_client/lighthouse_validator_store/src/lib.rs 
b/validator_client/lighthouse_validator_store/src/lib.rs index 7b6a582363..7806482ffb 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -20,12 +20,12 @@ use task_executor::TaskExecutor; use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, - ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, - SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, - SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, - SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, - graffiti::GraffitiString, + ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, ExecutionPayloadEnvelope, Fork, + FullPayload, Graffiti, Hash256, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedExecutionPayloadEnvelope, SignedRoot, + SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, + SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, + ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; use validator_store::{ DoppelgangerStatus, Error as ValidatorStoreError, ProposalData, SignedBlock, UnsignedBlock, @@ -954,7 +954,7 @@ impl ValidatorStore for LighthouseValidatorS &self, validator_registration_data: ValidatorRegistrationData, ) -> Result { - let domain_hash = self.spec.get_builder_domain(); + let domain_hash = self.spec.get_builder_application_domain(); let signing_root = validator_registration_data.signing_root(domain_hash); let signing_method = @@ -1242,4 +1242,35 @@ impl ValidatorStore for LighthouseValidatorS .get_builder_proposals_defaulting(validator.get_builder_proposals()), }) } + + /// Sign an 
`ExecutionPayloadEnvelope` for Gloas (local building). + /// The proposer acts as the builder and signs with the BeaconBuilder domain. + async fn sign_execution_payload_envelope( + &self, + validator_pubkey: PublicKeyBytes, + envelope: ExecutionPayloadEnvelope, + ) -> Result, Error> { + let signing_context = self.signing_context( + Domain::BeaconBuilder, + envelope.slot.epoch(E::slots_per_epoch()), + ); + + // Execution payload envelope signing is not slashable, bypass doppelganger protection. + let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?; + + let signature = signing_method + .get_signature::>( + SignableMessage::ExecutionPayloadEnvelope(&envelope), + signing_context, + &self.spec, + &self.task_executor, + ) + .await + .map_err(Error::SpecificError)?; + + Ok(SignedExecutionPayloadEnvelope { + message: envelope, + signature, + }) + } } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index bf3cc6a17d..c132d86c17 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -49,6 +49,7 @@ pub enum SignableMessage<'a, E: EthSpec, Payload: AbstractExecPayload = FullP SignedContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), VoluntaryExit(&'a VoluntaryExit), + ExecutionPayloadEnvelope(&'a ExecutionPayloadEnvelope), } impl> SignableMessage<'_, E, Payload> { @@ -70,6 +71,7 @@ impl> SignableMessage<'_, E, Payload SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain), SignableMessage::ValidatorRegistration(v) => v.signing_root(domain), SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain), + SignableMessage::ExecutionPayloadEnvelope(e) => e.signing_root(domain), } } } @@ -233,6 +235,9 @@ impl SigningMethod { Web3SignerObject::ValidatorRegistration(v) } SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e), + 
SignableMessage::ExecutionPayloadEnvelope(e) => { + Web3SignerObject::ExecutionPayloadEnvelope(e) + } }; // Determine the Web3Signer message type. diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 246d9e9e09..e6fc8f3ba2 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -19,6 +19,8 @@ pub enum MessageType { SyncCommitteeSelectionProof, SyncCommitteeContributionAndProof, ValidatorRegistration, + // TODO(gloas) verify w/ web3signer specs + ExecutionPayloadEnvelope, } #[derive(Debug, PartialEq, Copy, Clone, Serialize)] @@ -75,6 +77,7 @@ pub enum Web3SignerObject<'a, E: EthSpec, Payload: AbstractExecPayload> { SyncAggregatorSelectionData(&'a SyncAggregatorSelectionData), ContributionAndProof(&'a ContributionAndProof), ValidatorRegistration(&'a ValidatorRegistrationData), + ExecutionPayloadEnvelope(&'a ExecutionPayloadEnvelope), } impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Payload> { @@ -140,6 +143,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Pa MessageType::SyncCommitteeContributionAndProof } Web3SignerObject::ValidatorRegistration(_) => MessageType::ValidatorRegistration, + Web3SignerObject::ExecutionPayloadEnvelope(_) => MessageType::ExecutionPayloadEnvelope, } } } diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index df4c9b223c..1535f50663 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -14,6 +14,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; +use types::consts::gloas::BUILDER_INDEX_SELF_BUILD; use types::{BlockType, ChainSpec, EthSpec, Graffiti, Slot}; use 
validator_store::{Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore}; @@ -334,7 +335,7 @@ impl BlockService { #[instrument(skip_all, fields(%slot, ?validator_pubkey))] async fn sign_and_publish_block( &self, - proposer_fallback: ProposerFallback, + proposer_fallback: &ProposerFallback, slot: Slot, graffiti: Option, validator_pubkey: &PublicKeyBytes, @@ -460,73 +461,145 @@ impl BlockService { info!(slot = slot.as_u64(), "Requesting unsigned block"); - // Request an SSZ block from all beacon nodes in order, returning on the first successful response. - // If all nodes fail, run a second pass falling back to JSON. - // - // Proposer nodes will always be tried last during each pass since it's likely that they don't have a - // great view of attestations on the network. - let ssz_block_response = proposer_fallback - .request_proposers_last(|beacon_node| async move { - let _get_timer = validator_metrics::start_timer_vec( - &validator_metrics::BLOCK_SERVICE_TIMES, - &[validator_metrics::BEACON_BLOCK_HTTP_GET], - ); - beacon_node - .get_validator_blocks_v3_ssz::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - self_ref.graffiti_policy, - ) - .await - }) - .await; + // Check if Gloas fork is active at this slot + let fork_name = self_ref.chain_spec.fork_name_at_slot::(slot); - let block_response = match ssz_block_response { - Ok((ssz_block_response, _metadata)) => ssz_block_response, - Err(e) => { - warn!( - slot = slot.as_u64(), - error = %e, - "SSZ block production failed, falling back to JSON" - ); + let (block_proposer, unsigned_block) = if fork_name.gloas_enabled() { + // Use V4 block production for Gloas + // Request an SSZ block from all beacon nodes in order, returning on the first successful response. + // If all nodes fail, run a second pass falling back to JSON. 
+ let ssz_block_response = proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks_v4_ssz::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await + }) + .await; - proposer_fallback - .request_proposers_last(|beacon_node| async move { - let _get_timer = validator_metrics::start_timer_vec( - &validator_metrics::BLOCK_SERVICE_TIMES, - &[validator_metrics::BEACON_BLOCK_HTTP_GET], - ); - let (json_block_response, _metadata) = beacon_node - .get_validator_blocks_v3::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - self_ref.graffiti_policy, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })?; + let block_response = match ssz_block_response { + Ok((ssz_block_response, _metadata)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "SSZ V4 block production failed, falling back to JSON" + ); - Ok(json_block_response.data) - }) - .await - .map_err(BlockError::from)? 
- } - }; + proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + let (json_block_response, _metadata) = beacon_node + .get_validator_blocks_v4::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; - let (block_proposer, unsigned_block) = match block_response { - eth2::types::ProduceBlockV3Response::Full(block) => { - (block.block().proposer_index(), UnsignedBlock::Full(block)) - } - eth2::types::ProduceBlockV3Response::Blinded(block) => { - (block.proposer_index(), UnsignedBlock::Blinded(block)) + Ok(json_block_response.data) + }) + .await + .map_err(BlockError::from)? + } + }; + + // Gloas blocks don't have blobs (they're in the execution layer) + let block_contents = eth2::types::FullBlockContents::Block(block_response); + ( + block_contents.block().proposer_index(), + UnsignedBlock::Full(block_contents), + ) + } else { + // Use V3 block production for pre-Gloas forks + // Request an SSZ block from all beacon nodes in order, returning on the first successful response. + // If all nodes fail, run a second pass falling back to JSON. + // + // Proposer nodes will always be tried last during each pass since it's likely that they don't have a + // great view of attestations on the network. 
+ let ssz_block_response = proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + beacon_node + .get_validator_blocks_v3_ssz::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await + }) + .await; + + let block_response = match ssz_block_response { + Ok((ssz_block_response, _metadata)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "SSZ block production failed, falling back to JSON" + ); + + proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + let (json_block_response, _metadata) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + Ok(json_block_response.data) + }) + .await + .map_err(BlockError::from)? + } + }; + + match block_response { + eth2::types::ProduceBlockV3Response::Full(block) => { + (block.block().proposer_index(), UnsignedBlock::Full(block)) + } + eth2::types::ProduceBlockV3Response::Blinded(block) => { + (block.proposer_index(), UnsignedBlock::Blinded(block)) + } } }; @@ -539,7 +612,7 @@ impl BlockService { self_ref .sign_and_publish_block( - proposer_fallback, + &proposer_fallback, slot, graffiti, &validator_pubkey, @@ -547,6 +620,108 @@ impl BlockService { ) .await?; + // TODO(gloas) we only need to fetch, sign and publish the envelope in the local building case. + // Right now we always default to local building. 
Once we implement trustless/trusted builder logic + // we should check the bid for index == BUILDER_INDEX_SELF_BUILD + if fork_name.gloas_enabled() { + self_ref + .fetch_sign_and_publish_payload_envelope( + &proposer_fallback, + slot, + &validator_pubkey, + ) + .await?; + } + + Ok(()) + } + + /// Fetch, sign, and publish the execution payload envelope for Gloas. + /// This should be called after the block has been published. + /// + /// TODO(gloas): For multi-BN setups, we need to track which beacon node produced the block + /// and fetch the envelope from that same node. The envelope is cached per-BN, + /// so fetching from a different BN than the one that built the block will fail. + /// See: https://github.com/sigp/lighthouse/pull/8313 + #[instrument(skip_all)] + async fn fetch_sign_and_publish_payload_envelope( + &self, + _proposer_fallback: &ProposerFallback, + slot: Slot, + validator_pubkey: &PublicKeyBytes, + ) -> Result<(), BlockError> { + info!(slot = slot.as_u64(), "Fetching execution payload envelope"); + + // Fetch the envelope from the beacon node. Use builder_index=BUILDER_INDEX_SELF_BUILD for local building. + // TODO(gloas): Use proposer_fallback once multi-BN is supported. 
+ let envelope = self + .beacon_nodes + .first_success(|beacon_node| async move { + beacon_node + .get_validator_execution_payload_envelope_ssz::( + slot, + BUILDER_INDEX_SELF_BUILD, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error fetching execution payload envelope: {:?}", + e + )) + }) + }) + .await?; + + info!( + slot = slot.as_u64(), + beacon_block_root = %envelope.beacon_block_root, + "Received execution payload envelope, signing" + ); + + // Sign the envelope + let signed_envelope = self + .validator_store + .sign_execution_payload_envelope(*validator_pubkey, envelope) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error signing execution payload envelope: {:?}", + e + )) + })?; + + info!( + slot = slot.as_u64(), + "Signed execution payload envelope, publishing" + ); + + let fork_name = self.chain_spec.fork_name_at_slot::(slot); + + // Publish the signed envelope + // TODO(gloas): Use proposer_fallback once multi-BN is supported. + self.beacon_nodes + .first_success(|beacon_node| { + let signed_envelope = signed_envelope.clone(); + async move { + beacon_node + .post_beacon_execution_payload_envelope_ssz(&signed_envelope, fork_name) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error publishing execution payload envelope: {:?}", + e + )) + }) + } + }) + .await?; + + info!( + slot = slot.as_u64(), + beacon_block_root = %signed_envelope.message.beacon_block_root, + "Successfully published signed execution payload envelope" + ); + Ok(()) } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 4fdbb8064c..87ab669e8d 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -5,8 +5,9 @@ use std::fmt::Debug; use std::future::Future; use std::sync::Arc; use types::{ - Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, - SelectionProof, 
SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedContributionAndProof, + Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, + ExecutionPayloadEnvelope, Graffiti, Hash256, SelectionProof, SignedAggregateAndProof, + SignedBlindedBeaconBlock, SignedContributionAndProof, SignedExecutionPayloadEnvelope, SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; @@ -178,6 +179,13 @@ pub trait ValidatorStore: Send + Sync { /// runs. fn prune_slashing_protection_db(&self, current_epoch: Epoch, first_run: bool); + /// Sign an `ExecutionPayloadEnvelope` for Gloas. + fn sign_execution_payload_envelope( + &self, + validator_pubkey: PublicKeyBytes, + envelope: ExecutionPayloadEnvelope, + ) -> impl Future, Error>> + Send; + /// Returns `ProposalData` for the provided `pubkey` if it exists in `InitializedValidators`. /// `ProposalData` fields include defaulting logic described in `get_fee_recipient_defaulting`, /// `get_gas_limit_defaulting`, and `get_builder_proposals_defaulting`.