Gloas local block building MVP (#8754)

The flow for local block building is
1. Create execution payload and bid
2. Construct beacon block
3. Sign beacon block and publish
4. Sign execution payload and publish

This PR adds the beacon block v4 flow, GET payload envelope, and POST payload envelope endpoints (local block building only). The spec for these endpoints can be found here: https://github.com/ethereum/beacon-APIs/pull/552 and is subject to change.

We needed a way to store the unsigned execution payload envelope associated with the execution payload bid that was included in the block. I introduced a new cache that stores these unsigned execution payload envelopes. The GET payload envelope endpoint queries this cache directly so that a proposer, after publishing a block, can fetch the payload envelope, then sign and publish it.

I kept payload signing and publishing within the validator's block service to keep things simple for now. The idea was to build out a block production MVP for devnet 0, try not to affect any non-Gloas code paths, and build things out in such a way that it will be easy to deprecate pre-Gloas code paths later on (for example block production v2 and v3).

We will eventually need to track which beacon node was queried for the block so that we can later query it for the payload. But that's not needed for the devnet.


  


Co-Authored-By: Eitan Seri-Levi <eserilev@gmail.com>

Co-Authored-By: Michael Sproul <michael@sigmaprime.io>

Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>

Co-Authored-By: Eitan Seri-Levi <eserilev@ucsc.edu>
This commit is contained in:
Eitan Seri-Levi
2026-02-16 18:09:35 -08:00
committed by GitHub
parent 945f6637c5
commit eec0700f94
31 changed files with 2656 additions and 346 deletions

View File

@@ -0,0 +1,890 @@
use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::Arc;
use bls::Signature;
use execution_layer::{
BlockProposalContentsGloas, BuilderParams, PayloadAttributes, PayloadParameters,
};
use operation_pool::CompactAttestationRef;
use ssz::Encode;
use state_processing::common::get_attesting_indices_from_state;
use state_processing::envelope_processing::{VerifyStateRoot, process_execution_payload_envelope};
use state_processing::epoch_cache::initialize_epoch_cache;
use state_processing::per_block_processing::{
compute_timestamp_at_slot, get_expected_withdrawals, verify_attestation_for_block_inclusion,
};
use state_processing::{
BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures,
};
use state_processing::{VerifyOperation, state_advance::complete_state_advance};
use task_executor::JoinHandle;
use tracing::{Instrument, Span, debug, debug_span, error, instrument, trace, warn};
use tree_hash::TreeHash;
use types::consts::gloas::BUILDER_INDEX_SELF_BUILD;
use types::{
Address, Attestation, AttestationElectra, AttesterSlashing, AttesterSlashingElectra,
BeaconBlock, BeaconBlockBodyGloas, BeaconBlockGloas, BeaconState, BeaconStateError,
BuilderIndex, Deposit, Eth1Data, EthSpec, ExecutionBlockHash, ExecutionPayloadBid,
ExecutionPayloadEnvelope, ExecutionPayloadGloas, ExecutionRequests, FullPayload, Graffiti,
Hash256, PayloadAttestation, ProposerSlashing, RelativeEpoch, SignedBeaconBlock,
SignedBlsToExecutionChange, SignedExecutionPayloadBid, SignedExecutionPayloadEnvelope,
SignedVoluntaryExit, Slot, SyncAggregate, Withdrawal, Withdrawals,
};
use crate::{
BeaconChain, BeaconChainError, BeaconChainTypes, BlockProductionError,
ProduceBlockVerification, graffiti_calculator::GraffitiSettings, metrics,
};
/// Bid value used when the proposer builds the payload locally (self-build).
pub const BID_VALUE_SELF_BUILD: u64 = 0;
/// Execution payment placed in a bid; zero for local/trustless building.
pub const EXECUTION_PAYMENT_TRUSTLESS_BUILD: u64 = 0;
// Consensus-layer reward computed for the produced block (see
// `compute_beacon_block_reward`); returned alongside the block.
type ConsensusBlockValue = u64;
/// An unsigned block plus its consensus block value.
type BlockProductionResult<E> = (BeaconBlock<E, FullPayload<E>>, ConsensusBlockValue);
/// Outcome of the async payload-preparation task.
pub type PreparePayloadResult<E> = Result<BlockProposalContentsGloas<E>, BlockProductionError>;
/// Handle to the spawned payload-preparation task; resolves to `None` if the
/// executor is shutting down.
pub type PreparePayloadHandle<E> = JoinHandle<Option<PreparePayloadResult<E>>>;
/// Intermediate state of Gloas block production: everything needed for the
/// block body except the execution payload bid, which is produced
/// asynchronously and spliced in by `complete_partial_beacon_block_gloas`.
pub struct PartialBeaconBlock<E: EthSpec> {
    slot: Slot,
    proposer_index: u64,
    parent_root: Hash256,
    randao_reveal: Signature,
    eth1_data: Eth1Data,
    graffiti: Graffiti,
    proposer_slashings: Vec<ProposerSlashing>,
    // Gloas blocks only carry the Electra variants of slashings/attestations;
    // base variants are filtered out during packing.
    attester_slashings: Vec<AttesterSlashingElectra<E>>,
    attestations: Vec<AttestationElectra<E>>,
    payload_attestations: Vec<PayloadAttestation<E>>,
    deposits: Vec<Deposit>,
    voluntary_exits: Vec<SignedVoluntaryExit>,
    // `None` only for pre-Altair states (no sync committee).
    sync_aggregate: Option<SyncAggregate<E>>,
    bls_to_execution_changes: Vec<SignedBlsToExecutionChange>,
}
/// Data needed to construct an ExecutionPayloadEnvelope.
/// The envelope requires the beacon_block_root which can only be computed after the block exists.
pub struct ExecutionPayloadData<E: types::EthSpec> {
pub payload: ExecutionPayloadGloas<E>,
pub execution_requests: ExecutionRequests<E>,
pub builder_index: BuilderIndex,
pub slot: Slot,
}
impl<T: BeaconChainTypes> BeaconChain<T> {
/// Produce an unsigned Gloas beacon block for `slot`, returning it together
/// with its consensus block value.
///
/// Loads the parent state on a blocking task (it may hit the database), then
/// hands off to `produce_block_on_state_gloas`.
pub async fn produce_block_with_verification_gloas(
    self: &Arc<Self>,
    randao_reveal: Signature,
    slot: Slot,
    graffiti_settings: GraffitiSettings,
    verification: ProduceBlockVerification,
    // Currently unused: local building does not consider builder bids.
    _builder_boost_factor: Option<u64>,
) -> Result<BlockProductionResult<T::EthSpec>, BlockProductionError> {
    metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS);
    let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES);
    // Part 1/2 (blocking)
    //
    // Load the parent state from disk.
    let chain = self.clone();
    let span = Span::current();
    let (state, state_root_opt) = self
        .task_executor
        .spawn_blocking_handle(
            move || {
                let _guard =
                    debug_span!(parent: span, "load_state_for_block_production").entered();
                chain.load_state_for_block_production(slot)
            },
            "load_state_for_block_production",
        )
        // `None` from the executor means it is shutting down.
        .ok_or(BlockProductionError::ShuttingDown)?
        .await
        .map_err(BlockProductionError::TokioJoin)??;
    // Part 2/2 (async, with some blocking components)
    //
    // Produce the block upon the state
    self.produce_block_on_state_gloas(
        state,
        state_root_opt,
        slot,
        randao_reveal,
        graffiti_settings,
        verification,
    )
    .await
}
// TODO(gloas) need to implement builder boost factor logic
/// Produce a Gloas block upon a pre-loaded `state`.
///
/// Runs in three parts: (1) blocking state advance + operation packing into a
/// `PartialBeaconBlock`, (2) async production of the execution payload bid,
/// (3) blocking completion of the block with the bid (state-root computation
/// and envelope caching).
#[instrument(level = "debug", skip_all)]
pub async fn produce_block_on_state_gloas(
    self: &Arc<Self>,
    state: BeaconState<T::EthSpec>,
    // Root of `state` if already known; saves recomputing it during the advance.
    state_root_opt: Option<Hash256>,
    produce_at_slot: Slot,
    randao_reveal: Signature,
    graffiti_settings: GraffitiSettings,
    verification: ProduceBlockVerification,
) -> Result<BlockProductionResult<T::EthSpec>, BlockProductionError> {
    // Part 1/3 (blocking)
    //
    // Perform the state advance and block-packing functions.
    let chain = self.clone();
    let graffiti = self
        .graffiti_calculator
        .get_graffiti(graffiti_settings)
        .await;
    let span = Span::current();
    let (partial_beacon_block, state) = self
        .task_executor
        .spawn_blocking_handle(
            move || {
                let _guard =
                    debug_span!(parent: span, "produce_partial_beacon_block_gloas").entered();
                chain.produce_partial_beacon_block_gloas(
                    state,
                    state_root_opt,
                    produce_at_slot,
                    randao_reveal,
                    graffiti,
                )
            },
            "produce_partial_beacon_block_gloas",
        )
        .ok_or(BlockProductionError::ShuttingDown)?
        .await
        .map_err(BlockProductionError::TokioJoin)??;
    // Part 2/3 (async)
    //
    // Produce the execution payload bid.
    // TODO(gloas) this is strictly for building local bids
    // We'll need to build out trustless/trusted bid paths.
    let (execution_payload_bid, state, payload_data) = self
        .clone()
        .produce_execution_payload_bid(
            state,
            produce_at_slot,
            BID_VALUE_SELF_BUILD,
            BUILDER_INDEX_SELF_BUILD,
        )
        .await?;
    // Part 3/3 (blocking)
    //
    // Complete the block with the execution payload bid.
    let chain = self.clone();
    let span = Span::current();
    self.task_executor
        .spawn_blocking_handle(
            move || {
                let _guard =
                    debug_span!(parent: span, "complete_partial_beacon_block_gloas").entered();
                chain.complete_partial_beacon_block_gloas(
                    partial_beacon_block,
                    execution_payload_bid,
                    payload_data,
                    state,
                    verification,
                )
            },
            "complete_partial_beacon_block_gloas",
        )
        .ok_or(BlockProductionError::ShuttingDown)?
        .await
        .map_err(BlockProductionError::TokioJoin)?
}
/// Build a [`PartialBeaconBlock`] upon `state`: advance the state to
/// `produce_at_slot`, then pack operations (slashings, exits, attestations,
/// sync aggregate, BLS-to-execution changes) from the op pool.
///
/// The execution payload bid is produced separately and added later by
/// `complete_partial_beacon_block_gloas`.
///
/// Fix: the paranoid-mode voluntary-exit filter previously logged
/// "Attempted to include an invalid proposer slashing" (copy-paste from the
/// slashing branch); it now names voluntary exits.
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
fn produce_partial_beacon_block_gloas(
    self: &Arc<Self>,
    mut state: BeaconState<T::EthSpec>,
    state_root_opt: Option<Hash256>,
    produce_at_slot: Slot,
    randao_reveal: Signature,
    graffiti: Graffiti,
) -> Result<(PartialBeaconBlock<T::EthSpec>, BeaconState<T::EthSpec>), BlockProductionError>
{
    // It is invalid to try to produce a block using a state from a future slot.
    if state.slot() > produce_at_slot {
        return Err(BlockProductionError::StateSlotTooHigh {
            produce_at_slot,
            state_slot: state.slot(),
        });
    }
    let slot_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_SLOT_PROCESS_TIMES);
    // Ensure the state has performed a complete transition into the required slot.
    complete_state_advance(&mut state, state_root_opt, produce_at_slot, &self.spec)?;
    drop(slot_timer);
    state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
    state.apply_pending_mutations()?;
    // For the genesis slot there is no prior block root in the state; fall
    // back to the latest block header's root.
    let parent_root = if state.slot() > 0 {
        *state
            .get_block_root(state.slot() - 1)
            .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?
    } else {
        state.latest_block_header().canonical_root()
    };
    let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64;
    let slashings_and_exits_span = debug_span!("get_slashings_and_exits").entered();
    let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) =
        self.op_pool.get_slashings_and_exits(&state, &self.spec);
    drop(slashings_and_exits_span);
    let eth1_data = state.eth1_data().clone();
    let deposits = vec![];
    let bls_changes_span = debug_span!("get_bls_to_execution_changes").entered();
    let bls_to_execution_changes = self
        .op_pool
        .get_bls_to_execution_changes(&state, &self.spec);
    drop(bls_changes_span);
    // Iterate through the naive aggregation pool and ensure all the attestations from there
    // are included in the operation pool.
    {
        let _guard = debug_span!("import_naive_aggregation_pool").entered();
        let _unagg_import_timer =
            metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES);
        for attestation in self.naive_aggregation_pool.read().iter() {
            let import = |attestation: &Attestation<T::EthSpec>| {
                let attesting_indices =
                    get_attesting_indices_from_state(&state, attestation.to_ref())?;
                self.op_pool
                    .insert_attestation(attestation.clone(), attesting_indices)
            };
            if let Err(e) = import(attestation) {
                // Don't stop block production if there's an error, just create a log.
                error!(
                    reason = ?e,
                    "Attestation did not transfer to op pool"
                );
            }
        }
    };
    let mut attestations = {
        let _guard = debug_span!("pack_attestations").entered();
        let _attestation_packing_timer =
            metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES);
        // Epoch cache and total balance cache are required for op pool packing.
        state.build_total_active_balance_cache(&self.spec)?;
        initialize_epoch_cache(&mut state, &self.spec)?;
        let mut prev_filter_cache = HashMap::new();
        let prev_attestation_filter = |att: &CompactAttestationRef<T::EthSpec>| {
            self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state)
        };
        let mut curr_filter_cache = HashMap::new();
        let curr_attestation_filter = |att: &CompactAttestationRef<T::EthSpec>| {
            self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state)
        };
        self.op_pool
            .get_attestations(
                &state,
                prev_attestation_filter,
                curr_attestation_filter,
                &self.spec,
            )
            .map_err(BlockProductionError::OpPoolError)?
    };
    // If paranoid mode is enabled re-check the signatures of every included message.
    // This will be a lot slower but guards against bugs in block production and can be
    // quickly rolled out without a release.
    if self.config.paranoid_block_proposal {
        let mut tmp_ctxt = ConsensusContext::new(state.slot());
        attestations.retain(|att| {
            verify_attestation_for_block_inclusion(
                &state,
                att.to_ref(),
                &mut tmp_ctxt,
                VerifySignatures::True,
                &self.spec,
            )
            .map_err(|e| {
                warn!(
                    err = ?e,
                    block_slot = %state.slot(),
                    attestation = ?att,
                    "Attempted to include an invalid attestation"
                );
            })
            .is_ok()
        });
        proposer_slashings.retain(|slashing| {
            slashing
                .clone()
                .validate(&state, &self.spec)
                .map_err(|e| {
                    warn!(
                        err = ?e,
                        block_slot = %state.slot(),
                        ?slashing,
                        "Attempted to include an invalid proposer slashing"
                    );
                })
                .is_ok()
        });
        attester_slashings.retain(|slashing| {
            slashing
                .clone()
                .validate(&state, &self.spec)
                .map_err(|e| {
                    warn!(
                        err = ?e,
                        block_slot = %state.slot(),
                        ?slashing,
                        "Attempted to include an invalid attester slashing"
                    );
                })
                .is_ok()
        });
        voluntary_exits.retain(|exit| {
            exit.clone()
                .validate(&state, &self.spec)
                .map_err(|e| {
                    warn!(
                        err = ?e,
                        block_slot = %state.slot(),
                        ?exit,
                        // Previously mislabelled as "proposer slashing".
                        "Attempted to include an invalid voluntary exit"
                    );
                })
                .is_ok()
        });
        // TODO(gloas) verify payload attestation signature here as well
    }
    // Gloas only accepts the Electra variants; drop any base-variant operations.
    let attester_slashings = attester_slashings
        .into_iter()
        .filter_map(|a| match a {
            AttesterSlashing::Base(_) => None,
            AttesterSlashing::Electra(a) => Some(a),
        })
        .collect::<Vec<_>>();
    let attestations = attestations
        .into_iter()
        .filter_map(|a| match a {
            Attestation::Base(_) => None,
            Attestation::Electra(a) => Some(a),
        })
        .collect::<Vec<_>>();
    let slot = state.slot();
    let sync_aggregate = if matches!(&state, BeaconState::Base(_)) {
        None
    } else {
        let sync_aggregate = self
            .op_pool
            .get_sync_aggregate(&state)
            .map_err(BlockProductionError::OpPoolError)?
            .unwrap_or_else(|| {
                warn!(
                    slot = %state.slot(),
                    "Producing block with no sync contributions"
                );
                SyncAggregate::new()
            });
        Some(sync_aggregate)
    };
    Ok((
        PartialBeaconBlock {
            slot,
            proposer_index,
            parent_root,
            randao_reveal,
            eth1_data,
            graffiti,
            proposer_slashings,
            attester_slashings,
            attestations,
            deposits,
            voluntary_exits,
            sync_aggregate,
            // TODO(gloas) need to implement payload attestations
            payload_attestations: vec![],
            bls_to_execution_changes,
        },
        state,
    ))
}
/// Complete a [`PartialBeaconBlock`] into a full (unsigned) Gloas block.
///
/// Runs `per_block_processing` to compute the post-state root and, when
/// `payload_data` is `Some` (local building), constructs the matching
/// `ExecutionPayloadEnvelope` and caches it in `pending_payload_envelopes`
/// keyed by slot, so the proposer can later fetch, sign and publish it.
///
/// Returns the block and its consensus block value.
#[allow(clippy::type_complexity)]
fn complete_partial_beacon_block_gloas(
    &self,
    partial_beacon_block: PartialBeaconBlock<T::EthSpec>,
    signed_execution_payload_bid: SignedExecutionPayloadBid<T::EthSpec>,
    // `Some` for local building; `None` when a builder supplies the envelope.
    payload_data: Option<ExecutionPayloadData<T::EthSpec>>,
    mut state: BeaconState<T::EthSpec>,
    verification: ProduceBlockVerification,
) -> Result<(BeaconBlock<T::EthSpec, FullPayload<T::EthSpec>>, u64), BlockProductionError> {
    let PartialBeaconBlock {
        slot,
        proposer_index,
        parent_root,
        randao_reveal,
        eth1_data,
        graffiti,
        proposer_slashings,
        attester_slashings,
        attestations,
        deposits,
        voluntary_exits,
        sync_aggregate,
        payload_attestations,
        bls_to_execution_changes,
    } = partial_beacon_block;
    // Only Gloas states can produce Gloas blocks.
    let beacon_block = match &state {
        BeaconState::Base(_)
        | BeaconState::Altair(_)
        | BeaconState::Bellatrix(_)
        | BeaconState::Capella(_)
        | BeaconState::Deneb(_)
        | BeaconState::Electra(_)
        | BeaconState::Fulu(_) => {
            return Err(BlockProductionError::InvalidBlockVariant(
                "Cannot construct a block pre-Gloas".to_owned(),
            ));
        }
        BeaconState::Gloas(_) => BeaconBlock::Gloas(BeaconBlockGloas {
            slot,
            proposer_index,
            parent_root,
            // Placeholder; the real state root is filled in after processing below.
            state_root: Hash256::ZERO,
            body: BeaconBlockBodyGloas {
                randao_reveal,
                eth1_data,
                graffiti,
                proposer_slashings: proposer_slashings
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                attester_slashings: attester_slashings
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                attestations: attestations
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                deposits: deposits
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                voluntary_exits: voluntary_exits
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                sync_aggregate: sync_aggregate
                    .ok_or(BlockProductionError::MissingSyncAggregate)?,
                bls_to_execution_changes: bls_to_execution_changes
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                signed_execution_payload_bid,
                payload_attestations: payload_attestations
                    .try_into()
                    .map_err(BlockProductionError::SszTypesError)?,
                _phantom: PhantomData::<FullPayload<T::EthSpec>>,
            },
        }),
    };
    let signed_beacon_block = SignedBeaconBlock::from_block(
        beacon_block,
        // The block is not signed here, that is the task of a validator client.
        Signature::empty(),
    );
    let block_size = signed_beacon_block.ssz_bytes_len();
    debug!(%block_size, "Produced block on state");
    metrics::observe(&metrics::BLOCK_SIZE, block_size as f64);
    if block_size > self.config.max_network_size {
        return Err(BlockProductionError::BlockTooLarge(block_size));
    }
    let process_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_PROCESS_TIMES);
    let signature_strategy = match verification {
        ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao,
        ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification,
    };
    // Use a context without block root or proposer index so that both are checked.
    let mut ctxt = ConsensusContext::new(signed_beacon_block.slot());
    // Best-effort: a reward-computation failure yields value 0 rather than
    // aborting block production.
    let consensus_block_value = self
        .compute_beacon_block_reward(signed_beacon_block.message(), &mut state)
        .map(|reward| reward.total)
        .unwrap_or(0);
    state_processing::per_block_processing(
        &mut state,
        &signed_beacon_block,
        signature_strategy,
        VerifyBlockRoot::True,
        &mut ctxt,
        &self.spec,
    )?;
    drop(process_timer);
    let state_root_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_ROOT_TIMES);
    let state_root = state.update_tree_hash_cache()?;
    drop(state_root_timer);
    let (mut block, _) = signed_beacon_block.deconstruct();
    *block.state_root_mut() = state_root;
    // Construct and cache the ExecutionPayloadEnvelope if we have payload data.
    // For local building, we always have payload data.
    // For trustless building, the builder will provide the envelope separately.
    if let Some(payload_data) = payload_data {
        let beacon_block_root = block.tree_hash_root();
        let execution_payload_envelope = ExecutionPayloadEnvelope {
            payload: payload_data.payload,
            execution_requests: payload_data.execution_requests,
            builder_index: payload_data.builder_index,
            beacon_block_root,
            slot: payload_data.slot,
            // Placeholder; set after envelope processing below.
            state_root: Hash256::ZERO,
        };
        let mut signed_envelope = SignedExecutionPayloadEnvelope {
            message: execution_payload_envelope,
            signature: Signature::empty(),
        };
        // TODO(gloas) add better error variant
        // We skip state root verification here because the relevant state root
        // cant be calculated until after the new block has been constructed.
        process_execution_payload_envelope(
            &mut state,
            None,
            &signed_envelope,
            VerifySignatures::False,
            VerifyStateRoot::False,
            &self.spec,
        )
        .map_err(|_| {
            BlockProductionError::GloasNotImplemented(
                "process_execution_payload_envelope failed".to_owned(),
            )
        })?;
        signed_envelope.message.state_root = state.update_tree_hash_cache()?;
        // Cache the envelope for later retrieval by the validator for signing and publishing.
        let envelope_slot = payload_data.slot;
        // TODO(gloas) might be safer to cache by root instead of by slot.
        // We should revisit this once this code path + beacon api spec matures
        self.pending_payload_envelopes
            .write()
            .insert(envelope_slot, signed_envelope.message);
        debug!(
            %beacon_block_root,
            slot = %envelope_slot,
            "Cached pending execution payload envelope"
        );
    }
    metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
    trace!(
        parent = ?block.parent_root(),
        attestations = block.body().attestations_len(),
        slot = %block.slot(),
        "Produced beacon block"
    );
    Ok((block, consensus_block_value))
}
// TODO(gloas) introduce `ProposerPreferences` so we can build out trustless
// bid building. Right now this only works for local building.
/// Produce an `ExecutionPayloadBid` for some `slot` upon the given `state`.
/// This function assumes we've already advanced `state`.
///
/// Returns the signed bid, the state, and optionally the payload data needed to construct
/// the `ExecutionPayloadEnvelope` after the beacon block is created.
///
/// For local building, payload data is always returned (`Some`).
/// For trustless building, the builder provides the envelope separately, so `None` is returned.
#[allow(clippy::type_complexity)]
#[instrument(level = "debug", skip_all)]
pub async fn produce_execution_payload_bid(
    self: Arc<Self>,
    mut state: BeaconState<T::EthSpec>,
    produce_at_slot: Slot,
    bid_value: u64,
    builder_index: BuilderIndex,
) -> Result<
    (
        SignedExecutionPayloadBid<T::EthSpec>,
        BeaconState<T::EthSpec>,
        Option<ExecutionPayloadData<T::EthSpec>>,
    ),
    BlockProductionError,
> {
    // TODO(gloas) For non local building, add sanity check on value
    // The builder MUST have enough excess balance to fulfill this bid (i.e. `value`) and all pending payments.
    // TODO(gloas) add metrics for execution payload bid production
    // Genesis has no prior block root in the state; use the latest header's root.
    let parent_root = if state.slot() > 0 {
        *state
            .get_block_root(state.slot() - 1)
            .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?
    } else {
        state.latest_block_header().canonical_root()
    };
    let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64;
    let pubkey = state
        .validators()
        .get(proposer_index as usize)
        .map(|v| v.pubkey)
        .ok_or(BlockProductionError::BeaconChain(Box::new(
            BeaconChainError::ValidatorIndexUnknown(proposer_index as usize),
        )))?;
    let builder_params = BuilderParams {
        pubkey,
        slot: state.slot(),
        chain_health: self
            .is_healthy(&parent_root)
            .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?,
    };
    // TODO(gloas) this should be BlockProductionVersion::V4
    // V3 is okay for now as long as we're not connected to a builder
    // TODO(gloas) add builder boost factor
    let prepare_payload_handle = get_execution_payload_gloas(
        self.clone(),
        &state,
        parent_root,
        proposer_index,
        builder_params,
    )?;
    // Await the spawned EL task; `None` means the executor is shutting down.
    let block_proposal_contents = prepare_payload_handle
        .await
        .map_err(BlockProductionError::TokioJoin)?
        .ok_or(BlockProductionError::ShuttingDown)??;
    let BlockProposalContentsGloas {
        payload,
        payload_value: _,
        execution_requests,
        blob_kzg_commitments,
        blobs_and_proofs: _,
    } = block_proposal_contents;
    let state_root = state.update_tree_hash_cache()?;
    // TODO(gloas) since we are defaulting to local building, execution payment is 0
    // execution payment should only be set to > 0 for trusted building.
    let bid = ExecutionPayloadBid::<T::EthSpec> {
        parent_block_hash: state.latest_block_hash()?.to_owned(),
        parent_block_root: state.get_latest_block_root(state_root),
        block_hash: payload.block_hash,
        prev_randao: payload.prev_randao,
        fee_recipient: Address::ZERO,
        gas_limit: payload.gas_limit,
        builder_index,
        slot: produce_at_slot,
        value: bid_value,
        execution_payment: EXECUTION_PAYMENT_TRUSTLESS_BUILD,
        blob_kzg_commitments,
    };
    // Store payload data for envelope construction after block is created
    let payload_data = ExecutionPayloadData {
        payload,
        execution_requests,
        builder_index,
        slot: produce_at_slot,
    };
    // TODO(gloas) this is only local building
    // we'll need to implement builder signature for the trustless path
    Ok((
        SignedExecutionPayloadBid {
            message: bid,
            // TODO(gloas) return better error variant here
            signature: Signature::infinity().map_err(|_| {
                BlockProductionError::GloasNotImplemented(
                    "Failed to generate infinity signature".to_owned(),
                )
            })?,
        },
        state,
        // Local building always returns payload data.
        // Trustless building would return None here.
        Some(payload_data),
    ))
}
}
/// Gets an execution payload for inclusion in a block.
///
/// Spawns an async task that fetches the payload from the execution layer and
/// returns a handle to it; the handle resolves to `None` if the executor is
/// shutting down.
///
/// ## Errors
///
/// Will return an error when using a pre-Gloas `state`. Ensure to only run this function
/// after the Gloas fork.
///
/// ## Specification
///
/// Equivalent to the `get_execution_payload` function in the Validator Guide:
///
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
fn get_execution_payload_gloas<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    state: &BeaconState<T::EthSpec>,
    parent_beacon_block_root: Hash256,
    proposer_index: u64,
    builder_params: BuilderParams,
) -> Result<PreparePayloadHandle<T::EthSpec>, BlockProductionError> {
    // Compute all required values from the `state` now to avoid needing to pass it into a spawned
    // task.
    let spec = &chain.spec;
    let current_epoch = state.current_epoch();
    let timestamp =
        compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?;
    let random = *state.get_randao_mix(current_epoch)?;
    let latest_execution_block_hash = *state.latest_block_hash()?;
    let latest_gas_limit = state.latest_execution_payload_bid()?.gas_limit;
    let withdrawals =
        Withdrawals::<T::EthSpec>::from(get_expected_withdrawals(state, spec)?).into();
    // Spawn a task to obtain the execution payload from the EL via a series of async calls. The
    // `join_handle` can be used to await the result of the function.
    let join_handle = chain
        .task_executor
        .clone()
        .spawn_handle(
            async move {
                prepare_execution_payload::<T>(
                    &chain,
                    timestamp,
                    random,
                    proposer_index,
                    latest_execution_block_hash,
                    latest_gas_limit,
                    builder_params,
                    withdrawals,
                    parent_beacon_block_root,
                )
                .await
            }
            .instrument(debug_span!("prepare_execution_payload")),
            "prepare_execution_payload",
        )
        .ok_or(BlockProductionError::ShuttingDown)?;
    Ok(join_handle)
}
/// Prepares an execution payload for inclusion in a block.
///
/// ## Errors
///
/// Will return an error when using a pre-Gloas fork `state`. Ensure to only run this function
/// after the Gloas fork.
///
/// ## Specification
///
/// Equivalent to the `prepare_execution_payload` function in the Validator Guide:
///
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/validator.md#block-proposal
#[allow(clippy::too_many_arguments)]
async fn prepare_execution_payload<T>(
    chain: &Arc<BeaconChain<T>>,
    timestamp: u64,
    random: Hash256,
    proposer_index: u64,
    parent_block_hash: ExecutionBlockHash,
    parent_gas_limit: u64,
    builder_params: BuilderParams,
    withdrawals: Vec<Withdrawal>,
    parent_beacon_block_root: Hash256,
) -> Result<BlockProposalContentsGloas<T::EthSpec>, BlockProductionError>
where
    T: BeaconChainTypes,
{
    let spec = &chain.spec;
    let fork = spec.fork_name_at_slot::<T::EthSpec>(builder_params.slot);
    let execution_layer = chain
        .execution_layer
        .as_ref()
        .ok_or(BlockProductionError::ExecutionLayerMissing)?;
    // Try to obtain the fork choice update parameters from the cached head.
    //
    // Use a blocking task to interact with the `canonical_head` lock otherwise we risk blocking the
    // core `tokio` executor.
    let inner_chain = chain.clone();
    let forkchoice_update_params = chain
        .spawn_blocking_handle(
            move || {
                inner_chain
                    .canonical_head
                    .cached_head()
                    .forkchoice_update_parameters()
            },
            "prepare_execution_payload_forkchoice_update_params",
        )
        .instrument(debug_span!("forkchoice_update_params"))
        .await
        .map_err(|e| BlockProductionError::BeaconChain(Box::new(e)))?;
    let suggested_fee_recipient = execution_layer
        .get_suggested_fee_recipient(proposer_index)
        .await;
    let payload_attributes = PayloadAttributes::new(
        timestamp,
        random,
        suggested_fee_recipient,
        Some(withdrawals),
        Some(parent_beacon_block_root),
    );
    // Honour any per-proposer gas limit preference registered with the EL.
    let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await;
    let payload_parameters = PayloadParameters {
        parent_hash: parent_block_hash,
        parent_gas_limit,
        proposer_gas_limit: target_gas_limit,
        payload_attributes: &payload_attributes,
        forkchoice_update_params: &forkchoice_update_params,
        current_fork: fork,
    };
    let block_contents = execution_layer
        .get_payload_gloas(payload_parameters)
        .await
        .map_err(BlockProductionError::GetPayloadFailed)?;
    Ok(block_contents)
}

View File

@@ -0,0 +1,223 @@
use std::{sync::Arc, time::Duration};
use proto_array::ProposerHeadError;
use slot_clock::SlotClock;
use tracing::{debug, error, info, instrument, warn};
use types::{BeaconState, Hash256, Slot};
use crate::{
BeaconChain, BeaconChainTypes, BlockProductionError, StateSkipConfig,
fork_choice_signal::ForkChoiceWaitResult, metrics,
};
mod gloas;
impl<T: BeaconChainTypes> BeaconChain<T> {
/// Load a beacon state from the database for block production. This is a long-running process
/// that should not be performed in an `async` context.
///
/// Returns the state to build upon and, when known, its root (`None` when the
/// state was obtained by skipping slots past the head).
pub(crate) fn load_state_for_block_production(
    self: &Arc<Self>,
    slot: Slot,
) -> Result<(BeaconState<T::EthSpec>, Option<Hash256>), BlockProductionError> {
    let fork_choice_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_FORK_CHOICE_TIMES);
    self.wait_for_fork_choice_before_block_production(slot)?;
    drop(fork_choice_timer);
    let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES);
    // Atomically read some values from the head whilst avoiding holding cached head `Arc` any
    // longer than necessary.
    let (head_slot, head_block_root, head_state_root) = {
        let head = self.canonical_head.cached_head();
        (
            head.head_slot(),
            head.head_block_root(),
            head.head_state_root(),
        )
    };
    let (state, state_root_opt) = if head_slot < slot {
        // Attempt an aggressive re-org if configured and the conditions are right.
        if let Some((re_org_state, re_org_state_root)) =
            self.get_state_for_re_org(slot, head_slot, head_block_root)
        {
            info!(
                %slot,
                head_to_reorg = %head_block_root,
                "Proposing block to re-org current head"
            );
            (re_org_state, Some(re_org_state_root))
        } else {
            // Fetch the head state advanced through to `slot`, which should be present in the
            // state cache thanks to the state advance timer.
            let (state_root, state) = self
                .store
                .get_advanced_hot_state(head_block_root, slot, head_state_root)
                .map_err(BlockProductionError::FailedToLoadState)?
                .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?;
            (state, Some(state_root))
        }
    } else {
        // Proposing at or behind the head: build on the pre-head state.
        warn!(
            message = "this block is more likely to be orphaned",
            %slot,
            "Producing block that conflicts with head"
        );
        let state = self
            .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots)
            .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?;
        (state, None)
    };
    drop(state_load_timer);
    Ok((state, state_root_opt))
}
/// If configured, wait for the fork choice run at the start of the slot to complete.
///
/// Only waits when proposing for the current or next slot; otherwise logs an
/// error (likely clock desync) and proceeds. Never returns an error due to a
/// fork choice timeout — timeouts are logged and production continues.
#[instrument(level = "debug", skip_all)]
fn wait_for_fork_choice_before_block_production(
    self: &Arc<Self>,
    slot: Slot,
) -> Result<(), BlockProductionError> {
    // The signal receiver is only present when the wait-for-fork-choice
    // feature is enabled in config.
    if let Some(rx) = &self.fork_choice_signal_rx {
        let current_slot = self
            .slot()
            .map_err(|_| BlockProductionError::UnableToReadSlot)?;
        let timeout = Duration::from_millis(self.config.fork_choice_before_proposal_timeout_ms);
        if slot == current_slot || slot == current_slot + 1 {
            match rx.wait_for_fork_choice(slot, timeout) {
                ForkChoiceWaitResult::Success(fc_slot) => {
                    debug!(
                        %slot,
                        fork_choice_slot = %fc_slot,
                        "Fork choice successfully updated before block production"
                    );
                }
                ForkChoiceWaitResult::Behind(fc_slot) => {
                    warn!(
                        fork_choice_slot = %fc_slot,
                        %slot,
                        message = "this block may be orphaned",
                        "Fork choice notifier out of sync with block production"
                    );
                }
                ForkChoiceWaitResult::TimeOut => {
                    warn!(
                        message = "this block may be orphaned",
                        "Timed out waiting for fork choice before proposal"
                    );
                }
            }
        } else {
            error!(
                %slot,
                %current_slot,
                message = "check clock sync, this block may be orphaned",
                "Producing block at incorrect slot"
            );
        }
    }
    Ok(())
}
/// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable.
///
/// This function will return `None` if proposer re-orgs are disabled.
///
/// Returns the state built on the re-org parent together with its root, or
/// `None` when any of the re-org conditions fail (each failure is logged).
#[instrument(skip_all, level = "debug")]
fn get_state_for_re_org(
    &self,
    slot: Slot,
    head_slot: Slot,
    canonical_head: Hash256,
) -> Option<(BeaconState<T::EthSpec>, Hash256)> {
    // Both thresholds must be configured for re-orgs to be enabled at all.
    let re_org_head_threshold = self.config.re_org_head_threshold?;
    let re_org_parent_threshold = self.config.re_org_parent_threshold?;
    if self.spec.proposer_score_boost.is_none() {
        warn!(
            reason = "this network does not have proposer boost enabled",
            "Ignoring proposer re-org configuration"
        );
        return None;
    }
    let slot_delay = self
        .slot_clock
        .seconds_from_current_slot_start()
        .or_else(|| {
            warn!(error = "unable to read slot clock", "Not attempting re-org");
            None
        })?;
    // Attempt a proposer re-org if:
    //
    // 1. It seems we have time to propagate and still receive the proposer boost.
    // 2. The current head block was seen late.
    // 3. The `get_proposer_head` conditions from fork choice pass.
    let proposing_on_time =
        slot_delay < self.config.re_org_cutoff(self.spec.get_slot_duration());
    if !proposing_on_time {
        debug!(reason = "not proposing on time", "Not attempting re-org");
        return None;
    }
    let head_late = self.block_observed_after_attestation_deadline(canonical_head, head_slot);
    if !head_late {
        debug!(reason = "head not late", "Not attempting re-org");
        return None;
    }
    // Is the current head weak and appropriate for re-orging?
    let proposer_head_timer =
        metrics::start_timer(&metrics::BLOCK_PRODUCTION_GET_PROPOSER_HEAD_TIMES);
    let proposer_head = self
        .canonical_head
        .fork_choice_read_lock()
        .get_proposer_head(
            slot,
            canonical_head,
            re_org_head_threshold,
            re_org_parent_threshold,
            &self.config.re_org_disallowed_offsets,
            self.config.re_org_max_epochs_since_finalization,
        )
        // Log the reason, then bail via `.ok()?` below.
        .map_err(|e| match e {
            ProposerHeadError::DoNotReOrg(reason) => {
                debug!(
                    %reason,
                    "Not attempting re-org"
                );
            }
            ProposerHeadError::Error(e) => {
                warn!(
                    error = ?e,
                    "Not attempting re-org"
                );
            }
        })
        .ok()?;
    drop(proposer_head_timer);
    let re_org_parent_block = proposer_head.parent_node.root;
    let (state_root, state) = self
        .store
        .get_advanced_hot_state_from_cache(re_org_parent_block, slot)
        .or_else(|| {
            warn!(reason = "no state in cache", "Not attempting re-org");
            None
        })?;
    info!(
        weak_head = ?canonical_head,
        parent = ?re_org_parent_block,
        head_weight = proposer_head.head_node.weight,
        threshold_weight = proposer_head.re_org_head_weight_threshold,
        "Attempting re-org due to weak head"
    );
    Some((state, state_root))
}
}