Merge conflicts

This commit is contained in:
Eitan Seri-Levi
2026-03-26 21:50:30 -07:00
158 changed files with 4877 additions and 3908 deletions

View File

@@ -13,7 +13,7 @@ use crate::block_verification::{
signature_verify_chain_segment, verify_header_signature,
};
use crate::block_verification_types::{
AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock,
AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RangeSyncBlock,
};
pub use crate::canonical_head::CanonicalHead;
use crate::chain_config::ChainConfig;
@@ -28,7 +28,6 @@ use crate::envelope_times_cache::EnvelopeTimesCache;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::events::ServerSentEventHandler;
use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload};
use crate::execution_payload_envelope_streamer::PayloadEnvelopeStreamer;
use crate::fetch_blobs::EngineGetBlobsOutput;
use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx};
use crate::graffiti_calculator::{GraffitiCalculator, GraffitiSettings};
@@ -55,6 +54,8 @@ use crate::observed_block_producers::ObservedBlockProducers;
use crate::observed_data_sidecars::ObservedDataSidecars;
use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::observed_slashable::ObservedSlashable;
#[cfg(not(test))]
use crate::payload_envelope_streamer::{EnvelopeRequestSource, launch_payload_envelope_stream};
use crate::pending_payload_envelopes::PendingPayloadEnvelopes;
use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::persisted_custody::persist_custody_context;
@@ -66,7 +67,6 @@ use crate::sync_committee_verification::{
};
use crate::validator_monitor::{
HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, ValidatorMonitor, get_slot_delay_ms,
timestamp_now,
};
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::{
@@ -140,7 +140,7 @@ use types::*;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
/// Alias to appease clippy.
type HashBlockTuple<E> = (Hash256, RpcBlock<E>);
type HashBlockTuple<E> = (Hash256, RangeSyncBlock<E>);
// These keys are all zero because they get stored in different columns, see `DBColumn` type.
pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO;
@@ -1138,56 +1138,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map_or_else(|| self.get_blobs(block_root), Ok)
}
/// Returns the execution payload envelopes at the given roots, if any.
///
/// Will also check any associated caches. The expected use for this function is *only* for returning blocks requested
/// from P2P peers.
///
/// ## Errors
///
/// May return a database error.
#[allow(clippy::type_complexity)]
pub fn get_payload_envelopes_checking_caches(
self: &Arc<Self>,
block_roots: Vec<Hash256>,
) -> Result<
impl Stream<
Item = (
Hash256,
Arc<Result<Option<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>, Error>>,
),
>,
Error,
> {
Ok(PayloadEnvelopeStreamer::<T>::new(
self.execution_layer.clone(),
self.store.clone(),
self.task_executor.clone(),
CheckCaches::Yes,
)?
.launch_stream(block_roots))
}
#[cfg(not(test))]
#[allow(clippy::type_complexity)]
pub fn get_payload_envelopes(
self: &Arc<Self>,
block_roots: Vec<Hash256>,
) -> Result<
impl Stream<
Item = (
Hash256,
Arc<Result<Option<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>, Error>>,
),
>,
Error,
request_source: EnvelopeRequestSource,
) -> impl Stream<
Item = (
Hash256,
Arc<Result<Option<Arc<SignedExecutionPayloadEnvelope<T::EthSpec>>>, Error>>,
),
> {
Ok(PayloadEnvelopeStreamer::<T>::new(
self.execution_layer.clone(),
self.store.clone(),
self.task_executor.clone(),
CheckCaches::No,
)?
.launch_stream(block_roots))
launch_payload_envelope_stream(self.clone(), block_roots, request_source)
}
pub fn get_data_columns_checking_all_caches(
@@ -2109,6 +2072,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// required information.
(justified_checkpoint, committee_len)
} else {
// We assume that the `Pending` state has the same shufflings as a `Full` state
// for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root
let (advanced_state_root, mut state) = self
.store
.get_advanced_hot_state(
@@ -2832,7 +2797,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// This method is potentially long-running and should not run on the core executor.
pub fn filter_chain_segment(
self: &Arc<Self>,
chain_segment: Vec<RpcBlock<T::EthSpec>>,
chain_segment: Vec<RangeSyncBlock<T::EthSpec>>,
) -> Result<Vec<HashBlockTuple<T::EthSpec>>, Box<ChainSegmentResult>> {
// This function will never import any blocks.
let imported_blocks = vec![];
@@ -2941,7 +2906,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// `Self::process_block`.
pub async fn process_chain_segment(
self: &Arc<Self>,
chain_segment: Vec<RpcBlock<T::EthSpec>>,
chain_segment: Vec<RangeSyncBlock<T::EthSpec>>,
notify_execution_layer: NotifyExecutionLayer,
) -> ChainSegmentResult {
for block in chain_segment.iter() {
@@ -4135,23 +4100,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// See https://github.com/sigp/lighthouse/issues/2028
let (_, signed_block, block_data) = signed_block.deconstruct();
match self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data) {
Ok(Some(blobs_or_columns_store_op)) => {
ops.push(blobs_or_columns_store_op);
}
Ok(None) => {}
Err(e) => {
error!(
msg = "Restoring fork choice from disk",
error = &e,
?block_root,
"Failed to store data columns into the database"
);
return Err(self
.handle_import_block_db_write_error(fork_choice)
.err()
.unwrap_or(BlockError::InternalError(e)));
}
if let Some(blobs_or_columns_store_op) =
self.get_blobs_or_columns_store_op(block_root, signed_block.slot(), block_data)
{
ops.push(blobs_or_columns_store_op);
}
let block = signed_block.message();
@@ -4181,7 +4133,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// We're declaring the block "imported" at this point, since fork choice and the DB know
// about it.
let block_time_imported = timestamp_now();
let block_time_imported = self.slot_clock.now_duration().unwrap_or(Duration::MAX);
// compute state proofs for light client updates before inserting the state into the
// snapshot cache.
@@ -4768,6 +4720,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if cached_head.head_block_root() == parent_block_root {
(Cow::Borrowed(head_state), cached_head.head_state_root())
} else {
// TODO(gloas): this function needs updating to be envelope-aware
// See: https://github.com/sigp/lighthouse/issues/8957
let block = self
.get_blinded_block(&parent_block_root)?
.ok_or(Error::MissingBeaconBlock(parent_block_root))?;
@@ -6567,11 +6521,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<V, E> {
crate::beacon_proposer_cache::with_proposer_cache(
&self.beacon_proposer_cache,
&self.spec,
shuffling_decision_block,
proposal_epoch,
accessor,
state_provider,
&self.spec,
)
}
@@ -6710,6 +6664,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt {
(state, state_root)
} else {
// We assume that the `Pending` state has the same shufflings as a `Full` state
// for the same block. Analysis: https://hackmd.io/@dapplion/gloas_dependant_root
let (state_root, state) = self
.store
.get_advanced_hot_state(
@@ -6784,6 +6740,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut prev_block_root = None;
let mut prev_beacon_state = None;
// Collect all blocks.
let mut blocks = vec![];
for res in self.forwards_iter_block_roots(from_slot)? {
let (beacon_block_root, _) = res?;
@@ -6799,16 +6758,42 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
})?;
let beacon_state_root = beacon_block.state_root();
blocks.push((beacon_block_root, Arc::new(beacon_block)));
}
// Collect states, using the next blocks to determine if states are full (have Gloas
// payloads).
for (i, (block_root, block)) in blocks.iter().enumerate() {
let (opt_envelope, state_root) = if block.fork_name_unchecked().gloas_enabled() {
let opt_envelope = self.store.get_payload_envelope(block_root)?.map(Arc::new);
if let Some((_, next_block)) = blocks.get(i + 1) {
let block_hash = block.payload_bid_block_hash()?;
if next_block.is_parent_block_full(block_hash) {
let envelope = opt_envelope.ok_or_else(|| {
Error::DBInconsistent(format!("Missing envelope {block_root:?}"))
})?;
let state_root = envelope.message.state_root;
(Some(envelope), state_root)
} else {
(None, block.state_root())
}
} else {
// TODO(gloas): should use fork choice/cached head for last block in sequence
opt_envelope
.as_ref()
.map_or((None, block.state_root()), |envelope| {
(Some(envelope.clone()), envelope.message.state_root)
})
}
} else {
(None, block.state_root())
};
// This branch is reached from the HTTP API. We assume the user wants
// to cache states so that future calls are faster.
let mut beacon_state = self
.store
.get_state(&beacon_state_root, Some(beacon_block.slot()), true)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing state {:?}", beacon_state_root))
})?;
.get_state(&state_root, Some(block.slot()), true)?
.ok_or_else(|| Error::DBInconsistent(format!("Missing state {:?}", state_root)))?;
// This beacon state might come from the freezer DB, which means it could have pending
// updates or lots of untethered memory. We rebase it on the previous state in order to
@@ -6821,12 +6806,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
prev_beacon_state = Some(beacon_state.clone());
let snapshot = BeaconSnapshot {
beacon_block: Arc::new(beacon_block),
beacon_block_root,
beacon_block: block.clone(),
execution_envelope: opt_envelope,
beacon_block_root: *block_root,
beacon_state,
};
dump.push(snapshot);
}
Ok(dump)
}
@@ -7253,16 +7240,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
block_slot: Slot,
block_data: AvailableBlockData<T::EthSpec>,
) -> Result<Option<StoreOp<'_, T::EthSpec>>, String> {
) -> Option<StoreOp<'_, T::EthSpec>> {
match block_data {
AvailableBlockData::NoData => Ok(None),
AvailableBlockData::NoData => None,
AvailableBlockData::Blobs(blobs) => {
debug!(
%block_root,
count = blobs.len(),
"Writing blobs to store"
);
Ok(Some(StoreOp::PutBlobs(block_root, blobs)))
Some(StoreOp::PutBlobs(block_root, blobs))
}
AvailableBlockData::DataColumns(mut data_columns) => {
let columns_to_custody = self.custody_columns_for_epoch(Some(
@@ -7278,7 +7265,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
count = data_columns.len(),
"Writing data columns to store"
);
Ok(Some(StoreOp::PutDataColumns(block_root, data_columns)))
Some(StoreOp::PutDataColumns(block_root, data_columns))
}
}
}