Add checkpoint sync

This commit is contained in:
Pawan Dhananjay
2026-03-30 19:41:09 -07:00
parent aa5292df99
commit 871697280e
7 changed files with 155 additions and 7 deletions

View File

@@ -941,6 +941,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)?
}
/// Returns the Pending (pre-payload) state root at the given slot in the canonical chain.
///
/// In ePBS (Gloas+), if the canonical state at `slot` is Full (post-payload), this resolves
/// to the same-slot Pending state root. For skipped slots or pre-Gloas, returns the canonical
/// state root unchanged.
pub fn pending_state_root_at_slot(&self, request_slot: Slot) -> Result<Option<Hash256>, Error> {
    match self.state_root_at_slot(request_slot)? {
        None => Ok(None),
        Some(root) => {
            let fork = self.spec.fork_name_at_slot::<T::EthSpec>(request_slot);
            if fork.gloas_enabled() {
                // Post-Gloas the canonical root may be a Full state; resolve it
                // to its same-slot Pending counterpart via the store.
                Ok(Some(self.store.resolve_pending_state_root(&root)?))
            } else {
                // Pre-Gloas: every state is inherently Pending.
                Ok(Some(root))
            }
        }
    }
}
/// Returns the block root at the given slot, if any. Only returns roots in the canonical chain.
///
/// ## Notes

View File

@@ -42,6 +42,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
use task_executor::{ShutdownReason, TaskExecutor};
use tracing::{debug, error, info, warn};
use tree_hash::TreeHash;
use types::SignedExecutionPayloadEnvelope;
use types::data::CustodyIndex;
use types::{
BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList,
@@ -426,6 +427,7 @@ where
mut weak_subj_state: BeaconState<E>,
weak_subj_block: SignedBeaconBlock<E>,
weak_subj_blobs: Option<BlobSidecarList<E>>,
weak_subj_payload: Option<SignedExecutionPayloadEnvelope<E>>,
genesis_state: BeaconState<E>,
) -> Result<Self, String> {
let store = self
@@ -601,6 +603,13 @@ where
.map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?;
}
}
// Persist the checkpoint payload envelope (supplied for Gloas/ePBS
// checkpoints) in the store, keyed by the checkpoint block root.
if let Some(ref envelope) = weak_subj_payload {
    store
        .put_payload_envelope(&weak_subj_block_root, envelope.clone())
        .map_err(|e| {
            format!("Failed to store weak subjectivity payload envelope: {e:?}")
        })?;
}
// Stage the database's metadata fields for atomic storage when `build` is called.
// This prevents the database from restarting in an inconsistent state if the anchor
@@ -617,10 +626,25 @@ where
.map_err(|e| format!("Failed to initialize data column info: {:?}", e))?,
);
if self
.spec
.fork_name_at_slot::<E>(weak_subj_slot)
.gloas_enabled()
{
let envelope = weak_subj_payload.as_ref().ok_or_else(|| {
"Gloas checkpoint sync requires an execution payload envelope".to_string()
})?;
if envelope.message.beacon_block_root != weak_subj_block_root {
return Err(format!(
"Envelope beacon_block_root {:?} does not match block root {:?}",
envelope.message.beacon_block_root, weak_subj_block_root
));
}
}
// TODO(gloas): add check that checkpoint state is Pending
let snapshot = BeaconSnapshot {
beacon_block_root: weak_subj_block_root,
execution_envelope: None,
execution_envelope: weak_subj_payload.map(Arc::new),
beacon_block: Arc::new(weak_subj_block),
beacon_state: weak_subj_state,
};

View File

@@ -372,6 +372,7 @@ where
anchor_state,
anchor_block,
anchor_blobs,
None,
genesis_state,
)?
}
@@ -445,6 +446,21 @@ where
None
};
// Gloas (ePBS) checkpoints additionally require the execution payload
// envelope for the finalized block, since post-Gloas the block alone no
// longer carries the payload.
let envelope = if spec
    .fork_name_at_slot::<E>(finalized_block_slot)
    .gloas_enabled()
{
    debug!("Downloading payload");
    let envelope = remote
        .get_beacon_execution_payload_envelope(BlockId::Slot(finalized_block_slot))
        .await
        // Fixed copy-paste error: this fetches the payload envelope, not blobs.
        .map_err(|e| {
            format!("Error fetching finalized payload envelope from remote: {e:?}")
        })?
        .map(|resp| resp.into_data());
    // Only log success when a download was actually attempted (Gloas path).
    debug!("Downloaded finalized payload");
    envelope
} else {
    None
};
let genesis_state = genesis_state(&runtime_context, &config).await?;
info!(
@@ -454,7 +470,7 @@ where
"Loaded checkpoint block and state"
);
builder.weak_subjectivity_state(state, block, blobs, genesis_state)?
builder.weak_subjectivity_state(state, block, blobs, envelope, genesis_state)?
}
ClientGenesis::DepositContract => {
return Err("Loading genesis from deposit contract no longer supported".to_string());

View File

@@ -43,14 +43,32 @@ impl StateId {
chain.canonical_head.cached_head().finalized_checkpoint();
let (slot, execution_optimistic) =
checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?;
(slot, execution_optimistic, true)
let root = chain
.pending_state_root_at_slot(slot)
.map_err(warp_utils::reject::unhandled_error)?
.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"beacon state at slot {}",
slot
))
})?;
return Ok((root, execution_optimistic, true));
}
CoreStateId::Justified => {
let justified_checkpoint =
chain.canonical_head.cached_head().justified_checkpoint();
let (slot, execution_optimistic) =
checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?;
(slot, execution_optimistic, false)
let root = chain
.pending_state_root_at_slot(slot)
.map_err(warp_utils::reject::unhandled_error)?
.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"beacon state at slot {}",
slot
))
})?;
return Ok((root, execution_optimistic, false));
}
CoreStateId::Slot(slot) => (
*slot,

View File

@@ -35,7 +35,7 @@ use std::marker::PhantomData;
use std::sync::Arc;
use strum::IntoEnumIterator;
use tracing::{debug, error, info, warn};
use types::{ColumnIndex, Epoch, EthSpec};
use types::{ColumnIndex, Epoch, EthSpec, ForkName};
/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
/// blocks per batch are requested _at most_. A batch may request less blocks to account for
@@ -218,6 +218,14 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
match self.state() {
BackFillState::Syncing => {} // already syncing ignore.
BackFillState::Paused => {
if self
.beacon_chain
.spec
.fork_name_at_epoch(self.to_be_downloaded)
>= ForkName::Gloas
{
return Ok(SyncStart::NotSyncing);
}
if self
.network_globals
.peers

View File

@@ -16,10 +16,10 @@ pub use data_columns_by_range::DataColumnsByRangeRequestItems;
pub use data_columns_by_root::{
DataColumnsByRootRequestItems, DataColumnsByRootSingleBlockRequest,
};
pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems;
pub use payload_envelopes_by_root::{
PayloadEnvelopesByRootRequestItems, PayloadEnvelopesByRootSingleRequest,
};
pub use payload_envelopes_by_range::PayloadEnvelopesByRangeRequestItems;
use crate::metrics;
@@ -31,8 +31,8 @@ mod blocks_by_range;
mod blocks_by_root;
mod data_columns_by_range;
mod data_columns_by_root;
mod payload_envelopes_by_root;
mod payload_envelopes_by_range;
mod payload_envelopes_by_root;
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum LookupVerifyError {

View File

@@ -1951,6 +1951,66 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
}
/// Resolve a canonical state root to the Pending (pre-payload) state root at the same slot.
///
/// In ePBS, checkpoint states (finalized, justified) should be returned as their Pending
/// variant. This function takes a canonical state root and:
///
/// - If the state is already Pending (or pre-Gloas), returns it unchanged.
/// - If the state is Full due to a payload applied at this slot, returns the same-slot
///   Pending state root via `previous_state_root`.
/// - If the state is at a skipped slot (inheriting Full status from a prior slot), returns
///   it unchanged — there is no distinct Pending state at a skipped slot.
pub fn resolve_pending_state_root(&self, state_root: &Hash256) -> Result<Hash256, Error> {
    // Fast path: the split state is always stored as Pending.
    let split = self.get_split_info();
    if split.state_root == *state_root {
        return Ok(split.state_root);
    }

    // Hot DB lookup.
    if let Some(summary) = self.load_hot_state_summary(state_root)? {
        let gloas = self
            .spec
            .fork_name_at_slot::<E>(summary.slot)
            .gloas_enabled();
        // Pre-Gloas states are always Pending, and the genesis state (zero
        // previous root) has no predecessor to resolve through.
        if !gloas || summary.previous_state_root.is_zero() {
            return Ok(*state_root);
        }
        // If the previous state sits at the same slot, this state is Full
        // (post-payload) and its predecessor is the Pending (post-block)
        // state; otherwise the state is already Pending or at a skipped slot.
        let previous_summary = self
            .load_hot_state_summary(&summary.previous_state_root)?
            .ok_or(Error::MissingHotStateSummary(summary.previous_state_root))?;
        let resolved = if previous_summary.slot == summary.slot {
            summary.previous_state_root
        } else {
            *state_root
        };
        return Ok(resolved);
    }

    // Cold DB: the non-canonical payload variant is pruned during migration,
    // so whatever is stored is returned unchanged. In practice,
    // finalized/justified states are almost always in the hot DB or at the
    // split point.
    if self.load_cold_state_slot(state_root)?.is_some() {
        return Ok(*state_root);
    }

    Err(Error::MissingHotStateSummary(*state_root))
}
fn load_hot_hdiff_buffer(&self, state_root: Hash256) -> Result<HDiffBuffer, Error> {
if let Some(buffer) = self
.state_cache