Block proposal optimisations (#8156)

Closes:

- https://github.com/sigp/lighthouse/issues/4412

This should reduce Lighthouse's block proposal times on Holesky and prevent us getting reorged.


- [x] Allow the head state to be advanced further than 1 slot. This lets us avoid epoch processing on hot paths including block production, by having new epoch boundaries pre-computed and available in the state cache.
- [x] Use the finalized state to prune the op pool. We were previously using the head state and trying to infer slashing/exit relevance based on `exit_epoch`. However, some exit epochs are far in the future, despite occurring recently.


Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
Michael Sproul
2025-10-08 17:09:12 +11:00
committed by GitHub
parent 2a433bc406
commit 13dfa9200f
4 changed files with 53 additions and 72 deletions

View File

@@ -5233,16 +5233,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
None
};
let slashings_and_exits_span = debug_span!("get_slashings_and_exits").entered();
let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) =
self.op_pool.get_slashings_and_exits(&state, &self.spec);
drop(slashings_and_exits_span);
let eth1_data = state.eth1_data().clone();
let deposits = vec![];
let bls_changes_span = debug_span!("get_bls_to_execution_changes").entered();
let bls_to_execution_changes = self
.op_pool
.get_bls_to_execution_changes(&state, &self.spec);
drop(bls_changes_span);
// Iterate through the naive aggregation pool and ensure all the attestations from there
// are included in the operation pool.

View File

@@ -937,13 +937,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.execution_status
.is_optimistic_or_invalid();
self.op_pool.prune_all(
&new_snapshot.beacon_block,
&new_snapshot.beacon_state,
self.epoch()?,
&self.spec,
);
self.observed_block_producers.write().prune(
new_view
.finalized_checkpoint
@@ -982,9 +975,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}));
}
// The store migration task requires the *state at the slot of the finalized epoch*,
// rather than the state of the latest finalized block. These two values will only
// differ when the first slot of the finalized epoch is a skip slot.
// The store migration task and op pool pruning require the *state at the first slot of the
// finalized epoch*, rather than the state of the latest finalized block. These two values
// will only differ when the first slot of the finalized epoch is a skip slot.
//
// Use the `StateRootsIterator` directly rather than `BeaconChain::state_root_at_slot`
// to ensure we use the same state that we just set as the head.
@@ -1006,6 +999,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)?
.ok_or(Error::MissingFinalizedStateRoot(new_finalized_slot))?;
let update_cache = true;
let new_finalized_state = self
.store
.get_hot_state(&new_finalized_state_root, update_cache)?
.ok_or(Error::MissingBeaconState(new_finalized_state_root))?;
self.op_pool.prune_all(
&new_snapshot.beacon_block,
&new_snapshot.beacon_state,
&new_finalized_state,
self.epoch()?,
&self.spec,
);
// We just pass the state root to the finalization thread. It should be able to reload the
// state from the state_cache near instantly anyway. We could experiment with sending the
// state over a channel in future, but it's probably no quicker.
self.store_migrator.process_finalization(
new_finalized_state_root.into(),
new_view.finalized_checkpoint,

View File

@@ -33,7 +33,7 @@ use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, Relative
///
/// This avoids doing unnecessary work whilst the node is syncing or has perhaps been put to sleep
/// for some period of time.
const MAX_ADVANCE_DISTANCE: u64 = 4;
const MAX_ADVANCE_DISTANCE: u64 = 256;
/// Similarly for fork choice: avoid the fork choice lookahead during sync.
///
@@ -49,17 +49,7 @@ enum Error {
HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256),
BeaconState(#[allow(dead_code)] BeaconStateError),
Store(#[allow(dead_code)] store::Error),
MaxDistanceExceeded {
current_slot: Slot,
head_slot: Slot,
},
StateAlreadyAdvanced {
block_root: Hash256,
},
BadStateSlot {
_state_slot: Slot,
_block_slot: Slot,
},
MaxDistanceExceeded { current_slot: Slot, head_slot: Slot },
}
impl From<BeaconChainError> for Error {
@@ -180,9 +170,6 @@ async fn state_advance_timer<T: BeaconChainTypes>(
error = ?e,
"Failed to advance head state"
),
Err(Error::StateAlreadyAdvanced { block_root }) => {
debug!(?block_root, "State already advanced on slot")
}
Err(Error::MaxDistanceExceeded {
current_slot,
head_slot,
@@ -295,25 +282,6 @@ fn advance_head<T: BeaconChainTypes>(beacon_chain: &Arc<BeaconChain<T>>) -> Resu
.get_advanced_hot_state(head_block_root, current_slot, head_block_state_root)?
.ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?;
// Protect against advancing a state more than a single slot.
//
// Advancing more than one slot without storing the intermediate state would corrupt the
// database. Future works might store intermediate states inside this function.
match state.slot().cmp(&state.latest_block_header().slot) {
std::cmp::Ordering::Equal => (),
std::cmp::Ordering::Greater => {
return Err(Error::StateAlreadyAdvanced {
block_root: head_block_root,
});
}
std::cmp::Ordering::Less => {
return Err(Error::BadStateSlot {
_block_slot: state.latest_block_header().slot,
_state_slot: state.slot(),
});
}
}
let initial_slot = state.slot();
let initial_epoch = state.current_epoch();

View File

@@ -457,32 +457,35 @@ impl<E: EthSpec> OperationPool<E> {
.collect()
}
/// Prune proposer slashings for validators which are exited in the finalized epoch.
pub fn prune_proposer_slashings(&self, head_state: &BeaconState<E>) {
/// Prune proposer slashings for validators which are already slashed or exited in the finalized
/// epoch.
pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState<E>) {
prune_validator_hash_map(
&mut self.proposer_slashings.write(),
|_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
head_state,
|_, validator| {
validator.slashed || validator.exit_epoch <= finalized_state.current_epoch()
},
finalized_state,
);
}
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
/// fork.
pub fn prune_attester_slashings(&self, head_state: &BeaconState<E>) {
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState<E>) {
self.attester_slashings.write().retain(|slashing| {
// Check that the attestation's signature is still valid wrt the fork version.
let signature_ok = slashing.signature_is_still_valid(&head_state.fork());
// We might be a bit slower to detect signature staleness by using the finalized state
// here, but we filter when proposing anyway, so in the worst case we just keep some
// stuff around until we finalize.
let signature_ok = slashing.signature_is_still_valid(&finalized_state.fork());
// Slashings that don't slash any validators can also be dropped.
let slashing_ok = get_slashable_indices_modular(
head_state,
finalized_state,
slashing.as_inner().to_ref(),
|_, validator| {
// Declare that a validator is still slashable if they have not exited prior
// to the finalized epoch.
//
// We cannot check the `slashed` field since the `head` is not finalized and
// a fork could un-slash someone.
validator.exit_epoch > head_state.finalized_checkpoint().epoch
// Declare that a validator is still slashable if they have not been slashed in
// the finalized state, and have not exited at the finalized epoch.
!validator.slashed && validator.exit_epoch > finalized_state.current_epoch()
},
)
.is_ok_and(|indices| !indices.is_empty());
@@ -531,17 +534,12 @@ impl<E: EthSpec> OperationPool<E> {
)
}
/// Prune if validator has already exited at or before the finalized checkpoint of the head.
pub fn prune_voluntary_exits(&self, head_state: &BeaconState<E>) {
/// Prune if validator has already exited in the finalized state.
pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState<E>, spec: &ChainSpec) {
prune_validator_hash_map(
&mut self.voluntary_exits.write(),
// This condition is slightly too loose, since there will be some finalized exits that
// are missed here.
//
// We choose simplicity over the gain of pruning more exits since they are small and
// should not be seen frequently.
|_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch,
head_state,
|_, validator| validator.exit_epoch != spec.far_future_epoch,
finalized_state,
);
}
@@ -642,14 +640,15 @@ impl<E: EthSpec> OperationPool<E> {
&self,
head_block: &SignedBeaconBlock<E, Payload>,
head_state: &BeaconState<E>,
finalized_state: &BeaconState<E>,
current_epoch: Epoch,
spec: &ChainSpec,
) {
self.prune_attestations(current_epoch);
self.prune_sync_contributions(head_state.slot());
self.prune_proposer_slashings(head_state);
self.prune_attester_slashings(head_state);
self.prune_voluntary_exits(head_state);
self.prune_proposer_slashings(finalized_state);
self.prune_attester_slashings(finalized_state);
self.prune_voluntary_exits(finalized_state, spec);
self.prune_bls_to_execution_changes(head_block, head_state, spec);
}
@@ -758,14 +757,14 @@ where
fn prune_validator_hash_map<T, F, E: EthSpec>(
map: &mut HashMap<u64, SigVerifiedOp<T, E>>,
prune_if: F,
head_state: &BeaconState<E>,
state: &BeaconState<E>,
) where
F: Fn(u64, &Validator) -> bool,
T: VerifyOperation<E>,
{
map.retain(|&validator_index, op| {
op.signature_is_still_valid(&head_state.fork())
&& head_state
op.signature_is_still_valid(&state.fork())
&& state
.validators()
.get(validator_index as usize)
.is_none_or(|validator| !prune_if(validator_index, validator))