Mirror of https://github.com/sigp/lighthouse.git
Reimport the checkpoint sync block (#8417)
We no longer want to require checkpoint sync starts to include the required custody data columns; instead, we fetch them from p2p.

Closes https://github.com/sigp/lighthouse/issues/6837

The checkpoint sync slot can:

1. Be the first slot in the epoch, such that the epoch of the block == the start checkpoint epoch
2. Be in an epoch prior to the start checkpoint epoch

In both cases backfill sync already fetches that epoch's worth of blocks with the current code. This PR modifies the backfill import filter function to allow re-importing the block at the oldest block slot in the DB. I feel this solution is sufficient unless I'm missing something.

~~I have not tested this yet!~~ Michael has tested this and it works.

Co-Authored-By: dapplion <35266934+dapplion@users.noreply.github.com>
Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
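As an illustration of the filter change described above, here is a minimal standalone sketch (not Lighthouse code): switching `partition_point` from a strict `<` to `<=` keeps a block *at* the DB's oldest block slot in the batch, so the checkpoint sync block can be re-imported. The slot values and variable names below are invented for the example.

```rust
// Minimal standalone sketch of the backfill filter change (not Lighthouse code):
// slots are modelled as plain u64s instead of `AvailableBlock`s, and the values
// below are invented for illustration.
fn main() {
    // Oldest block slot already stored in the DB (the checkpoint sync block's slot).
    let oldest_block_slot: u64 = 96;

    // A backfill batch sorted by ascending slot; its tail overlaps what is stored.
    let batch: Vec<u64> = vec![90, 91, 93, 95, 96, 97, 98];

    // Old behaviour: keep only blocks strictly older than the oldest stored block,
    // so the block at slot 96 is never re-imported.
    let old_relevant = batch.partition_point(|slot| *slot < oldest_block_slot);

    // New behaviour: also keep a block *at* the oldest stored slot, allowing the
    // checkpoint sync block to be re-imported along with its blobs/columns.
    let new_relevant = batch.partition_point(|slot| *slot <= oldest_block_slot);

    assert_eq!(old_relevant, 4); // slots 90, 91, 93, 95
    assert_eq!(new_relevant, 5); // slots 90, 91, 93, 95, 96
    println!("old filter keeps {old_relevant} blocks, new filter keeps {new_relevant}");
}
```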
@@ -1,5 +1,5 @@
 use crate::data_availability_checker::{AvailableBlock, AvailableBlockData};
-use crate::{BeaconChain, BeaconChainTypes, metrics};
+use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics};
 use itertools::Itertools;
 use state_processing::{
     per_block_processing::ParallelSignatureSets,
@@ -34,6 +34,8 @@ pub enum HistoricalBlockError {
     ValidatorPubkeyCacheTimeout,
     /// Logic error: should never occur.
     IndexOutOfBounds,
+    /// Logic error: should never occur.
+    MissingOldestBlockRoot { slot: Slot },
     /// Internal store error
     StoreError(StoreError),
 }
@@ -56,7 +58,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// `SignatureSetError` or `InvalidSignature` will be returned.
     ///
     /// To align with sync we allow some excess blocks with slots greater than or equal to
-    /// `oldest_block_slot` to be provided. They will be ignored without being checked.
+    /// `oldest_block_slot` to be provided. They will be re-imported to fill the columns of the
+    /// checkpoint sync block.
     ///
     /// This function should not be called concurrently with any other function that mutates
     /// the anchor info (including this function itself). If a concurrent mutation occurs that
@@ -72,9 +75,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let blob_info = self.store.get_blob_info();
         let data_column_info = self.store.get_data_column_info();
 
-        // Take all blocks with slots less than the oldest block slot.
+        // Take all blocks with slots less than or equal to the oldest block slot.
+        //
+        // This allows for reimport of the blobs/columns for the finalized block after checkpoint
+        // sync.
         let num_relevant = blocks.partition_point(|available_block| {
-            available_block.block().slot() < anchor_info.oldest_block_slot
+            available_block.block().slot() <= anchor_info.oldest_block_slot
         });
 
         let total_blocks = blocks.len();
@@ -95,6 +101,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }
 
         let mut expected_block_root = anchor_info.oldest_block_parent;
+        let mut last_block_root = expected_block_root;
         let mut prev_block_slot = anchor_info.oldest_block_slot;
         let mut new_oldest_blob_slot = blob_info.oldest_blob_slot;
         let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot;
@@ -107,7 +114,27 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         for available_block in blocks_to_import.into_iter().rev() {
             let (block_root, block, block_data) = available_block.deconstruct();
 
-            if block_root != expected_block_root {
+            if block.slot() == anchor_info.oldest_block_slot {
+                // When reimporting, verify that this is actually the same block (same block root).
+                let oldest_block_root = self
+                    .block_root_at_slot(block.slot(), WhenSlotSkipped::None)
+                    .ok()
+                    .flatten()
+                    .ok_or(HistoricalBlockError::MissingOldestBlockRoot { slot: block.slot() })?;
+                if block_root != oldest_block_root {
+                    return Err(HistoricalBlockError::MismatchedBlockRoot {
+                        block_root,
+                        expected_block_root: oldest_block_root,
+                    });
+                }
+
+                debug!(
+                    ?block_root,
+                    slot = %block.slot(),
+                    "Re-importing historic block"
+                );
+                last_block_root = block_root;
+            } else if block_root != expected_block_root {
                 return Err(HistoricalBlockError::MismatchedBlockRoot {
                     block_root,
                     expected_block_root,
@@ -198,7 +225,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .ok_or(HistoricalBlockError::IndexOutOfBounds)?
             .iter()
             .map(|block| block.parent_root())
-            .chain(iter::once(anchor_info.oldest_block_parent));
+            .chain(iter::once(last_block_root));
         let signature_set = signed_blocks
             .iter()
             .zip_eq(block_roots)
@@ -842,7 +842,7 @@ impl<E: EthSpec> MockBuilder<E> {
             .beacon_client
             .get_beacon_blocks::<E>(BlockId::Finalized)
             .await
-            .map_err(|_| "couldn't get finalized block".to_string())?
+            .map_err(|e| format!("couldn't get finalized block: {e:?}"))?
            .ok_or_else(|| "missing finalized block".to_string())?
             .data()
             .message()
@@ -855,7 +855,7 @@ impl<E: EthSpec> MockBuilder<E> {
             .beacon_client
             .get_beacon_blocks::<E>(BlockId::Justified)
             .await
-            .map_err(|_| "couldn't get justified block".to_string())?
+            .map_err(|e| format!("couldn't get justified block: {e:?}"))?
             .ok_or_else(|| "missing justified block".to_string())?
             .data()
             .message()
@@ -804,6 +804,16 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 // The peer is faulty if they bad signatures.
                 Some(PeerAction::LowToleranceError)
             }
+            HistoricalBlockError::MissingOldestBlockRoot { slot } => {
+                warn!(
+                    %slot,
+                    error = "missing_oldest_block_root",
+                    "Backfill batch processing error"
+                );
+                // This is an internal error, do not penalize the peer.
+                None
+            }
+
             HistoricalBlockError::ValidatorPubkeyCacheTimeout => {
                 warn!(
                     error = "pubkey_cache_timeout",
@@ -649,9 +649,15 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                 .inspect(|cache| cache.lock().put_block(*block_root, full_block.clone()));

            DatabaseBlock::Full(full_block)
-        } else if !self.config.prune_payloads {
+        } else if !self.config.prune_payloads || *block_root == split.block_root {
             // If payload pruning is disabled there's a chance we may have the payload of
             // this finalized block. Attempt to load it but don't error in case it's missing.
+            //
+            // We also allow for the split block's payload to be loaded *if it exists*. This is
+            // necessary on startup when syncing from an unaligned checkpoint (a checkpoint state
+            // at a skipped slot), and then loading the canonical head (with payload). If we modify
+            // payload pruning in future so that it doesn't prune the split block's payload, then
+            // this case could move to the case above where we error if the payload is missing.
             let fork_name = blinded_block.fork_name(&self.spec)?;
             if let Some(payload) = self.get_execution_payload(block_root, fork_name)? {
                 DatabaseBlock::Full(