Merge branch 'unstable' of https://github.com/sigp/lighthouse into gloas-payload-processing

This commit is contained in:
Eitan Seri-Levi
2026-02-11 23:35:28 -08:00
36 changed files with 509 additions and 120 deletions

View File

@@ -1799,10 +1799,12 @@ pub fn check_block_relevancy<T: BeaconChainTypes>(
) -> Result<Hash256, BlockError> {
let block = signed_block.message();
let present_slot = chain.slot()?;
// Do not process blocks from the future.
if block.slot() > chain.slot()? {
if block.slot() > present_slot {
return Err(BlockError::FutureSlot {
present_slot: chain.slot()?,
present_slot,
block_slot: block.slot(),
});
}

View File

@@ -372,8 +372,8 @@ where
// Initialize anchor info before attempting to write the genesis state.
// Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent
// historic states from being retained (unless `--reconstruct-historic-states` is set).
let retain_historic_states = self.chain_config.reconstruct_historic_states;
// historic states from being retained (unless `--archive` is set).
let retain_historic_states = self.chain_config.archive;
let genesis_beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
self.pending_io_batch.push(
store
@@ -529,7 +529,7 @@ where
// case it will be stored in the hot DB. In this case, we need to ensure the store's anchor
// is initialised prior to storing the state, as the anchor is required for working out
// hdiff storage strategies.
let retain_historic_states = self.chain_config.reconstruct_historic_states;
let retain_historic_states = self.chain_config.archive;
self.pending_io_batch.push(
store
.init_anchor_info(
@@ -1125,9 +1125,7 @@ where
);
// Check for states to reconstruct (in the background).
if beacon_chain.config.reconstruct_historic_states
&& beacon_chain.store.get_oldest_block_slot() == 0
{
if beacon_chain.config.archive && beacon_chain.store.get_oldest_block_slot() == 0 {
beacon_chain.store_migrator.process_reconstruction();
}

View File

@@ -38,7 +38,7 @@ pub struct ChainConfig {
/// If `None`, there is no weak subjectivity verification.
pub weak_subjectivity_checkpoint: Option<Checkpoint>,
/// Determine whether to reconstruct historic states, usually after a checkpoint sync.
pub reconstruct_historic_states: bool,
pub archive: bool,
/// The max size of a message that can be sent over the network.
pub max_network_size: usize,
/// Maximum percentage of the head committee weight at which to attempt re-orging the canonical head.
@@ -130,7 +130,7 @@ impl Default for ChainConfig {
Self {
import_max_skip_slots: None,
weak_subjectivity_checkpoint: None,
reconstruct_historic_states: false,
archive: false,
max_network_size: 10 * 1_048_576, // 10M
re_org_head_threshold: Some(DEFAULT_RE_ORG_HEAD_THRESHOLD),
re_org_parent_threshold: Some(DEFAULT_RE_ORG_PARENT_THRESHOLD),

View File

@@ -305,10 +305,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If backfill has completed and the chain is configured to reconstruct historic states,
// send a message to the background migrator instructing it to begin reconstruction.
// This can only happen if we have backfilled all the way to genesis.
if backfill_complete
&& self.genesis_backfill_slot == Slot::new(0)
&& self.config.reconstruct_historic_states
{
if backfill_complete && self.genesis_backfill_slot == Slot::new(0) && self.config.archive {
self.store_migrator.process_reconstruction();
}

View File

@@ -54,7 +54,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())
@@ -91,7 +91,7 @@ fn get_harness_capella_spec(
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec.clone())
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.keypairs(validator_keypairs)

View File

@@ -29,7 +29,7 @@ fn get_harness(
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())

View File

@@ -119,7 +119,7 @@ fn get_harness(
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.default_spec()
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())

View File

@@ -32,7 +32,7 @@ fn get_harness(
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())

View File

@@ -50,7 +50,7 @@ impl InvalidPayloadRig {
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec.into())
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
})
.deterministic_keypairs(VALIDATOR_COUNT)

View File

@@ -29,7 +29,7 @@ static KEYPAIRS: LazyLock<Vec<Keypair>> =
fn get_harness(spec: ChainSpec) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..Default::default()
};
@@ -48,7 +48,7 @@ fn get_harness(spec: ChainSpec) -> BeaconChainHarness<EphemeralHarnessType<E>> {
fn get_electra_harness(spec: ChainSpec) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..Default::default()
};

View File

@@ -70,7 +70,7 @@ async fn schema_stability() {
let store = get_store(&datadir, store_config, spec.clone());
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
};

View File

@@ -100,7 +100,7 @@ fn get_harness(
) -> TestHarness {
// Most tests expect to retain historic states, so we use this as the default.
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
};
get_harness_generic(
@@ -118,7 +118,7 @@ fn get_harness_import_all_data_columns(
// Most tests expect to retain historic states, so we use this as the default.
let chain_config = ChainConfig {
ignore_ws_check: true,
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
};
get_harness_generic(
@@ -2876,7 +2876,7 @@ async fn reproduction_unaligned_checkpoint_sync_pruned_payload() {
slot_clock.set_slot(harness.get_current_slot().as_u64());
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..ChainConfig::default()
};
@@ -3030,9 +3030,9 @@ async fn weak_subjectivity_sync_test(
slot_clock.set_slot(harness.get_current_slot().as_u64());
let chain_config = ChainConfig {
// Set reconstruct_historic_states to true from the start in the genesis case. This makes
// Set archive to true from the start in the genesis case. This makes
// some of the later checks more uniform across the genesis/non-genesis cases.
reconstruct_historic_states: checkpoint_slot == 0,
archive: checkpoint_slot == 0,
..ChainConfig::default()
};
@@ -3685,7 +3685,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
let temp = tempdir().unwrap();
let store = get_store(&temp);
let chain_config = ChainConfig {
reconstruct_historic_states: false,
archive: false,
..ChainConfig::default()
};
let harness = get_harness_generic(
@@ -4110,16 +4110,13 @@ async fn revert_minority_fork_on_resume() {
// version is correct. This is the easiest schema test to write without historic versions of
// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually
// as old downgrades are deprecated.
async fn schema_downgrade_to_min_version(
store_config: StoreConfig,
reconstruct_historic_states: bool,
) {
async fn schema_downgrade_to_min_version(store_config: StoreConfig, archive: bool) {
let num_blocks_produced = E::slots_per_epoch() * 4;
let db_path = tempdir().unwrap();
let spec = test_spec::<E>();
let chain_config = ChainConfig {
reconstruct_historic_states,
archive,
..ChainConfig::default()
};
@@ -4174,7 +4171,7 @@ async fn schema_downgrade_to_min_version(
.build();
// Check chain dump for appropriate range depending on whether this is an archive node.
let chain_dump_start_slot = if reconstruct_historic_states {
let chain_dump_start_slot = if archive {
Slot::new(0)
} else {
store.get_split_slot()
@@ -5154,7 +5151,7 @@ async fn ancestor_state_root_prior_to_split() {
..StoreConfig::default()
};
let chain_config = ChainConfig {
reconstruct_historic_states: false,
archive: false,
..ChainConfig::default()
};
@@ -5247,7 +5244,7 @@ async fn replay_from_split_state() {
..StoreConfig::default()
};
let chain_config = ChainConfig {
reconstruct_historic_states: false,
archive: false,
..ChainConfig::default()
};

View File

@@ -33,7 +33,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
get_harness_with_config(
validator_count,
ChainConfig {
reconstruct_historic_states: true,
archive: true,
..Default::default()
},
)
@@ -44,7 +44,7 @@ fn get_harness_with_spec(
spec: &ChainSpec,
) -> BeaconChainHarness<EphemeralHarnessType<MainnetEthSpec>> {
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
..Default::default()
};
let harness = BeaconChainHarness::builder(MainnetEthSpec)
@@ -85,7 +85,7 @@ fn get_harness_semi_supernode(
let harness = BeaconChainHarness::builder(MinimalEthSpec)
.default_spec()
.chain_config(ChainConfig {
reconstruct_historic_states: true,
archive: true,
..Default::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())
@@ -950,7 +950,7 @@ async fn pseudo_finalize_test_generic(
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let chain_config = ChainConfig {
reconstruct_historic_states: true,
archive: true,
epochs_per_migration,
..Default::default()
};