From 27aabe8159912bee00e4acde3eb9f76ddc79a1a0 Mon Sep 17 00:00:00 2001
From: Eitan Seri-Levi
Date: Mon, 17 Mar 2025 23:21:05 -0600
Subject: [PATCH] Pseudo finalization endpoint (#7103)

This is a backport of:

- https://github.com/sigp/lighthouse/pull/7059
- https://github.com/sigp/lighthouse/pull/7071

For:

- https://github.com/sigp/lighthouse/issues/7039

Introduce a new Lighthouse endpoint that allows a user to force a pseudo
finalization. This migrates data to the freezer DB and prunes sidechains,
which may help reduce disk space issues on non-finalized networks like
Holesky. We also ban peers that send us blocks that conflict with the
manually finalized checkpoint.

There were some CI fixes in https://github.com/sigp/lighthouse/pull/7071 that
I tried to include here. An example request against the new endpoint is
sketched after the diff.

Co-authored with: @jimmygchen @pawanjay176 @michaelsproul
---
 beacon_node/beacon_chain/src/beacon_chain.rs  |  41 +++-
 .../beacon_chain/src/block_verification.rs    |  17 +-
 beacon_node/beacon_chain/src/errors.rs        |   7 +-
 beacon_node/beacon_chain/src/migrate.rs       |  60 +++++-
 beacon_node/beacon_chain/tests/tests.rs       | 186 +++++++++++++++++-
 beacon_node/http_api/src/lib.rs               |  40 +++-
 .../network_beacon_processor/sync_methods.rs  |  13 ++
 common/eth2/src/types.rs                      |   7 +
 consensus/fork_choice/src/fork_choice.rs      |   5 +
 9 files changed, 353 insertions(+), 23 deletions(-)

diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index b24211009e..f73c17f79c 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -42,7 +42,7 @@ use crate::light_client_optimistic_update_verification::{
     Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate,
 };
 use crate::light_client_server_cache::LightClientServerCache;
-use crate::migrate::BackgroundMigrator;
+use crate::migrate::{BackgroundMigrator, ManualFinalizationNotification};
 use crate::naive_aggregation_pool::{
     AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool,
     SyncContributionAggregateMap,
@@ -118,8 +118,8 @@ use std::sync::Arc;
 use std::time::Duration;
 use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator};
 use store::{
-    BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore,
-    KeyValueStoreOp, StoreItem, StoreOp,
+    BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, HotStateSummary,
+    KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp,
 };
 use task_executor::{ShutdownReason, TaskExecutor};
 use tokio::sync::oneshot;
@@ -1711,6 +1711,41 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }
     }
 
+    pub fn manually_finalize_state(
+        &self,
+        state_root: Hash256,
+        checkpoint: Checkpoint,
+    ) -> Result<(), Error> {
+        let HotStateSummary {
+            slot,
+            latest_block_root,
+            ..
+        } = self
+            .store
+            .load_hot_state_summary(&state_root)
+            .map_err(BeaconChainError::DBError)?
+            .ok_or(BeaconChainError::MissingHotStateSummary(state_root))?;
+
+        if slot != checkpoint.epoch.start_slot(T::EthSpec::slots_per_epoch())
+            || latest_block_root != *checkpoint.root
+        {
+            return Err(BeaconChainError::InvalidCheckpoint {
+                state_root,
+                checkpoint,
+            });
+        }
+
+        let notif = ManualFinalizationNotification {
+            state_root: state_root.into(),
+            checkpoint,
+            head_tracker: self.head_tracker.clone(),
+            genesis_block_root: self.genesis_block_root,
+        };
+
+        self.store_migrator.process_manual_finalization(notif);
+        Ok(())
+    }
+
     /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`.
     ///
     /// The attestation will be obtained from `self.naive_aggregation_pool`.
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index 1bac6cac0a..4a5282a1d7 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -1767,7 +1767,22 @@ pub fn check_block_is_finalized_checkpoint_or_descendant<
     fork_choice: &BeaconForkChoice<T>,
     block: B,
 ) -> Result<B, BlockError> {
-    if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root()) {
+    // If we have a split block newer than finalization then we also ban blocks which are not
+    // descended from that split block. It's important not to try checking `is_descendant` if
+    // finality is ahead of the split and the split block has been pruned, as `is_descendant` will
+    // return `false` in this case.
+    let finalized_slot = fork_choice
+        .finalized_checkpoint()
+        .epoch
+        .start_slot(T::EthSpec::slots_per_epoch());
+    let split = chain.store.get_split_info();
+    let is_descendant_from_split_block = split.slot == 0
+        || split.slot <= finalized_slot
+        || fork_choice.is_descendant(split.block_root, block.parent_root());
+
+    if fork_choice.is_finalized_checkpoint_or_descendant(block.parent_root())
+        && is_descendant_from_split_block
+    {
         Ok(block)
     } else {
         // If fork choice does *not* consider the parent to be a descendant of the finalized block,
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index 2e13ab4090..8509c52c8a 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -61,6 +61,7 @@ pub enum BeaconChainError {
     ForkChoiceStoreError(ForkChoiceStoreError),
     MissingBeaconBlock(Hash256),
     MissingBeaconState(Hash256),
+    MissingHotStateSummary(Hash256),
     SlotProcessingError(SlotProcessingError),
     EpochProcessingError(EpochProcessingError),
     StateAdvanceError(StateAdvanceError),
@@ -181,9 +182,9 @@ pub enum BeaconChainError {
         execution_block_hash: Option<ExecutionBlockHash>,
     },
     ForkchoiceUpdate(execution_layer::Error),
-    FinalizedCheckpointMismatch {
-        head_state: Checkpoint,
-        fork_choice: Hash256,
+    InvalidCheckpoint {
+        state_root: Hash256,
+        checkpoint: Checkpoint,
     },
     InvalidSlot(Slot),
     HeadBlockNotFullyVerified {
diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs
index a8543fab9b..d4ee0fc7c1 100644
--- a/beacon_node/beacon_chain/src/migrate.rs
+++ b/beacon_node/beacon_chain/src/migrate.rs
@@ -124,14 +124,22 @@ pub enum Notification {
     Finalization(FinalizationNotification),
     Reconstruction,
     PruneBlobs(Epoch),
+    ManualFinalization(ManualFinalizationNotification),
+}
+
+pub struct ManualFinalizationNotification {
+    pub state_root: BeaconStateHash,
+    pub checkpoint: Checkpoint,
+    pub head_tracker: Arc<HeadTracker>,
+    pub genesis_block_root: Hash256,
 }
 
 pub struct FinalizationNotification {
-    finalized_state_root: BeaconStateHash,
-    finalized_checkpoint: Checkpoint,
-    head_tracker: Arc<HeadTracker>,
-    prev_migration: Arc<Mutex<PrevMigration>>,
-    genesis_block_root: Hash256,
+    pub finalized_state_root: BeaconStateHash,
+    pub finalized_checkpoint: Checkpoint,
+    pub head_tracker: Arc<HeadTracker>,
+    pub prev_migration: Arc<Mutex<PrevMigration>>,
+    pub genesis_block_root: Hash256,
 }
 
 impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
@@ -190,6 +198,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
[...]
+        db: Arc<HotColdDB<E, Hot, Cold>>,
+        notif: ManualFinalizationNotification,
+        log: &Logger,
+    ) {
+        // We create a "dummy" prev migration
+        let prev_migration = PrevMigration {
+            epoch: Epoch::new(1),
+            epochs_per_migration: 2,
+        };
+        let notif = FinalizationNotification {
+            finalized_state_root: notif.state_root,
+            finalized_checkpoint: notif.checkpoint,
+            head_tracker: notif.head_tracker,
+            prev_migration: Arc::new(prev_migration.into()),
+            genesis_block_root: notif.genesis_block_root,
+        };
+        Self::run_migration(db, notif, log);
+    }
+
     /// Perform the actual work of `process_finalization`.
     fn run_migration(
         db: Arc<HotColdDB<E, Hot, Cold>>,
@@ -423,16 +459,27 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
[...]
                     Notification::Reconstruction => reconstruction_notif = Some(notif),
                     Notification::Finalization(fin) => finalization_notif = Some(fin),
+                    Notification::ManualFinalization(fin) => manual_finalization_notif = Some(fin),
                     Notification::PruneBlobs(dab) => prune_blobs_notif = Some(dab),
                 }
 
                 // Read the rest of the messages in the channel, taking the best of each type.
                 for notif in rx.try_iter() {
                     match notif {
                         Notification::Reconstruction => reconstruction_notif = Some(notif),
+                        Notification::ManualFinalization(fin) => {
+                            if let Some(current) = manual_finalization_notif.as_mut() {
+                                if fin.checkpoint.epoch > current.checkpoint.epoch {
+                                    *current = fin;
+                                }
+                            } else {
+                                manual_finalization_notif = Some(fin);
+                            }
+                        }
                         Notification::Finalization(fin) => {
                             if let Some(current) = finalization_notif.as_mut() {
                                 if fin.finalized_checkpoint.epoch
@@ -455,6 +502,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, H
[...]
diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs
[...]
 static KEYPAIRS: LazyLock<Vec<Keypair>> =
     LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT));
 
 fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
+    get_harness_with_config(
+        validator_count,
+        ChainConfig {
+            reconstruct_historic_states: true,
+            ..Default::default()
+        },
+    )
+}
+
+fn get_harness_with_config(
+    validator_count: usize,
+    chain_config: ChainConfig,
+) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
-        .chain_config(ChainConfig {
-            reconstruct_historic_states: true,
-            ..ChainConfig::default()
-        })
+        .chain_config(chain_config)
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
@@ -869,3 +881,165 @@ async fn block_roots_skip_slot_behaviour() {
         "WhenSlotSkipped::Prev should return None on a future slot"
     );
 }
+
+async fn pseudo_finalize_test_generic(
+    epochs_per_migration: u64,
+    expect_true_finalization_migration: bool,
+) {
+    // This test ensures that after pseudo finalization, we can still finalize the chain without issues
+    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
+
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: true,
+        epochs_per_migration,
+        ..Default::default()
+    };
+    let harness = get_harness_with_config(VALIDATOR_COUNT, chain_config);
+
+    let one_third = VALIDATOR_COUNT / 3;
+    let attesters = (0..one_third).collect();
+
+    // extend the chain, but don't finalize
+    harness
+        .extend_chain(
+            num_blocks_produced as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::SomeValidators(attesters),
+        )
+        .await;
+
+    harness.advance_slot();
+
+    let head = harness.chain.head_snapshot();
+    let state = &head.beacon_state;
+    let split = harness.chain.store.get_split_info();
+
+    assert_eq!(
+        state.slot(),
+        num_blocks_produced,
+        "head should be at the current slot"
+    );
+    assert_eq!(
+        state.current_epoch(),
+        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
+        "head should be at the expected epoch"
+    );
+    assert_eq!(
+        state.current_justified_checkpoint().epoch,
+        0,
+        "There should be no justified checkpoint"
+    );
+    assert_eq!(
+        state.finalized_checkpoint().epoch,
+        0,
+        "There should be no finalized checkpoint"
checkpoint" + ); + assert_eq!(split.slot, 0, "Our split point should be unset"); + + let checkpoint = Checkpoint { + epoch: head.beacon_state.current_epoch(), + root: head.beacon_block_root, + }; + + // pseudo finalize + harness + .chain + .manually_finalize_state(head.beacon_state_root(), checkpoint) + .unwrap(); + + let split = harness.chain.store.get_split_info(); + let pseudo_finalized_slot = split.slot; + + assert_eq!( + state.current_justified_checkpoint().epoch, + 0, + "We pseudo finalized, but our justified checkpoint should still be unset" + ); + assert_eq!( + state.finalized_checkpoint().epoch, + 0, + "We pseudo finalized, but our finalized checkpoint should still be unset" + ); + assert_eq!( + split.slot, + head.beacon_state.slot(), + "We pseudo finalized, our split point should be at the current head slot" + ); + + // finalize the chain + harness + .extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + harness.advance_slot(); + + let head = harness.chain.head_snapshot(); + let state = &head.beacon_state; + let split = harness.chain.store.get_split_info(); + + assert_eq!( + state.slot(), + num_blocks_produced * 2, + "head should be at the current slot" + ); + assert_eq!( + state.current_epoch(), + (num_blocks_produced * 2) / MinimalEthSpec::slots_per_epoch(), + "head should be at the expected epoch" + ); + assert_eq!( + state.current_justified_checkpoint().epoch, + state.current_epoch() - 1, + "the head should be justified one behind the current epoch" + ); + let finalized_epoch = state.finalized_checkpoint().epoch; + assert_eq!( + finalized_epoch, + state.current_epoch() - 2, + "the head should be finalized two behind the current epoch" + ); + + let expected_split_slot = if pseudo_finalized_slot.epoch(E::slots_per_epoch()) + + epochs_per_migration + > finalized_epoch + { + pseudo_finalized_slot + } else { + finalized_epoch.start_slot(E::slots_per_epoch()) + }; + assert_eq!( + split.slot, expected_split_slot, + "We finalized, our split point should be updated according to epochs_per_migration" + ); + + // In the case that we did not process the true finalization migration (due to + // epochs_per_migration), check that the chain finalized *despite* the absence of the split + // block in fork choice. 
+    // This is a regression test for https://github.com/sigp/lighthouse/pull/7105
+    if !expect_true_finalization_migration {
+        assert_eq!(expected_split_slot, pseudo_finalized_slot);
+        assert!(!harness
+            .chain
+            .canonical_head
+            .fork_choice_read_lock()
+            .contains_block(&split.block_root));
+    }
+}
+
+#[tokio::test]
+async fn pseudo_finalize_basic() {
+    let epochs_per_migration = 0;
+    let expect_true_migration = true;
+    pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await;
+}
+
+#[tokio::test]
+async fn pseudo_finalize_with_lagging_split_update() {
+    let epochs_per_migration = 10;
+    let expect_true_migration = false;
+    pseudo_finalize_test_generic(epochs_per_migration, expect_true_migration).await;
+}
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 0d89ca76ac..4ccf448b68 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -86,11 +86,11 @@ use tokio_stream::{
 };
 use types::{
     fork_versioned_response::EmptyMetadata, Attestation, AttestationData, AttestationShufflingId,
-    AttesterSlashing, BeaconStateError, ChainSpec, CommitteeCache, ConfigAndPreset, Epoch, EthSpec,
-    ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing,
-    RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange,
-    SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot,
-    SyncCommitteeMessage, SyncContributionData,
+    AttesterSlashing, BeaconStateError, ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset,
+    Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData,
+    ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock,
+    SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData,
+    SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData,
 };
 use validator::pubkey_to_validator_index;
 use version::{
@@ -4076,6 +4076,35 @@ pub fn serve<T: BeaconChainTypes>(
             },
         );
 
+    // POST lighthouse/finalize
+    let post_lighthouse_finalize = warp::path("lighthouse")
+        .and(warp::path("finalize"))
+        .and(warp::path::end())
+        .and(warp_utils::json::json())
+        .and(task_spawner_filter.clone())
+        .and(chain_filter.clone())
+        .then(
+            |request_data: api_types::ManualFinalizationRequestData,
+             task_spawner: TaskSpawner<T::EthSpec>,
+             chain: Arc<BeaconChain<T>>| {
+                task_spawner.blocking_json_task(Priority::P0, move || {
+                    let checkpoint = Checkpoint {
+                        epoch: request_data.epoch,
+                        root: request_data.block_root,
+                    };
+
+                    chain
+                        .manually_finalize_state(request_data.state_root, checkpoint)
+                        .map(|_| api_types::GenericResponse::from(request_data))
+                        .map_err(|e| {
+                            warp_utils::reject::custom_bad_request(format!(
+                                "Failed to finalize state due to error: {e:?}"
+                            ))
+                        })
+                })
+            },
+        );
+
     // POST lighthouse/liveness
     let post_lighthouse_liveness = warp::path("lighthouse")
         .and(warp::path("liveness"))
@@ -4848,6 +4877,7 @@ pub fn serve<T: BeaconChainTypes>(
                 .uor(post_lighthouse_block_rewards)
                 .uor(post_lighthouse_ui_validator_metrics)
                 .uor(post_lighthouse_ui_validator_info)
+                .uor(post_lighthouse_finalize)
                 .recover(warp_utils::reject::handle_rejection),
         ),
     )
diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
index eb03d3c01b..6b8ed607ab 100644
--- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs
@@ -740,6 +740,19 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 debug!(self.log, "Finalized or earlier block processed";);
                 Ok(())
             }
+            BlockError::NotFinalizedDescendant { block_parent_root } => {
+                debug!(
+                    self.log,
+                    "Not syncing to a chain that conflicts with the canonical or manual finalized checkpoint"
+                );
+                Err(ChainSegmentFailed {
+                    message: format!(
+                        "Block with parent_root {} conflicts with our checkpoint state",
+                        block_parent_root
+                    ),
+                    peer_action: Some(PeerAction::Fatal),
+                })
+            }
             BlockError::GenesisBlock => {
                 debug!(self.log, "Genesis block was processed");
                 Ok(())
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs
index 59374f629d..7d70d242be 100644
--- a/common/eth2/src/types.rs
+++ b/common/eth2/src/types.rs
@@ -1424,6 +1424,13 @@ pub struct StandardLivenessResponseData {
     pub is_live: bool,
 }
 
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ManualFinalizationRequestData {
+    pub state_root: Hash256,
+    pub epoch: Epoch,
+    pub block_root: Hash256,
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct LivenessRequestData {
     pub epoch: Epoch,
diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs
index 4c25be950b..ddd5923849 100644
--- a/consensus/fork_choice/src/fork_choice.rs
+++ b/consensus/fork_choice/src/fork_choice.rs
@@ -1255,6 +1255,11 @@ where
             .is_finalized_checkpoint_or_descendant::<E>(block_root)
     }
 
+    pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool {
+        self.proto_array
+            .is_descendant(ancestor_root, descendant_root)
+    }
+
     /// Returns `Ok(true)` if `block_root` has been imported optimistically or deemed invalid.
     ///
     /// Returns `Ok(false)` if `block_root`'s execution payload has been elected as fully VALID, if
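
Example request (not part of the patch): the route registered above is POST
/lighthouse/finalize, and the body is a JSON-encoded ManualFinalizationRequestData
(state_root, epoch, block_root), where the state root must identify the
epoch-boundary state of the checkpoint and the block root must match that state's
latest block root. The sketch below is illustrative only; it assumes a beacon node
on the default HTTP port (5052) and uses the reqwest (with its json feature),
serde_json and tokio crates, which are not dependencies of this change. Integer
fields are quoted as strings, in line with the usual beacon API encoding.

// Hypothetical client sketch: force a pseudo finalization at a known checkpoint
// via the new Lighthouse-specific endpoint.
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Placeholder values: fetch the real state root and block root from the
    // standard beacon API beforehand (e.g. /eth/v1/beacon/states/{id}/root).
    let body = json!({
        "state_root": "0x…",
        "epoch": "12345",
        "block_root": "0x…"
    });

    let response = reqwest::Client::new()
        .post("http://localhost:5052/lighthouse/finalize")
        .json(&body)
        .send()
        .await?;

    // On success the handler echoes the request back as a GenericResponse; on
    // failure it returns HTTP 400 with the BeaconChainError in the message.
    println!("{}: {}", response.status(), response.text().await?);
    Ok(())
}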