From 094c36db97f969894d126f1c66f73b695cf710a5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 4 Dec 2025 20:56:48 +1100 Subject: [PATCH 1/2] Use correct `Fork` in `verify_header_signature` (#8528) Fix a bug in `verify_header_signature` which tripped up some Lighthouse nodes at the Fusaka fork. It was a latent bug in a function that has been present for a long time, but was previously only used by slashers. With Fulu it entered the critical path of blob/column verification -- call stack: - `FetchBlobsBeaconAdapter::process_engine_blobs` - `BeaconChain::process_engine_blobs` - `BeaconChain::check_engine_blobs_availability_and_import` - `BeaconChain::check_blob_header_signature_and_slashability` - `verify_header_signature` Thanks @eserilev for quickly diagnosing the root cause. Change `verify_header_signature` to use `ChainSpec::fork_at_epoch` to compute the `Fork`, rather than using the head state's fork. At a fork boundary the head state's fork is stale and lacks the data for the new fork. Using `fork_at_epoch` ensures that we use the correct fork data and validate the transition block's signature correctly. 
Co-Authored-By: Michael Sproul --- .../beacon_chain/src/block_verification.rs | 6 +- .../beacon_chain/tests/column_verification.rs | 93 +++++++++++++++++++ 2 files changed, 97 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5078e24a51..dfcf24cf7e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -2122,11 +2122,13 @@ pub fn verify_header_signature( .get(header.message.proposer_index as usize) .cloned() .ok_or(Err::unknown_validator_error(header.message.proposer_index))?; - let head_fork = chain.canonical_head.cached_head().head_fork(); + let fork = chain + .spec + .fork_at_epoch(header.message.slot.epoch(T::EthSpec::slots_per_epoch())); if header.verify_signature::( &proposer_pubkey, - &head_fork, + &fork, chain.genesis_validators_root, &chain.spec, ) { diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 229ae1e199..dc99943464 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -115,3 +115,96 @@ async fn rpc_columns_with_invalid_header_signature() { BlockError::InvalidSignature(InvalidSignature::ProposerSignature) )); } + +// Regression test for verify_header_signature bug: it uses head_fork() which is wrong for fork blocks +#[tokio::test] +async fn verify_header_signature_fork_block_bug() { + // Create a spec with all forks enabled at genesis except Fulu which is at epoch 1 + // This allows us to easily create the scenario where the head is at Electra + // but we're trying to verify a block from Fulu epoch + let mut spec = test_spec::(); + + // Only run this test for FORK_NAME=fulu. 
+ if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() { + return; + } + + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + let fulu_fork_epoch = Epoch::new(1); + spec.fulu_fork_epoch = Some(fulu_fork_epoch); + + let spec = Arc::new(spec); + let harness = get_harness(VALIDATOR_COUNT, spec.clone(), NodeCustodyType::Supernode); + harness.execution_block_generator().set_min_blob_count(1); + + // Add some blocks in epoch 0 (Electra) + harness + .extend_chain( + E::slots_per_epoch() as usize - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Verify we're still in epoch 0 (Electra) + let pre_fork_state = harness.get_current_state(); + assert_eq!(pre_fork_state.current_epoch(), Epoch::new(0)); + assert!(matches!(pre_fork_state, BeaconState::Electra(_))); + + // Now produce a block at the first slot of epoch 1 (Fulu fork). + // make_block will advance the state which will trigger the Electra->Fulu upgrade. + let fork_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch()); + let ((signed_block, opt_blobs), _state_root) = + harness.make_block(pre_fork_state.clone(), fork_slot).await; + let (_, blobs) = opt_blobs.expect("Blobs should be present"); + assert!(!blobs.is_empty(), "Block should have blobs"); + let block_root = signed_block.canonical_root(); + + // Process the block WITHOUT blobs to make it unavailable. + // The block will be accepted but won't become the head because it's not fully available. + // This keeps the head at the pre-fork state (Electra). 
+ harness.advance_slot(); + let rpc_block = harness + .build_rpc_block_from_blobs(block_root, signed_block.clone(), None) + .expect("Should build RPC block"); + let availability = harness + .chain + .process_block( + block_root, + rpc_block, + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) + .await + .expect("Block should be processed"); + assert_eq!( + availability, + AvailabilityProcessingStatus::MissingComponents(fork_slot, block_root), + "Block should be pending availability" + ); + + // The head should still be in epoch 0 (Electra) because the fork block isn't available + let current_head_state = harness.get_current_state(); + assert_eq!(current_head_state.current_epoch(), Epoch::new(0)); + assert!(matches!(current_head_state, BeaconState::Electra(_))); + + // Now try to process columns for the fork block. + // The bug: verify_header_signature previously used head_fork() which fetched the fork from + // the head state (still Electra fork), but the block was signed with the Fulu fork version. + // This caused an incorrect signature verification failure. + let data_column_sidecars = + generate_data_column_sidecars_from_block(&signed_block, &harness.chain.spec); + + // Now that the bug is fixed, the block should import. 
+ let status = harness + .chain + .process_rpc_custody_columns(data_column_sidecars) + .await + .unwrap(); + assert_eq!(status, AvailabilityProcessingStatus::Imported(block_root)); +} From 9ddd2d8dbedb234d9d8d62002b9912441dfb0b54 Mon Sep 17 00:00:00 2001 From: 0xMushow <105550256+0xMushow@users.noreply.github.com> Date: Thu, 4 Dec 2025 14:35:12 +0100 Subject: [PATCH 2/2] fix(beacon_node): add pruning of observed_column_sidecars (#8531) None I noticed that `observed_column_sidecars` is missing its prune call in the finalization handler, which results in a memory leak on long-running nodes (slow growth, roughly **7MB/day**): https://github.com/sigp/lighthouse/blob/13dfa9200f822c41ccd81b95a3f052df54c888e9/beacon_node/beacon_chain/src/canonical_head.rs#L940-L959 Both caches use the same generic type `ObservedDataSidecars`: https://github.com/sigp/lighthouse/blob/22ec4b327186c4a4a87d2c8c745caf3b36cb6dd6/beacon_node/beacon_chain/src/beacon_chain.rs#L413-L416 The type's documentation explicitly requires manual pruning: > "*The cache supports pruning based upon the finalized epoch. It does not automatically prune, you must call Self::prune manually.*" https://github.com/sigp/lighthouse/blob/b4704eab4ac8edf0ea0282ed9a5758b784038dd2/beacon_node/beacon_chain/src/observed_data_sidecars.rs#L66-L74 Currently: - `observed_blob_sidecars` => pruned - `observed_column_sidecars` => **NOT** pruned Without pruning, the underlying HashMap accumulates entries indefinitely, causing continuous memory growth until the node restarts. 
Co-Authored-By: Antoine James --- beacon_node/beacon_chain/src/canonical_head.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 7dd4c88c51..92b218f180 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -951,6 +951,13 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); + self.observed_column_sidecars.write().prune( + new_view + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); + self.observed_slashable.write().prune( new_view .finalized_checkpoint