Compare commits

...

2 Commits

Author SHA1 Message Date
0xMushow
9ddd2d8dbe fix(beacon_node): add pruning of observed_column_sidecars (#8531)
None


  I noticed that `observed_column_sidecars` is missing its prune call in the finalization handler, which results in a slow memory leak (roughly **7MB/day**) on long-running nodes:

13dfa9200f/beacon_node/beacon_chain/src/canonical_head.rs (L940-L959)

Both caches use the same generic type `ObservedDataSidecars<T>`:
22ec4b3271/beacon_node/beacon_chain/src/beacon_chain.rs (L413-L416)

The type's documentation explicitly requires manual pruning:

>  "*The cache supports pruning based upon the finalized epoch. It does not automatically prune, you must call Self::prune manually.*"


b4704eab4a/beacon_node/beacon_chain/src/observed_data_sidecars.rs (L66-L74)

Currently:
- `observed_blob_sidecars` => pruned
- `observed_column_sidecars` => **NOT** pruned

Without pruning, the underlying HashMap accumulates entries indefinitely, causing continuous memory growth until the node restarts.


Co-Authored-By: Antoine James <antoine@ethereum.org>
2025-12-04 13:35:12 +00:00
Michael Sproul
094c36db97 Use correct Fork in verify_header_signature (#8528)
Fix a bug in `verify_header_signature` which tripped up some Lighthouse nodes at the Fusaka fork. The bug was a latent bug in a function that has been present for a long time, but only used by slashers. With Fulu it entered the critical path of blob/column verification -- call stack:

- `FetchBlobsBeaconAdapter::process_engine_blobs`
- `BeaconChain::process_engine_blobs`
- `BeaconChain::check_engine_blobs_availability_and_import`
- `BeaconChain::check_blob_header_signature_and_slashability`
- `verify_header_signature`

Thanks @eserilev for quickly diagnosing the root cause.


  Change `verify_header_signature` to use `ChainSpec::fork_at_epoch` to compute the `Fork`, rather than using the head state's fork. At a fork boundary the head state's fork is stale and lacks the data for the new fork. Using `fork_at_epoch` ensures that we use the correct fork data and validate the transition block's signature correctly.


Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
2025-12-04 09:56:48 +00:00
3 changed files with 104 additions and 2 deletions

View File

@@ -2122,11 +2122,13 @@ pub fn verify_header_signature<T: BeaconChainTypes, Err: BlockBlobError>(
.get(header.message.proposer_index as usize) .get(header.message.proposer_index as usize)
.cloned() .cloned()
.ok_or(Err::unknown_validator_error(header.message.proposer_index))?; .ok_or(Err::unknown_validator_error(header.message.proposer_index))?;
let head_fork = chain.canonical_head.cached_head().head_fork(); let fork = chain
.spec
.fork_at_epoch(header.message.slot.epoch(T::EthSpec::slots_per_epoch()));
if header.verify_signature::<T::EthSpec>( if header.verify_signature::<T::EthSpec>(
&proposer_pubkey, &proposer_pubkey,
&head_fork, &fork,
chain.genesis_validators_root, chain.genesis_validators_root,
&chain.spec, &chain.spec,
) { ) {

View File

@@ -951,6 +951,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.start_slot(T::EthSpec::slots_per_epoch()), .start_slot(T::EthSpec::slots_per_epoch()),
); );
self.observed_column_sidecars.write().prune(
new_view
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
);
self.observed_slashable.write().prune( self.observed_slashable.write().prune(
new_view new_view
.finalized_checkpoint .finalized_checkpoint

View File

@@ -115,3 +115,96 @@ async fn rpc_columns_with_invalid_header_signature() {
BlockError::InvalidSignature(InvalidSignature::ProposerSignature) BlockError::InvalidSignature(InvalidSignature::ProposerSignature)
)); ));
} }
// Regression test for a `verify_header_signature` bug: it used `head_fork()`, which
// returns the fork of the *head state*. At a fork boundary the head state can still be
// on the previous fork, so signatures on blocks signed under the new fork version were
// rejected. The fix computes the fork from the block's own epoch via `fork_at_epoch`.
#[tokio::test]
async fn verify_header_signature_fork_block_bug() {
    // Create a spec with all forks enabled at genesis except Fulu, which is at epoch 1.
    // This allows us to easily create the scenario where the head is at Electra
    // but we're trying to verify a block from the Fulu epoch.
    let mut spec = test_spec::<E>();
    // Only run this test for FORK_NAME=fulu: Fulu must be scheduled (so the fork
    // boundary exists) and Gloas must not be (so Fulu is the latest fork under test).
    if !spec.is_fulu_scheduled() || spec.is_gloas_scheduled() {
        return;
    }
    spec.altair_fork_epoch = Some(Epoch::new(0));
    spec.bellatrix_fork_epoch = Some(Epoch::new(0));
    spec.capella_fork_epoch = Some(Epoch::new(0));
    spec.deneb_fork_epoch = Some(Epoch::new(0));
    spec.electra_fork_epoch = Some(Epoch::new(0));
    let fulu_fork_epoch = Epoch::new(1);
    spec.fulu_fork_epoch = Some(fulu_fork_epoch);
    let spec = Arc::new(spec);
    let harness = get_harness(VALIDATOR_COUNT, spec.clone(), NodeCustodyType::Supernode);
    // Require at least one blob per produced block, so the fork block has data
    // columns whose header signature must be verified (asserted further below).
    harness.execution_block_generator().set_min_blob_count(1);
    // Add some blocks in epoch 0 (Electra), stopping one slot short of the epoch end.
    harness
        .extend_chain(
            E::slots_per_epoch() as usize - 1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        )
        .await;
    // Verify we're still in epoch 0 (Electra).
    let pre_fork_state = harness.get_current_state();
    assert_eq!(pre_fork_state.current_epoch(), Epoch::new(0));
    assert!(matches!(pre_fork_state, BeaconState::Electra(_)));
    // Now produce a block at the first slot of epoch 1 (Fulu fork).
    // make_block will advance the state, which will trigger the Electra->Fulu upgrade,
    // so this block is signed under the Fulu fork version.
    let fork_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch());
    let ((signed_block, opt_blobs), _state_root) =
        harness.make_block(pre_fork_state.clone(), fork_slot).await;
    let (_, blobs) = opt_blobs.expect("Blobs should be present");
    assert!(!blobs.is_empty(), "Block should have blobs");
    let block_root = signed_block.canonical_root();
    // Process the block WITHOUT blobs to make it unavailable.
    // The block will be accepted but won't become the head because it's not fully
    // available. This keeps the head at the pre-fork state (Electra), which is exactly
    // the condition that made `head_fork()` return the wrong (stale) fork.
    harness.advance_slot();
    let rpc_block = harness
        .build_rpc_block_from_blobs(block_root, signed_block.clone(), None)
        .expect("Should build RPC block");
    let availability = harness
        .chain
        .process_block(
            block_root,
            rpc_block,
            NotifyExecutionLayer::Yes,
            BlockImportSource::RangeSync,
            || Ok(()),
        )
        .await
        .expect("Block should be processed");
    assert_eq!(
        availability,
        AvailabilityProcessingStatus::MissingComponents(fork_slot, block_root),
        "Block should be pending availability"
    );
    // The head should still be in epoch 0 (Electra) because the fork block isn't available.
    let current_head_state = harness.get_current_state();
    assert_eq!(current_head_state.current_epoch(), Epoch::new(0));
    assert!(matches!(current_head_state, BeaconState::Electra(_)));
    // Now try to process columns for the fork block.
    // The bug: verify_header_signature previously used head_fork() which fetched the fork
    // from the head state (still Electra fork), but the block was signed with the Fulu
    // fork version. This caused an incorrect signature verification failure.
    let data_column_sidecars =
        generate_data_column_sidecars_from_block(&signed_block, &harness.chain.spec);
    // Now that the bug is fixed, importing the columns completes availability and the
    // block should import successfully.
    let status = harness
        .chain
        .process_rpc_custody_columns(data_column_sidecars)
        .await
        .unwrap();
    assert_eq!(status, AvailabilityProcessingStatus::Imported(block_root));
}