Improve block header signature handling (#8253)

Closes:

- https://github.com/sigp/lighthouse/issues/7650


  Reject blob and data column sidecars from RPC with invalid signatures.


Co-Authored-By: Michael Sproul <michael@sigmaprime.io>
This commit is contained in:
Michael Sproul
2025-10-22 00:58:12 +11:00
committed by GitHub
parent 040d992132
commit 21bab0899a
6 changed files with 307 additions and 55 deletions

View File

@@ -3564,7 +3564,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.await
}
fn check_blobs_for_slashability<'a>(
fn check_blob_header_signature_and_slashability<'a>(
self: &Arc<Self>,
block_root: Hash256,
blobs: impl IntoIterator<Item = &'a BlobSidecar<T::EthSpec>>,
@@ -3575,17 +3575,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map(|b| b.signed_block_header.clone())
.unique()
{
if verify_header_signature::<T, BlockError>(self, &header).is_ok() {
slashable_cache
.observe_slashable(
header.message.slot,
header.message.proposer_index,
block_root,
)
.map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?;
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(header);
}
// Return an error if *any* header signature is invalid, we do not want to import this
// list of blobs into the DA checker. However, we will process any valid headers prior
// to the first invalid header in the slashable cache & slasher.
verify_header_signature::<T, BlockError>(self, &header)?;
slashable_cache
.observe_slashable(
header.message.slot,
header.message.proposer_index,
block_root,
)
.map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?;
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(header);
}
}
Ok(())
@@ -3599,7 +3602,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
self.check_blobs_for_slashability(block_root, blobs.iter().flatten().map(Arc::as_ref))?;
self.check_blob_header_signature_and_slashability(
block_root,
blobs.iter().flatten().map(Arc::as_ref),
)?;
let availability = self
.data_availability_checker
.put_rpc_blobs(block_root, blobs)?;
@@ -3616,12 +3622,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<AvailabilityProcessingStatus, BlockError> {
let availability = match engine_get_blobs_output {
EngineGetBlobsOutput::Blobs(blobs) => {
self.check_blobs_for_slashability(block_root, blobs.iter().map(|b| b.as_blob()))?;
self.check_blob_header_signature_and_slashability(
block_root,
blobs.iter().map(|b| b.as_blob()),
)?;
self.data_availability_checker
.put_kzg_verified_blobs(block_root, blobs)?
}
EngineGetBlobsOutput::CustodyColumns(data_columns) => {
self.check_columns_for_slashability(
self.check_data_column_sidecar_header_signature_and_slashability(
block_root,
data_columns.iter().map(|c| c.as_data_column()),
)?;
@@ -3642,7 +3651,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
custody_columns: DataColumnSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
self.check_columns_for_slashability(
self.check_data_column_sidecar_header_signature_and_slashability(
block_root,
custody_columns.iter().map(|c| c.as_ref()),
)?;
@@ -3659,7 +3668,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.await
}
fn check_columns_for_slashability<'a>(
fn check_data_column_sidecar_header_signature_and_slashability<'a>(
self: &Arc<Self>,
block_root: Hash256,
custody_columns: impl IntoIterator<Item = &'a DataColumnSidecar<T::EthSpec>>,
@@ -3673,17 +3682,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map(|c| c.signed_block_header.clone())
.unique()
{
if verify_header_signature::<T, BlockError>(self, &header).is_ok() {
slashable_cache
.observe_slashable(
header.message.slot,
header.message.proposer_index,
block_root,
)
.map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?;
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(header);
}
// Return an error if *any* header signature is invalid, we do not want to import this
// list of data columns into the DA checker. However, we will process any valid headers prior
// to the first invalid header in the slashable cache & slasher.
verify_header_signature::<T, BlockError>(self, &header)?;
slashable_cache
.observe_slashable(
header.message.slot,
header.message.proposer_index,
block_root,
)
.map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?;
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(header);
}
}
Ok(())

View File

@@ -2437,7 +2437,7 @@ where
}
/// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`.
fn build_rpc_block_from_blobs(
pub fn build_rpc_block_from_blobs(
&self,
block_root: Hash256,
block: Arc<SignedBeaconBlock<E, FullPayload<E>>>,

View File

@@ -0,0 +1,120 @@
#![cfg(not(debug_assertions))]
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec,
};
use beacon_chain::{
AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer,
block_verification_types::AsBlock,
};
use logging::create_test_tracing_subscriber;
use std::sync::{Arc, LazyLock};
use types::{blob_sidecar::FixedBlobSidecarList, *};
type E = MainnetEthSpec;
// Should ideally be divisible by 3.
const VALIDATOR_COUNT: usize = 24;
/// A cached set of keys.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT));
/// Build an ephemeral-store test harness backed by `validator_count` cached keypairs.
///
/// Historic state reconstruction is enabled, a mock execution layer stands in for a
/// real EL, and the harness is advanced one slot so block production can start
/// immediately.
fn get_harness(
    validator_count: usize,
    spec: Arc<ChainSpec>,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    create_test_tracing_subscriber();

    let chain_config = ChainConfig {
        reconstruct_historic_states: true,
        ..ChainConfig::default()
    };
    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .spec(spec)
        .chain_config(chain_config)
        .keypairs(KEYPAIRS[..validator_count].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    harness.advance_slot();
    harness
}
// Regression test for https://github.com/sigp/lighthouse/issues/7650
//
// RPC blob sidecars whose embedded block header carries an invalid proposer
// signature must be rejected outright rather than admitted to the data
// availability checker.
#[tokio::test]
async fn rpc_blobs_with_invalid_header_signature() {
let spec = Arc::new(test_spec::<E>());
// Only run this test if blobs are enabled and columns are disabled.
if spec.deneb_fork_epoch.is_none() || spec.is_fulu_scheduled() {
return;
}
let harness = get_harness(VALIDATOR_COUNT, spec);
let num_blocks = E::slots_per_epoch() as usize;
// Add some chain depth.
harness
.extend_chain(
num_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Produce a block with blobs (force at least one blob into the payload).
harness.execution_block_generator().set_min_blob_count(1);
let head_state = harness.get_current_state();
let slot = head_state.slot() + 1;
let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await;
let (kzg_proofs, blobs) = opt_blobs.unwrap();
assert!(!blobs.is_empty());
let block_root = signed_block.canonical_root();
// Process the block without blobs so that it doesn't become available.
harness.advance_slot();
let rpc_block = harness
.build_rpc_block_from_blobs(block_root, signed_block.clone(), None)
.unwrap();
let availability = harness
.chain
.process_block(
block_root,
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
)
.await
.unwrap();
// The block alone must be pending availability (its blobs are still missing).
assert_eq!(
availability,
AvailabilityProcessingStatus::MissingComponents(slot, block_root)
);
// Build blob sidecars with invalid signatures in the block header. The
// infinity signature is not a valid proposer signature for this block.
let mut corrupt_block = (*signed_block).clone();
*corrupt_block.signature_mut() = Signature::infinity().unwrap();
let max_len = harness
.chain
.spec
.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize;
let blob_sidecars = FixedBlobSidecarList::new(vec![None; max_len]);
let mut blob_sidecars = blob_sidecars;
for (i, (kzg_proof, blob)) in kzg_proofs.into_iter().zip(blobs).enumerate() {
let blob_sidecar = BlobSidecar::new(i, blob, &corrupt_block, kzg_proof).unwrap();
blob_sidecars[i] = Some(Arc::new(blob_sidecar));
}
// The invalid header signature must cause the entire blob list to be rejected.
let err = harness
.chain
.process_rpc_blobs(slot, block_root, blob_sidecars)
.await
.unwrap_err();
assert!(matches!(
err,
BlockError::InvalidSignature(InvalidSignature::ProposerSignature)
));
}

View File

@@ -0,0 +1,117 @@
#![cfg(not(debug_assertions))]
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
generate_data_column_sidecars_from_block, test_spec,
};
use beacon_chain::{
AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer,
block_verification_types::AsBlock,
};
use logging::create_test_tracing_subscriber;
use std::sync::{Arc, LazyLock};
use types::*;
type E = MainnetEthSpec;
// Should ideally be divisible by 3.
const VALIDATOR_COUNT: usize = 24;
/// A cached set of keys.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
LazyLock::new(|| types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT));
/// Build an ephemeral-store test harness backed by `validator_count` cached keypairs.
///
/// When `supernode` is true the node imports all data columns. Historic state
/// reconstruction is enabled, a mock execution layer stands in for a real EL, and
/// the harness is advanced one slot so block production can start immediately.
fn get_harness(
    validator_count: usize,
    spec: Arc<ChainSpec>,
    supernode: bool,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    create_test_tracing_subscriber();

    let chain_config = ChainConfig {
        reconstruct_historic_states: true,
        ..ChainConfig::default()
    };
    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .spec(spec)
        .chain_config(chain_config)
        .keypairs(KEYPAIRS[..validator_count].to_vec())
        .import_all_data_columns(supernode)
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    harness.advance_slot();
    harness
}
// Regression test for https://github.com/sigp/lighthouse/issues/7650
//
// RPC data column sidecars whose embedded block header carries an invalid
// proposer signature must be rejected outright rather than admitted to the
// data availability checker.
#[tokio::test]
async fn rpc_columns_with_invalid_header_signature() {
let spec = Arc::new(test_spec::<E>());
// Only run this test if columns are enabled.
if !spec.is_fulu_scheduled() {
return;
}
let supernode = true;
let harness = get_harness(VALIDATOR_COUNT, spec, supernode);
let num_blocks = E::slots_per_epoch() as usize;
// Add some chain depth.
harness
.extend_chain(
num_blocks,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
// Produce a block with blobs (force at least one blob into the payload).
harness.execution_block_generator().set_min_blob_count(1);
let head_state = harness.get_current_state();
let slot = head_state.slot() + 1;
let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await;
let (_, blobs) = opt_blobs.unwrap();
assert!(!blobs.is_empty());
let block_root = signed_block.canonical_root();
// Process the block without its data columns so that it doesn't become available.
harness.advance_slot();
let rpc_block = harness
.build_rpc_block_from_blobs(block_root, signed_block.clone(), None)
.unwrap();
let availability = harness
.chain
.process_block(
block_root,
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
)
.await
.unwrap();
// The block alone must be pending availability (its columns are still missing).
assert_eq!(
availability,
AvailabilityProcessingStatus::MissingComponents(slot, block_root)
);
// Build data column sidecars with invalid signatures in the block header. The
// infinity signature is not a valid proposer signature for this block.
let mut corrupt_block = (*signed_block).clone();
*corrupt_block.signature_mut() = Signature::infinity().unwrap();
let data_column_sidecars =
generate_data_column_sidecars_from_block(&corrupt_block, &harness.chain.spec);
// The invalid header signature must cause the entire column list to be rejected.
let err = harness
.chain
.process_rpc_custody_columns(data_column_sidecars)
.await
.unwrap_err();
assert!(matches!(
err,
BlockError::InvalidSignature(InvalidSignature::ProposerSignature)
));
}

View File

@@ -1,15 +1,13 @@
use beacon_chain::blob_verification::GossipVerifiedBlob;
use beacon_chain::data_column_verification::GossipVerifiedDataColumn;
use beacon_chain::test_utils::{BeaconChainHarness, TEST_DATA_COLUMN_SIDECARS_SSZ};
use beacon_chain::test_utils::{BeaconChainHarness, generate_data_column_sidecars_from_block};
use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar};
use rand::SeedableRng;
use rand::rngs::StdRng;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::test_utils::TestRandom;
use types::{
BlobSidecar, DataColumnSidecar, EthSpec, ForkName, MinimalEthSpec, RuntimeVariableList, Slot,
};
use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkName, MinimalEthSpec, Slot};
type E = MinimalEthSpec;
@@ -108,19 +106,18 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
let mut blob_event_receiver = event_handler.subscribe_blob_sidecar();
// build and process multiple rpc blobs
let kzg = harness.chain.kzg.as_ref();
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
harness.execution_block_generator().set_min_blob_count(2);
let mut blob_1 = BlobSidecar::random_valid(&mut rng, kzg).unwrap();
let mut blob_2 = BlobSidecar {
index: 1,
..BlobSidecar::random_valid(&mut rng, kzg).unwrap()
};
let parent_root = harness.chain.head().head_block_root();
blob_1.signed_block_header.message.parent_root = parent_root;
blob_2.signed_block_header.message.parent_root = parent_root;
let blob_1 = Arc::new(blob_1);
let blob_2 = Arc::new(blob_2);
let head_state = harness.get_current_state();
let slot = head_state.slot() + 1;
let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await;
let (kzg_proofs, blobs) = opt_blobs.unwrap();
assert!(blobs.len() > 2);
let blob_1 =
Arc::new(BlobSidecar::new(0, blobs[0].clone(), &signed_block, kzg_proofs[0]).unwrap());
let blob_2 =
Arc::new(BlobSidecar::new(1, blobs[1].clone(), &signed_block, kzg_proofs[1]).unwrap());
let blobs = FixedBlobSidecarList::new(vec![Some(blob_1.clone()), Some(blob_2.clone())]);
let expected_sse_blobs = vec![
@@ -130,7 +127,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
let _ = harness
.chain
.process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs)
.process_rpc_blobs(slot, blob_1.block_root(), blobs)
.await
.unwrap();
@@ -159,20 +156,24 @@ async fn data_column_sidecar_event_on_process_rpc_columns() {
let event_handler = harness.chain.event_handler.as_ref().unwrap();
let mut data_column_event_receiver = event_handler.subscribe_data_column_sidecar();
// build a valid block
harness.execution_block_generator().set_min_blob_count(1);
let head_state = harness.get_current_state();
let slot = head_state.slot() + 1;
let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await;
let (_, blobs) = opt_blobs.unwrap();
assert!(!blobs.is_empty());
// load the precomputed column sidecar to avoid computing them for every block in the tests.
let mut sidecar = RuntimeVariableList::<DataColumnSidecar<E>>::from_ssz_bytes(
TEST_DATA_COLUMN_SIDECARS_SSZ,
E::number_of_columns(),
)
.unwrap()[0]
.clone();
let parent_root = harness.chain.head().head_block_root();
sidecar.signed_block_header.message.parent_root = parent_root;
let data_column_sidecars =
generate_data_column_sidecars_from_block(&signed_block, &harness.chain.spec);
let sidecar = data_column_sidecars[0].clone();
let expected_sse_data_column = SseDataColumnSidecar::from_data_column_sidecar(&sidecar);
let _ = harness
.chain
.process_rpc_custody_columns(vec![Arc::new(sidecar)])
.process_rpc_custody_columns(vec![sidecar])
.await
.unwrap();

View File

@@ -1,8 +1,10 @@
mod attestation_production;
mod attestation_verification;
mod bellatrix;
mod blob_verification;
mod block_verification;
mod capella;
mod column_verification;
mod events;
mod op_verification;
mod payload_invalidation;