Fix slasher OOM (#9141)

Fix a vulnerability in the slasher whereby it would OOM upon processing an invalid attestation with an artificially high `validator_index`. This fix has already been made available to affected users on the `slasher-fix` branch.


- Prevent attestations from being passed to the slasher prior to signature verification. This early hand-off was unnecessary, as attestations are passed to the slasher again after successful validation.
- Add a defensive cap on the maximum validator index processable by the slasher (see the sketch below for the allocation pattern it guards against). The cap is high enough that it shouldn't be reached for several years, and will quickly produce warning logs if we forget to raise it.
- Add a regression test confirming that the issue is fixed.
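
For context, here is a minimal sketch of the failure mode. The function name and `validators_per_chunk` parameter are illustrative assumptions, not the slasher's actual API, but the growth pattern mirrors the `AttestationBatch` grouping code in the diff below:

```rust
// Minimal sketch of the OOM: sizing a subqueue vector from an untrusted
// validator index. Names here are illustrative, not the slasher's real API.
fn group_by_chunk(validator_index: u64, validators_per_chunk: u64) -> usize {
    let subqueue_id = (validator_index / validators_per_chunk) as usize;
    let mut subqueues: Vec<Vec<u64>> = vec![];
    if subqueue_id >= subqueues.len() {
        // With validator_index = 2^40 and 256 validators per chunk, this asks
        // for 2^32 subqueues: ~96 GiB of Vec headers alone, before any data.
        subqueues.resize_with(subqueue_id + 1, Vec::new);
    }
    subqueue_id
}
```

Capping the index before this point bounds the allocation regardless of what reaches the queue.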


Co-Authored-By: Michael Sproul <michael@sigmaprime.io>


@@ -514,11 +514,6 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         chain: &BeaconChain<T>,
     ) -> Result<Self, Error> {
         Self::verify_slashable(signed_aggregate, chain)
-            .inspect(|verified_aggregate| {
-                if let Some(slasher) = chain.slasher.as_ref() {
-                    slasher.accept_attestation(verified_aggregate.indexed_attestation.clone());
-                }
-            })
             .map_err(|slash_info| process_slash_info(slash_info, chain))
     }
@@ -971,11 +966,6 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
         chain: &BeaconChain<T>,
     ) -> Result<Self, Error> {
         Self::verify_slashable(attestation, subnet_id, chain)
-            .inspect(|verified_unaggregated| {
-                if let Some(slasher) = chain.slasher.as_ref() {
-                    slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone());
-                }
-            })
             .map_err(|slash_info| process_slash_info(slash_info, chain))
     }
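
With the early hand-off removed, the slasher is fed only from the later, post-verification path mentioned in the commit message. That call site is outside this diff; a hypothetical sketch of the resulting ordering, using identifiers from the surrounding code:

```rust
// Hypothetical sketch; the real post-verification call site is not shown in
// this commit. Verification, including the committee-membership check that
// bounds the attester index, must succeed before the slasher sees anything.
let verified = chain.verify_unaggregated_attestation_for_gossip(&attestation, subnet_id)?;
if let Some(slasher) = chain.slasher.as_ref() {
    slasher.accept_attestation(verified.indexed_attestation.clone());
}
```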


@@ -19,8 +19,10 @@ use execution_layer::test_utils::generate_genesis_header;
 use fixed_bytes::FixedBytesExtended;
 use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state};
 use int_to_bytes::int_to_bytes32;
+use slasher::{Config as SlasherConfig, Slasher};
 use state_processing::per_slot_processing;
 use std::sync::{Arc, LazyLock};
+use tempfile::tempdir;
 use tree_hash::TreeHash;
 use typenum::Unsigned;
 use types::{
@@ -1958,3 +1960,58 @@ async fn gloas_aggregated_attestation_same_slot_index_must_be_zero() {
         result.err()
     );
 }
+
+/// Regression test: a SingleAttestation with a huge bogus attester_index must not be forwarded to
+/// the slasher. Previously the slasher received the IndexedAttestation before committee-membership
+/// validation, causing an OOM when the slasher tried to allocate based on the untrusted index.
+#[tokio::test]
+async fn unaggregated_attestation_bogus_attester_index_not_sent_to_slasher() {
+    let slasher_dir = tempdir().unwrap();
+    let spec = Arc::new(test_spec::<E>());
+    let slasher = Arc::new(
+        Slasher::<E>::open(SlasherConfig::new(slasher_dir.path().into()), spec.clone()).unwrap(),
+    );
+    let inner_slasher = slasher.clone();
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .spec(spec)
+        .keypairs(KEYPAIRS[0..VALIDATOR_COUNT].to_vec())
+        .fresh_ephemeral_store()
+        .initial_mutator(Box::new(move |builder| builder.slasher(inner_slasher)))
+        .mock_execution_layer()
+        .build();
+
+    harness.advance_slot();
+    harness
+        .extend_chain(
+            1,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+    harness.advance_slot();
+
+    // Build a valid SingleAttestation, then replace the attester_index with a huge value.
+    let (mut bogus_attestation, _, _) = get_valid_unaggregated_attestation(&harness.chain);
+    bogus_attestation.attester_index = 1 << 40; // 2^40, large enough to OOM the slasher
+
+    // Drain any attestations already queued from block production.
+    slasher
+        .process_queued(harness.get_current_slot().epoch(E::slots_per_epoch()))
+        .unwrap();
+    let queue_len_before = slasher.attestation_queue_len();
+    assert_eq!(queue_len_before, 0);
+
+    let result = harness
+        .chain
+        .verify_unaggregated_attestation_for_gossip(&bogus_attestation, None);
+    assert!(
+        result.is_err(),
+        "attestation with bogus index should fail verification"
+    );
+    assert_eq!(
+        slasher.attestation_queue_len(),
+        0,
+        "slasher queue length must not change; bogus attestation must not be forwarded"
+    );
+}


@@ -2,8 +2,17 @@ use crate::{AttesterRecord, Config, IndexedAttesterRecord};
 use parking_lot::Mutex;
 use std::collections::BTreeMap;
 use std::sync::{Arc, Weak};
+use tracing::warn;
 use types::{EthSpec, Hash256, IndexedAttestation};
 
+/// Hard cap on validator indices accepted by the slasher.
+///
+/// Any attestation referencing a validator index above this limit is dropped (with a warning)
+/// during grouping. This is a defence-in-depth measure to prevent pathological memory allocation
+/// if an attestation with a bogus index somehow reaches the slasher. The value (2^23 = 8,388,608)
+/// provides generous headroom above the current mainnet validator set (~2M).
+const MAX_VALIDATOR_INDEX: u64 = 8_388_608;
+
 /// Staging area for attestations received from the network.
 ///
 /// Attestations are not grouped by validator index at this stage so that they can be easily
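
The "several years" claim in the commit message checks out on a back-of-envelope basis. The numbers below are assumptions for illustration (a ~2M validator set and a generous 16 activations per epoch), not protocol constants:

```rust
// Rough headroom estimate; the activation rate is an assumed figure.
fn main() {
    let cap = 8_388_608.0_f64; // MAX_VALIDATOR_INDEX = 2^23
    let current = 2_000_000.0; // approximate mainnet validator set
    let activations_per_day = 16.0 * 225.0; // assumed: 16/epoch * 225 epochs/day
    let years = (cap - current) / activations_per_day / 365.25;
    println!("~{years:.1} years until the cap is reached"); // prints ~4.9
}
```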
@@ -72,6 +81,14 @@ impl<E: EthSpec> AttestationBatch<E> {
         let mut grouped_attestations = GroupedAttestations { subqueues: vec![] };
 
         for ((validator_index, _), indexed_record) in self.attesters {
+            if validator_index >= MAX_VALIDATOR_INDEX {
+                warn!(
+                    validator_index,
+                    "Dropping slasher attestation with out-of-range validator index"
+                );
+                break;
+            }
+
             let subqueue_id = config.validator_chunk_index(validator_index);
             if subqueue_id >= grouped_attestations.subqueues.len() {
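
One subtlety: the guard uses `break` rather than `continue`. Assuming `attesters` is the `BTreeMap` imported above, keyed by `(validator_index, ...)`, that is equivalent and cheaper, since iteration proceeds in ascending key order and every remaining index is also out of range. A standalone demonstration:

```rust
use std::collections::BTreeMap;

// Breaking on the first out-of-range key is equivalent to skipping each one,
// because BTreeMap iterates its keys in ascending order.
fn main() {
    const MAX_VALIDATOR_INDEX: u64 = 8_388_608;
    let mut attesters: BTreeMap<(u64, u8), &str> = BTreeMap::new();
    attesters.insert((42, 0), "fine");
    attesters.insert((1 << 40, 0), "bogus");
    attesters.insert((1 << 41, 0), "also bogus");

    for ((validator_index, _), record) in attesters {
        if validator_index >= MAX_VALIDATOR_INDEX {
            break; // every remaining key is >= this one
        }
        println!("processing {record} at index {validator_index}");
    }
    // prints only: processing fine at index 42
}
```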


@@ -74,6 +74,11 @@ impl<E: EthSpec> Slasher<E> {
         &self.config
     }
 
+    /// Return the number of attestations in the queue.
+    pub fn attestation_queue_len(&self) -> usize {
+        self.attestation_queue.len()
+    }
+
     /// Accept an attestation from the network and queue it for processing.
     pub fn accept_attestation(&self, attestation: IndexedAttestation<E>) {
         self.attestation_queue.queue(attestation);