From c028bac28d8e706c5a7d1492be2ce6e5fbd90ad6 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 20 Apr 2026 10:59:42 +1000 Subject: [PATCH] Fix slasher OOM (#9141) Fix a vulnerability in the slasher whereby it would OOM upon processing an invalid attestation with an artificially high `validator_index`. This fix has already been made available to affected users on the `slasher-fix` branch. - Prevent attestations from being passed to the slasher prior to signature verification. This was unnecessary, as they would later be passed on successful validation as well. - Add a defensive cap on the maximum validator index processable by the slasher. The cap is high enough that it shouldn't be reached for several years, and will quickly result in warning logs if forgotten. - Add a regression test that confirms that the issue is fixed. Co-Authored-By: Michael Sproul --- .../src/attestation_verification.rs | 10 ---- .../tests/attestation_verification.rs | 57 +++++++++++++++++++ slasher/src/attestation_queue.rs | 17 ++++++ slasher/src/slasher.rs | 5 ++ 4 files changed, 79 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 667bafe445..f35de59e1f 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -514,11 +514,6 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(signed_aggregate, chain) - .inspect(|verified_aggregate| { - if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_attestation(verified_aggregate.indexed_attestation.clone()); - } - }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } @@ -971,11 +966,6 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { chain: &BeaconChain, ) -> Result { Self::verify_slashable(attestation, subnet_id, chain) - 
.inspect(|verified_unaggregated| { - if let Some(slasher) = chain.slasher.as_ref() { - slasher.accept_attestation(verified_unaggregated.indexed_attestation.clone()); - } - }) .map_err(|slash_info| process_slash_info(slash_info, chain)) } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index acf326430b..91bc8e249a 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -19,8 +19,10 @@ use execution_layer::test_utils::generate_genesis_header; use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; +use slasher::{Config as SlasherConfig, Slasher}; use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; +use tempfile::tempdir; use tree_hash::TreeHash; use typenum::Unsigned; use types::{ @@ -1958,3 +1960,58 @@ async fn gloas_aggregated_attestation_same_slot_index_must_be_zero() { result.err() ); } + +/// Regression test: a SingleAttestation with a huge bogus attester_index must not be forwarded to +/// the slasher. Previously the slasher received the IndexedAttestation before committee-membership +/// validation, causing an OOM when the slasher tried to allocate based on the untrusted index. 
+#[tokio::test]
+async fn unaggregated_attestation_bogus_attester_index_not_sent_to_slasher() {
+    let slasher_dir = tempdir().unwrap();
+    let spec = Arc::new(test_spec::<E>());
+    let slasher = Arc::new(
+        Slasher::<E>::open(SlasherConfig::new(slasher_dir.path().into()), spec.clone()).unwrap(),
+    );
+
+    let inner_slasher = slasher.clone();
+    let harness = BeaconChainHarness::builder(MainnetEthSpec)
+        .spec(spec)
+        .keypairs(KEYPAIRS[0..VALIDATOR_COUNT].to_vec())
+        .fresh_ephemeral_store()
+        .initial_mutator(Box::new(move |builder| builder.slasher(inner_slasher)))
+        .mock_execution_layer()
+        .build();
+    harness.advance_slot();
+    harness
+        .extend_chain(
+            1,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+    harness.advance_slot();
+
+    // Build a valid SingleAttestation, then replace the attester_index with a huge value.
+    let (mut bogus_attestation, _, _) = get_valid_unaggregated_attestation(&harness.chain);
+    bogus_attestation.attester_index = 1 << 40; // 2^40, would OOM the slasher
+
+    // Drain any attestations already queued from block production.
+    slasher
+        .process_queued(harness.get_current_slot().epoch(E::slots_per_epoch()))
+        .unwrap();
+    let queue_len_before = slasher.attestation_queue_len();
+    assert_eq!(queue_len_before, 0);
+
+    let result = harness
+        .chain
+        .verify_unaggregated_attestation_for_gossip(&bogus_attestation, None);
+    assert!(
+        result.is_err(),
+        "attestation with bogus index should fail verification"
+    );
+
+    assert_eq!(
+        slasher.attestation_queue_len(),
+        0,
+        "slasher queue length must not change — bogus attestation must not be forwarded"
+    );
+}
diff --git a/slasher/src/attestation_queue.rs b/slasher/src/attestation_queue.rs
index 62a1bb0945..e99a3708ad 100644
--- a/slasher/src/attestation_queue.rs
+++ b/slasher/src/attestation_queue.rs
@@ -2,8 +2,17 @@
 use crate::{AttesterRecord, Config, IndexedAttesterRecord};
 use parking_lot::Mutex;
 use std::collections::BTreeMap;
 use std::sync::{Arc, Weak};
+use tracing::warn;
 use types::{EthSpec, Hash256, IndexedAttestation};
+/// Hard cap on validator indices accepted by the slasher.
+///
+/// Any attestation referencing a validator index above this limit is dropped during grouping,
+/// with a warning logged. This is a defence-in-depth measure to prevent pathological memory
+/// allocation if an attestation with a bogus index somehow reaches the slasher. The value
+/// (2^23 = 8,388,608) provides generous headroom above the current mainnet validator set (~2M).
+const MAX_VALIDATOR_INDEX: u64 = 8_388_608;
+
 /// Staging area for attestations received from the network.
 ///
 /// Attestations are not grouped by validator index at this stage so that they can be easily
@@ -72,6 +81,14 @@ impl<E: EthSpec> AttestationBatch<E> {
         let mut grouped_attestations = GroupedAttestations { subqueues: vec![] };
 
         for ((validator_index, _), indexed_record) in self.attesters {
+            if validator_index >= MAX_VALIDATOR_INDEX {
+                warn!(
+                    validator_index,
+                    "Dropping slasher attestation with out-of-range validator index"
+                );
+                break;
+            }
+
             let subqueue_id = config.validator_chunk_index(validator_index);
 
             if subqueue_id >= grouped_attestations.subqueues.len() {
diff --git a/slasher/src/slasher.rs b/slasher/src/slasher.rs
index 5d26c5a6da..8d34a34f3e 100644
--- a/slasher/src/slasher.rs
+++ b/slasher/src/slasher.rs
@@ -74,6 +74,11 @@ impl<E: EthSpec> Slasher<E> {
         &self.config
     }
 
+    /// Return the number of attestations in the queue.
+    pub fn attestation_queue_len(&self) -> usize {
+        self.attestation_queue.len()
+    }
+
     /// Accept an attestation from the network and queue it for processing.
     pub fn accept_attestation(&self, attestation: IndexedAttestation<E>) {
         self.attestation_queue.queue(attestation);