Refactor consensus/types (#7827)

Organize and categorize `consensus/types` into modules based on their relation to key consensus structures/concepts.
This is a precursor to a sensible public interface.

While this refactor is very opinionated, I am open to suggestions on module names, or type groupings if my current ones are inappropriate.


Co-Authored-By: Mac L <mjladson@pm.me>
This commit is contained in:
Mac L
2025-12-04 13:28:52 +04:00
committed by GitHub
parent 51d0336020
commit 4e958a92d3
167 changed files with 2117 additions and 1751 deletions

View File

@@ -0,0 +1,171 @@
#![cfg(test)]
use std::sync::LazyLock;
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use bls::Keypair;
use fixed_bytes::FixedBytesExtended;
use milhouse::Vector;
use swap_or_not_shuffle::shuffle_list;
use types::*;
use crate::test_utils::generate_deterministic_keypairs;
/// Number of deterministic keypairs generated for the cached set below.
pub const VALIDATOR_COUNT: usize = 16;
/// A cached set of keys.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
    LazyLock::new(|| generate_deterministic_keypairs(VALIDATOR_COUNT));
/// Build an ephemeral-store harness backed by the first `validator_count`
/// cached keypairs, advanced one slot past genesis.
fn get_harness<E: EthSpec>(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    let keypairs = KEYPAIRS[0..validator_count].to_vec();
    let built = BeaconChainHarness::builder(E::default())
        .default_spec()
        .keypairs(keypairs)
        .fresh_ephemeral_store()
        .build();
    built.advance_slot();
    built
}
/// A fresh, uninitialized `CommitteeCache` must report "empty" from every
/// accessor without panicking.
#[test]
fn default_values() {
    let cache = CommitteeCache::default();
    assert!(!cache.is_initialized_at(Epoch::new(0)));
    // Assert directly on the bool; the original borrowed it needlessly
    // (`assert!(&...)`, clippy `needless_borrow`).
    assert!(cache.active_validator_indices().is_empty());
    assert_eq!(cache.get_beacon_committee(Slot::new(0), 0), None);
    assert_eq!(cache.get_attestation_duties(0), None);
    assert_eq!(cache.active_validator_count(), 0);
    assert_eq!(cache.epoch_committee_count(), 0);
    assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err());
}
/// Construct a `BeaconState` with `validator_count` validators, advanced to
/// `slot` by adding fully-attested blocks at every slot up to and including it.
async fn new_state<E: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<E> {
    let harness = get_harness(validator_count);
    let head_state = harness.get_current_state();
    if slot > Slot::new(0) {
        // Produce a block at each slot in 1..=slot, attested by every validator.
        let slots: Vec<Slot> = (1..=slot.as_u64()).map(Slot::new).collect();
        let attesters: Vec<usize> = (0..validator_count).collect();
        harness
            .add_attested_blocks_at_slots(head_state, Hash256::zero(), &slots, &attesters)
            .await;
    }
    harness.get_current_state()
}
/// Building a state with zero validators is invalid and is expected to panic.
#[tokio::test]
#[should_panic]
async fn fails_without_validators() {
    new_state::<MinimalEthSpec>(0, Slot::new(0)).await;
}
/// `CommitteeCache::initialized` must report itself initialized at exactly the
/// epoch it was built for — current, previous, or next.
#[tokio::test]
async fn initializes_with_the_right_epoch() {
    let state = new_state::<MinimalEthSpec>(16, Slot::new(0)).await;
    let spec = &MinimalEthSpec::default_spec();

    // A default cache is not initialized at any epoch.
    let default_cache = CommitteeCache::default();
    assert!(!default_cache.is_initialized_at(state.current_epoch()));

    // Same checks as the original, folded into a loop over the three epochs.
    let epochs = [
        state.current_epoch(),
        state.previous_epoch(),
        state.next_epoch().unwrap(),
    ];
    for epoch in epochs {
        let cache = CommitteeCache::initialized(&state, epoch, spec).unwrap();
        assert!(cache.is_initialized_at(epoch));
    }
}
/// The committee cache, when initialized for a given epoch, must use the
/// shuffling derived from that epoch's seed — not a neighbouring epoch's.
#[tokio::test]
async fn shuffles_for_the_right_epoch() {
    let num_validators = MinimalEthSpec::minimum_validator_count() * 2;
    let epoch = Epoch::new(6);
    let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch());
    let mut state = new_state::<MinimalEthSpec>(num_validators, slot).await;
    let spec = &MinimalEthSpec::default_spec();

    assert_eq!(state.current_epoch(), epoch);

    // Overwrite the RANDAO mixes with distinct values so each epoch yields a
    // distinct seed (and therefore a distinct shuffling).
    let distinct_hashes: Vec<Hash256> = (0..MinimalEthSpec::epochs_per_historical_vector())
        .map(|i| Hash256::from_low_u64_be(i as u64))
        .collect();
    *state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap();

    let previous_seed = state
        .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec)
        .unwrap();
    let current_seed = state
        .get_seed(state.current_epoch(), Domain::BeaconAttester, spec)
        .unwrap();
    let next_seed = state
        .get_seed(state.next_epoch().unwrap(), Domain::BeaconAttester, spec)
        .unwrap();

    // Sanity check: adjacent epochs must not share a seed, otherwise the
    // per-epoch comparison below could pass vacuously.
    assert!((previous_seed != current_seed) && (current_seed != next_seed));

    // Reference shuffling computed directly from a seed, bypassing the cache.
    let shuffling_with_seed = |seed: Hash256| {
        shuffle_list(
            (0..num_validators).collect(),
            spec.shuffle_round_count,
            &seed[..],
            false,
        )
        .unwrap()
    };

    // `shuffled_position` must be the exact inverse of `shuffling`.
    let assert_shuffling_positions_accurate = |cache: &CommitteeCache| {
        for (i, v) in cache.shuffling().iter().enumerate() {
            assert_eq!(
                cache.shuffled_position(*v).unwrap(),
                i,
                "Shuffling position inaccurate"
            );
        }
    };

    // We can initialize the committee cache at recent epochs in the past, and one epoch into the
    // future.
    for e in (0..=epoch.as_u64() + 1).map(Epoch::new) {
        let seed = state.get_seed(e, Domain::BeaconAttester, spec).unwrap();
        let cache = CommitteeCache::initialized(&state, e, spec)
            .unwrap_or_else(|_| panic!("failed at epoch {}", e));
        assert_eq!(cache.shuffling(), shuffling_with_seed(seed));
        assert_shuffling_positions_accurate(&cache);
    }

    // We should *not* be able to build a committee cache for the epoch after the next epoch.
    assert_eq!(
        CommitteeCache::initialized(&state, epoch + 2, spec),
        Err(BeaconStateError::EpochOutOfBounds)
    );
}
/// `min_randao_epoch` must be the exact lower bound of epochs for which
/// `get_randao_mix` succeeds.
#[tokio::test]
async fn min_randao_epoch_correct() {
    let num_validators = MinimalEthSpec::minimum_validator_count() * 2;
    // Two full historical vectors past genesis, so the mixes vector has
    // wrapped and a lower bound below current_epoch exists.
    let current_epoch = Epoch::new(MinimalEthSpec::epochs_per_historical_vector() as u64 * 2);
    let mut state = new_state::<MinimalEthSpec>(
        num_validators,
        Epoch::new(1).start_slot(MinimalEthSpec::slots_per_epoch()),
    )
    .await;

    // Override the epoch so that there's some room to move.
    *state.slot_mut() = current_epoch.start_slot(MinimalEthSpec::slots_per_epoch());
    assert_eq!(state.current_epoch(), current_epoch);

    // The min_randao_epoch should be the minimum epoch such that `get_randao_mix` returns `Ok`:
    // exactly at the bound succeeds, one below fails, one above succeeds.
    let min_randao_epoch = state.min_randao_epoch();
    state.get_randao_mix(min_randao_epoch).unwrap();
    state.get_randao_mix(min_randao_epoch - 1).unwrap_err();
    state.get_randao_mix(min_randao_epoch + 1).unwrap();
}

View File

@@ -0,0 +1,370 @@
#![cfg(test)]
use std::ops::Mul;
use std::sync::LazyLock;
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use bls::Keypair;
use fixed_bytes::FixedBytesExtended;
use milhouse::Vector;
use rand::SeedableRng;
use rand_xorshift::XorShiftRng;
use ssz::Encode;
use swap_or_not_shuffle::compute_shuffled_index;
use types::test_utils::{TestRandom, generate_deterministic_keypairs};
use types::*;
/// Largest validator set any test in this file may request; the cached
/// keypair set below is sized to match.
pub const MAX_VALIDATOR_COUNT: usize = 129;
/// Offset subtracted from the target slot in `get_harness` when deciding
/// where block production begins (earlier slots are left skipped).
pub const SLOT_OFFSET: Slot = Slot::new(1);
/// A cached set of keys.
static KEYPAIRS: LazyLock<Vec<Keypair>> =
    LazyLock::new(|| generate_deterministic_keypairs(MAX_VALIDATOR_COUNT));
/// Build an ephemeral-store harness with `validator_count` validators whose
/// head is at `slot`.
///
/// Blocks are only produced over `slot - SLOT_OFFSET ..= slot`; all earlier
/// slots are left as skipped slots rather than filled with blocks.
async fn get_harness<E: EthSpec>(
    validator_count: usize,
    slot: Slot,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
    let harness = BeaconChainHarness::builder(E::default())
        .default_spec()
        .keypairs(KEYPAIRS[0..validator_count].to_vec())
        .fresh_ephemeral_store()
        .build();
    let skip_to_slot = slot - SLOT_OFFSET;
    if skip_to_slot > Slot::new(0) {
        // Attest with the full validator set at each produced slot.
        let slots = (skip_to_slot.as_u64()..=slot.as_u64())
            .map(Slot::new)
            .collect::<Vec<_>>();
        let state = harness.get_current_state();
        harness
            .add_attested_blocks_at_slots(
                state,
                Hash256::zero(),
                slots.as_slice(),
                (0..validator_count).collect::<Vec<_>>().as_slice(),
            )
            .await;
    }
    harness
}
/// Build a genesis-slot `BeaconState` with `validator_count` validators by
/// cloning the head state of a fresh harness.
async fn build_state<E: EthSpec>(validator_count: usize) -> BeaconState<E> {
    let harness = get_harness::<E>(validator_count, Slot::new(0)).await;
    harness.chain.head_beacon_state_cloned()
}
/// Check `get_beacon_proposer_index` against an independent recomputation of
/// the candidate proposer via `compute_shuffled_index`.
async fn test_beacon_proposer_index<E: EthSpec>() {
    let spec = E::default_spec();

    // Get the i'th candidate proposer for the given state and slot
    let ith_candidate = |state: &BeaconState<E>, slot: Slot, i: usize, spec: &ChainSpec| {
        let epoch = slot.epoch(E::slots_per_epoch());
        let seed = state.get_beacon_proposer_seed(slot, spec).unwrap();
        let active_validators = state.get_active_validator_indices(epoch, spec).unwrap();
        active_validators[compute_shuffled_index(
            i,
            active_validators.len(),
            &seed,
            spec.shuffle_round_count,
        )
        .unwrap()]
    };

    // Run a test on the state: the state's proposer for `slot` must equal the
    // `candidate_index`'th shuffled candidate.
    let test = |state: &BeaconState<E>, slot: Slot, candidate_index: usize| {
        assert_eq!(
            state.get_beacon_proposer_index(slot, &spec),
            Ok(ith_candidate(state, slot, candidate_index, &spec))
        );
    };

    // Test where we have one validator per slot.
    // 0th candidate should be chosen every time.
    let state = build_state(E::slots_per_epoch() as usize).await;
    for i in 0..E::slots_per_epoch() {
        test(&state, Slot::from(i), 0);
    }

    // Test where we have two validators per slot.
    // 0th candidate should be chosen every time.
    let state = build_state((E::slots_per_epoch() as usize).mul(2)).await;
    for i in 0..E::slots_per_epoch() {
        test(&state, Slot::from(i), 0);
    }

    // Test with two validators per slot, first validator has zero balance.
    // Presumably a zero effective balance disqualifies the 0th candidate from
    // proposer sampling, so the 1st candidate is expected at slot 0 — the
    // remaining slots are unaffected.
    let mut state = build_state::<E>((E::slots_per_epoch() as usize).mul(2)).await;
    let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec);
    state
        .validators_mut()
        .get_mut(slot0_candidate0)
        .unwrap()
        .effective_balance = 0;
    test(&state, Slot::new(0), 1);
    for i in 1..E::slots_per_epoch() {
        test(&state, Slot::from(i), 0);
    }
}
/// Run the proposer-index consistency test against the minimal spec.
#[tokio::test]
async fn beacon_proposer_index() {
    test_beacon_proposer_index::<MinimalEthSpec>().await;
}
/// Test that
///
/// 1. Using the cache before it's built fails.
/// 2. Using the cache after it's built passes.
/// 3. Using the cache after it's dropped fails.
fn test_cache_initialization<E: EthSpec>(
    state: &mut BeaconState<E>,
    relative_epoch: RelativeEpoch,
    spec: &ChainSpec,
) {
    // First slot of the epoch the cache will be built for.
    let slot = relative_epoch
        .into_epoch(state.slot().epoch(E::slots_per_epoch()))
        .start_slot(E::slots_per_epoch());

    // Build the cache.
    state.build_committee_cache(relative_epoch, spec).unwrap();

    // Assert a call to a cache-using function passes.
    state.get_beacon_committee(slot, 0).unwrap();

    // Drop the cache.
    state.drop_committee_cache(relative_epoch).unwrap();

    // Assert a call to a cache-using function fails with the expected
    // "uninitialized" error, naming the epoch that was dropped.
    assert_eq!(
        state.get_beacon_committee(slot, 0),
        Err(BeaconStateError::CommitteeCacheUninitialized(Some(
            relative_epoch
        )))
    );
}
/// Exercise build/use/drop of the committee cache for all three relative
/// epochs on a state one epoch past genesis.
#[tokio::test]
async fn cache_initialization() {
    let spec = MinimalEthSpec::default_spec();
    let mut state = build_state::<MinimalEthSpec>(16).await;

    // Move one epoch past genesis so a "previous" epoch exists.
    let slot = (MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch());
    *state.slot_mut() = slot;

    for relative_epoch in [
        RelativeEpoch::Previous,
        RelativeEpoch::Current,
        RelativeEpoch::Next,
    ] {
        test_cache_initialization(&mut state, relative_epoch, &spec);
    }
}
/// Tests committee-specific components
#[cfg(test)]
mod committees {
    use super::*;
    use std::ops::{Add, Div};
    use swap_or_not_shuffle::shuffle_list;

    /// Verify every committee-related accessor on `state` against a shuffling
    /// recomputed from scratch for `epoch`.
    fn execute_committee_consistency_test<E: EthSpec>(
        state: BeaconState<E>,
        epoch: Epoch,
        validator_count: usize,
        spec: &ChainSpec,
    ) {
        let active_indices: Vec<usize> = (0..validator_count).collect();
        let seed = state.get_seed(epoch, Domain::BeaconAttester, spec).unwrap();
        let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap();

        // The cached active indices must be a permutation of all validators
        // (compare sorted).
        let mut ordered_indices = state
            .get_cached_active_validator_indices(relative_epoch)
            .unwrap()
            .to_vec();
        ordered_indices.sort_unstable();
        assert_eq!(
            active_indices, ordered_indices,
            "Validator indices mismatch"
        );

        // Reference shuffling computed directly from the seed, bypassing the
        // state's caches.
        let shuffling =
            shuffle_list(active_indices, spec.shuffle_round_count, &seed[..], false).unwrap();
        let mut expected_indices_iter = shuffling.iter();

        // Loop through all slots in the epoch being tested.
        for slot in epoch.slot_iter(E::slots_per_epoch()) {
            let beacon_committees = state.get_beacon_committees_at_slot(slot).unwrap();

            // Assert that the number of committees in this slot is consistent with the reported number
            // of committees in an epoch.
            assert_eq!(
                beacon_committees.len() as u64,
                state
                    .get_epoch_committee_count(relative_epoch)
                    .unwrap()
                    .div(E::slots_per_epoch())
            );

            for (committee_index, bc) in beacon_committees.iter().enumerate() {
                // Assert that indices are assigned sequentially across committees.
                assert_eq!(committee_index as u64, bc.index);
                // Assert that a committee lookup via slot is identical to a committee lookup via
                // index.
                assert_eq!(state.get_beacon_committee(bc.slot, bc.index).unwrap(), *bc);

                // Loop through each validator in the committee.
                for (committee_i, validator_i) in bc.committee.iter().enumerate() {
                    // Assert the validators are assigned contiguously across committees.
                    assert_eq!(
                        *validator_i,
                        *expected_indices_iter.next().unwrap(),
                        "Non-sequential validators."
                    );
                    // Assert a call to `get_attestation_duties` is consistent with a call to
                    // `get_beacon_committees_at_slot`
                    let attestation_duty = state
                        .get_attestation_duties(*validator_i, relative_epoch)
                        .unwrap()
                        .unwrap();
                    assert_eq!(attestation_duty.slot, slot);
                    assert_eq!(attestation_duty.index, bc.index);
                    assert_eq!(attestation_duty.committee_position, committee_i);
                    assert_eq!(attestation_duty.committee_len, bc.committee.len());
                }
            }
        }

        // Assert that all validators were assigned to a committee.
        assert!(expected_indices_iter.next().is_none());
    }

    /// Build a state at `state_epoch`, force-build all committee caches, and
    /// run the consistency test for `cache_epoch`.
    async fn committee_consistency_test<E: EthSpec>(
        validator_count: usize,
        state_epoch: Epoch,
        cache_epoch: RelativeEpoch,
    ) {
        let spec = &E::default_spec();

        let slot = state_epoch.start_slot(E::slots_per_epoch());
        let harness = get_harness::<E>(validator_count, slot).await;
        let mut new_head_state = harness.get_current_state();

        // Distinct RANDAO mixes give each epoch a distinct seed.
        let distinct_hashes =
            (0..E::epochs_per_historical_vector()).map(|i| Hash256::from_low_u64_be(i as u64));
        *new_head_state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap();

        // Force-rebuild all three caches since the mixes (and thus the seeds)
        // were just changed out from under them.
        new_head_state
            .force_build_committee_cache(RelativeEpoch::Previous, spec)
            .unwrap();
        new_head_state
            .force_build_committee_cache(RelativeEpoch::Current, spec)
            .unwrap();
        new_head_state
            .force_build_committee_cache(RelativeEpoch::Next, spec)
            .unwrap();

        let cache_epoch = cache_epoch.into_epoch(state_epoch);

        execute_committee_consistency_test(new_head_state, cache_epoch, validator_count, spec);
    }

    /// Run the consistency test at genesis, a few epochs in, and far in the
    /// future, for the given relative epoch.
    async fn committee_consistency_test_suite<E: EthSpec>(cached_epoch: RelativeEpoch) {
        let spec = E::default_spec();

        // One more validator than fills every committee to target size, so
        // committee sizes are not all equal.
        let validator_count = spec
            .max_committees_per_slot
            .mul(E::slots_per_epoch() as usize)
            .mul(spec.target_committee_size)
            .add(1);

        committee_consistency_test::<E>(validator_count, Epoch::new(0), cached_epoch).await;

        committee_consistency_test::<E>(validator_count, E::genesis_epoch() + 4, cached_epoch)
            .await;

        committee_consistency_test::<E>(
            validator_count,
            E::genesis_epoch()
                + (E::slots_per_historical_root() as u64)
                    .mul(E::slots_per_epoch())
                    .mul(4),
            cached_epoch,
        )
        .await;
    }

    #[tokio::test]
    async fn current_epoch_committee_consistency() {
        committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Current).await;
    }

    #[tokio::test]
    async fn previous_epoch_committee_consistency() {
        committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Previous).await;
    }

    #[tokio::test]
    async fn next_epoch_committee_consistency() {
        committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Next).await;
    }
}
/// SSZ decoding of `BeaconState` must respect the fork schedule: a Base state
/// carrying a post-fork slot, or an Altair state carrying a pre-fork slot,
/// must fail to decode even though the bytes round-trip otherwise.
#[test]
fn decode_base_and_altair() {
    type E = MainnetEthSpec;
    let spec = E::default_spec();

    // Deterministic RNG so the random states are reproducible.
    let rng = &mut XorShiftRng::from_seed([42; 16]);

    let fork_epoch = spec.altair_fork_epoch.unwrap();

    // Last slot before the fork, and first slot at/after it.
    let base_epoch = fork_epoch.saturating_sub(1_u64);
    let base_slot = base_epoch.end_slot(E::slots_per_epoch());
    let altair_epoch = fork_epoch;
    let altair_slot = altair_epoch.start_slot(E::slots_per_epoch());

    // BeaconStateBase
    {
        let good_base_state: BeaconState<MainnetEthSpec> = BeaconState::Base(BeaconStateBase {
            slot: base_slot,
            ..<_>::random_for_test(rng)
        });
        // It's invalid to have a base state with a slot higher than the fork slot.
        let bad_base_state = {
            let mut bad = good_base_state.clone();
            *bad.slot_mut() = altair_slot;
            bad
        };

        assert_eq!(
            BeaconState::from_ssz_bytes(&good_base_state.as_ssz_bytes(), &spec)
                .expect("good base state can be decoded"),
            good_base_state
        );
        <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_base_state.as_ssz_bytes(), &spec)
            .expect_err("bad base state cannot be decoded");
    }

    // BeaconStateAltair
    {
        let good_altair_state: BeaconState<MainnetEthSpec> =
            BeaconState::Altair(BeaconStateAltair {
                slot: altair_slot,
                ..<_>::random_for_test(rng)
            });
        // It's invalid to have an Altair state with a slot lower than the fork slot.
        let bad_altair_state = {
            let mut bad = good_altair_state.clone();
            *bad.slot_mut() = base_slot;
            bad
        };

        assert_eq!(
            BeaconState::from_ssz_bytes(&good_altair_state.as_ssz_bytes(), &spec)
                .expect("good altair state can be decoded"),
            good_altair_state
        );
        <BeaconState<MainnetEthSpec>>::from_ssz_bytes(&bad_altair_state.as_ssz_bytes(), &spec)
            .expect_err("bad altair state cannot be decoded");
    }
}