Single-pass epoch processing and optimised block processing (#5279)

* Single-pass epoch processing (#4483, #4573)

Co-authored-by: Michael Sproul <michael@sigmaprime.io>

* Delete unused epoch processing code (#5170)

* Delete unused epoch processing code

* Compare total deltas

* Remove unnecessary apply_pending

* cargo fmt

* Remove newline

* Use epoch cache in block packing (#5223)

* Remove progressive balances mode (#5224)

* inline inactivity_penalty_quotient_for_state

* drop previous_epoch_total_active_balance

* fc lint

* spec compliant process_sync_aggregate (#15)

* spec compliant process_sync_aggregate

* Update consensus/state_processing/src/per_block_processing/altair/sync_committee.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

---------

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Delete the participation cache (#16)

* update help

* Fix op_pool tests

* Fix fork choice tests

* Merge remote-tracking branch 'sigp/unstable' into epoch-single-pass

* Simplify exit cache (#5280)

* Fix clippy on exit cache

* Clean up single-pass a bit (#5282)

* Address Mark's review of single-pass (#5386)

* Merge remote-tracking branch 'origin/unstable' into epoch-single-pass

* Address Sean's review comments (#5414)

* Address most of Sean's review comments

* Simplify total balance cache building

* Clean up unused junk

* Merge remote-tracking branch 'origin/unstable' into epoch-single-pass

* More self-review

* Merge remote-tracking branch 'origin/unstable' into epoch-single-pass

* Merge branch 'unstable' into epoch-single-pass

* Fix imports for beta compiler

* Fix tests, probably
This commit is contained in:
Michael Sproul
2024-04-05 00:14:36 +11:00
committed by GitHub
parent f4cdcea7b1
commit feb531f85b
81 changed files with 2545 additions and 1316 deletions

View File

@@ -0,0 +1,44 @@
use crate::{ChainSpec, Epoch, Validator};
use std::collections::BTreeSet;
/// Activation queue computed during epoch processing for use in the *next* epoch.
#[derive(Debug, PartialEq, Eq, Default, Clone, arbitrary::Arbitrary)]
pub struct ActivationQueue {
    /// Validators represented by `(activation_eligibility_epoch, index)` in sorted order.
    ///
    /// These validators are not *necessarily* going to be activated. Their activation depends
    /// on how finalization is updated, and the `churn_limit`.
    ///
    /// A `BTreeSet` is used so that iteration yields candidates in ascending
    /// `(eligibility_epoch, index)` order.
    queue: BTreeSet<(Epoch, usize)>,
}
impl ActivationQueue {
    /// Check if `validator` could be eligible for activation in the next epoch and add them to
    /// the tentative activation queue if this is the case.
    pub fn add_if_could_be_eligible_for_activation(
        &mut self,
        index: usize,
        validator: &Validator,
        next_epoch: Epoch,
        spec: &ChainSpec,
    ) {
        // Only track validators that pass the (finalization-independent) eligibility check.
        let is_candidate = validator.could_be_eligible_for_activation_at(next_epoch, spec);
        if is_candidate {
            let entry = (validator.activation_eligibility_epoch, index);
            self.queue.insert(entry);
        }
    }

    /// Determine the final activation queue after accounting for finalization & the churn limit.
    pub fn get_validators_eligible_for_activation(
        &self,
        finalized_epoch: Epoch,
        churn_limit: usize,
    ) -> BTreeSet<usize> {
        // The set iterates in ascending (epoch, index) order, so taking `churn_limit`
        // entries selects the earliest-eligible validators.
        self.queue
            .iter()
            .filter(|(eligibility_epoch, _)| *eligibility_epoch <= finalized_epoch)
            .map(|&(_, index)| index)
            .take(churn_limit)
            .collect()
    }
}

View File

@@ -1,5 +1,5 @@
use self::committee_cache::get_active_validator_indices;
use self::exit_cache::ExitCache;
use crate::historical_summary::HistoricalSummary;
use crate::test_utils::TestRandom;
use crate::*;
use compare_fields::CompareFields;
@@ -7,7 +7,7 @@ use compare_fields_derive::CompareFields;
use derivative::Derivative;
use ethereum_hashing::hash;
use int_to_bytes::{int_to_bytes4, int_to_bytes8};
use pubkey_cache::PubkeyCache;
pub use pubkey_cache::PubkeyCache;
use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Serialize};
use ssz::{ssz_encode, Decode, DecodeError, Encode};
@@ -25,8 +25,9 @@ pub use self::committee_cache::{
CommitteeCache,
};
pub use crate::beacon_state::balance::Balance;
pub use crate::beacon_state::exit_cache::ExitCache;
pub use crate::beacon_state::progressive_balances_cache::*;
use crate::historical_summary::HistoricalSummary;
pub use crate::beacon_state::slashings_cache::SlashingsCache;
pub use clone_config::CloneConfig;
pub use eth_spec::*;
pub use iter::BlockRootsIter;
@@ -40,12 +41,16 @@ mod exit_cache;
mod iter;
mod progressive_balances_cache;
mod pubkey_cache;
mod slashings_cache;
mod tests;
mod tree_hash_cache;
pub const CACHED_EPOCHS: usize = 3;
const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1;
pub type Validators<T> = VariableList<Validator, <T as EthSpec>::ValidatorRegistryLimit>;
pub type Balances<T> = VariableList<u64, <T as EthSpec>::ValidatorRegistryLimit>;
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
/// A state for a different hard-fork was required -- a severe logic error.
@@ -97,13 +102,20 @@ pub enum Error {
},
RelativeEpochError(RelativeEpochError),
ExitCacheUninitialized,
ExitCacheInvalidEpoch {
max_exit_epoch: Epoch,
request_epoch: Epoch,
},
SlashingsCacheUninitialized {
initialized_slot: Option<Slot>,
latest_block_slot: Slot,
},
CommitteeCacheUninitialized(Option<RelativeEpoch>),
SyncCommitteeCacheUninitialized,
BlsError(bls::Error),
SszTypesError(ssz_types::Error),
TreeHashCacheNotInitialized,
NonLinearTreeHashCacheHistory,
ParticipationCacheError(String),
ProgressiveBalancesCacheNotInitialized,
ProgressiveBalancesCacheInconsistent,
TreeHashCacheSkippedSlot {
@@ -133,6 +145,7 @@ pub enum Error {
epoch: Epoch,
},
IndexNotSupported(usize),
InvalidFlagIndex(usize),
MerkleTreeError(merkle_proof::MerkleTreeError),
}
@@ -356,6 +369,18 @@ where
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub slashings_cache: SlashingsCache,
/// Epoch cache of values that are useful for block processing that are static over an epoch.
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
pub epoch_cache: EpochCache,
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub tree_hash_cache: BeaconTreeHashCache<E>,
}
@@ -422,6 +447,8 @@ impl<E: EthSpec> BeaconState<E> {
],
pubkey_cache: PubkeyCache::default(),
exit_cache: ExitCache::default(),
slashings_cache: SlashingsCache::default(),
epoch_cache: EpochCache::default(),
tree_hash_cache: <_>::default(),
})
}
@@ -514,10 +541,8 @@ impl<E: EthSpec> BeaconState<E> {
/// If the current epoch is the genesis epoch, the genesis_epoch is returned.
pub fn previous_epoch(&self) -> Epoch {
let current_epoch = self.current_epoch();
if current_epoch > E::genesis_epoch() {
current_epoch
.safe_sub(1)
.expect("current epoch greater than genesis implies greater than 0")
if let Ok(prev_epoch) = current_epoch.safe_sub(1) {
prev_epoch
} else {
current_epoch
}
@@ -892,14 +917,16 @@ impl<E: EthSpec> BeaconState<E> {
&mut self,
sync_committee: &SyncCommittee<E>,
) -> Result<Vec<usize>, Error> {
let mut indices = Vec::with_capacity(sync_committee.pubkeys.len());
for pubkey in sync_committee.pubkeys.iter() {
indices.push(
self.get_validator_index(pubkey)?
.ok_or(Error::PubkeyCacheInconsistent)?,
)
}
Ok(indices)
self.update_pubkey_cache()?;
sync_committee
.pubkeys
.iter()
.map(|pubkey| {
self.pubkey_cache()
.get(pubkey)
.ok_or(Error::PubkeyCacheInconsistent)
})
.collect()
}
/// Compute the sync committee indices for the next sync committee.
@@ -1212,7 +1239,11 @@ impl<E: EthSpec> BeaconState<E> {
/// Convenience accessor for validators and balances simultaneously.
pub fn validators_and_balances_and_progressive_balances_mut(
&mut self,
) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) {
) -> (
&mut Validators<E>,
&mut Balances<E>,
&mut ProgressiveBalancesCache,
) {
match self {
BeaconState::Base(state) => (
&mut state.validators,
@@ -1247,6 +1278,77 @@ impl<E: EthSpec> BeaconState<E> {
}
}
#[allow(clippy::type_complexity)]
/// Convenience accessor returning mutable references to all per-validator fields needed by
/// single-pass epoch processing, plus the mutable caches that must be kept in sync.
///
/// Returns `Error::IncorrectStateVariant` for phase0 (`Base`) states, which lack
/// participation flags and inactivity scores.
pub fn mutable_validator_fields(
    &mut self,
) -> Result<
    (
        &mut Validators<E>,
        &mut Balances<E>,
        &VariableList<ParticipationFlags, E::ValidatorRegistryLimit>,
        &VariableList<ParticipationFlags, E::ValidatorRegistryLimit>,
        &mut VariableList<u64, E::ValidatorRegistryLimit>,
        &mut ProgressiveBalancesCache,
        &mut ExitCache,
        &mut EpochCache,
    ),
    Error,
> {
    // Every Altair-onwards variant exposes the same field layout, so a single macro arm
    // body serves them all.
    macro_rules! fields {
        ($state:expr) => {
            Ok((
                &mut $state.validators,
                &mut $state.balances,
                &$state.previous_epoch_participation,
                &$state.current_epoch_participation,
                &mut $state.inactivity_scores,
                &mut $state.progressive_balances_cache,
                &mut $state.exit_cache,
                &mut $state.epoch_cache,
            ))
        };
    }
    match self {
        BeaconState::Base(_) => Err(Error::IncorrectStateVariant),
        BeaconState::Altair(state) => fields!(state),
        BeaconState::Merge(state) => fields!(state),
        BeaconState::Capella(state) => fields!(state),
        BeaconState::Deneb(state) => fields!(state),
        BeaconState::Electra(state) => fields!(state),
    }
}
/// Generate a seed for the given `epoch`.
pub fn get_seed(
&self,
@@ -1336,14 +1438,12 @@ impl<E: EthSpec> BeaconState<E> {
epoch: Epoch,
spec: &ChainSpec,
) -> Result<Epoch, Error> {
Ok(epoch.safe_add(1)?.safe_add(spec.max_seed_lookahead)?)
Ok(spec.compute_activation_exit_epoch(epoch)?)
}
/// Return the churn limit for the current epoch (number of validators who can leave per epoch).
///
/// Uses the epoch cache, and will error if it isn't initialized.
///
/// Spec v0.12.1
/// Uses the current epoch committee cache, and will error if it isn't initialized.
pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result<u64, Error> {
Ok(std::cmp::max(
spec.min_per_epoch_churn_limit,
@@ -1356,9 +1456,7 @@ impl<E: EthSpec> BeaconState<E> {
/// Return the activation churn limit for the current epoch (number of validators who can enter per epoch).
///
/// Uses the epoch cache, and will error if it isn't initialized.
///
/// Spec v1.4.0
/// Uses the current epoch committee cache, and will error if it isn't initialized.
pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result<u64, Error> {
Ok(match self {
BeaconState::Base(_)
@@ -1388,20 +1486,22 @@ impl<E: EthSpec> BeaconState<E> {
Ok(cache.get_attestation_duties(validator_index))
}
/// Implementation of `get_total_balance`, matching the spec.
/// Compute the total active balance cache from scratch.
///
/// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0.
pub fn get_total_balance<'a, I: IntoIterator<Item = &'a usize>>(
&'a self,
validator_indices: I,
spec: &ChainSpec,
) -> Result<u64, Error> {
let total_balance = validator_indices.into_iter().try_fold(0_u64, |acc, i| {
self.get_effective_balance(*i)
.and_then(|bal| Ok(acc.safe_add(bal)?))
})?;
/// This method should rarely be invoked because single-pass epoch processing keeps the total
/// active balance cache up to date.
pub fn compute_total_active_balance_slow(&self, spec: &ChainSpec) -> Result<u64, Error> {
let current_epoch = self.current_epoch();
let mut total_active_balance = 0;
for validator in self.validators() {
if validator.is_active_at(current_epoch) {
total_active_balance.safe_add_assign(validator.effective_balance)?;
}
}
Ok(std::cmp::max(
total_balance,
total_active_balance,
spec.effective_balance_increment,
))
}
@@ -1413,33 +1513,54 @@ impl<E: EthSpec> BeaconState<E> {
///
/// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0.
pub fn get_total_active_balance(&self) -> Result<u64, Error> {
    // Delegate to the epoch-checked getter using the state's current epoch; errors if the
    // cache is uninitialized or initialized for a different epoch.
    self.get_total_active_balance_at_epoch(self.current_epoch())
}
/// Get the cached total active balance while checking that it is for the correct `epoch`.
pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result<u64, Error> {
let (initialized_epoch, balance) = self
.total_active_balance()
.ok_or(Error::TotalActiveBalanceCacheUninitialized)?;
let current_epoch = self.current_epoch();
if initialized_epoch == current_epoch {
if initialized_epoch == epoch {
Ok(balance)
} else {
Err(Error::TotalActiveBalanceCacheInconsistent {
initialized_epoch,
current_epoch,
current_epoch: epoch,
})
}
}
/// Build the total active balance cache.
/// Manually set the total active balance.
///
/// This function requires the current committee cache to be already built. It is called
/// automatically when `build_committee_cache` is called for the current epoch.
fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> {
// Order is irrelevant, so use the cached indices.
let current_epoch = self.current_epoch();
let total_active_balance = self.get_total_balance(
self.get_cached_active_validator_indices(RelativeEpoch::Current)?,
spec,
)?;
*self.total_active_balance_mut() = Some((current_epoch, total_active_balance));
/// This should only be called when the total active balance has been computed as part of
/// single-pass epoch processing (or `process_rewards_and_penalties` for phase0).
///
/// This function will ensure the balance is never set to 0, thus conforming to the spec.
pub fn set_total_active_balance(&mut self, epoch: Epoch, balance: u64, spec: &ChainSpec) {
let safe_balance = std::cmp::max(balance, spec.effective_balance_increment);
*self.total_active_balance_mut() = Some((epoch, safe_balance));
}
/// Build the total active balance cache for the current epoch if it is not already built.
pub fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> {
    // Probe the cache first: any error means it is missing or stale for the current epoch,
    // in which case we rebuild it from scratch.
    match self.get_total_active_balance_at_epoch(self.current_epoch()) {
        Ok(_) => Ok(()),
        Err(_) => self.force_build_total_active_balance_cache(spec),
    }
}
/// Build the total active balance cache, even if it is already built.
pub fn force_build_total_active_balance_cache(
    &mut self,
    spec: &ChainSpec,
) -> Result<(), Error> {
    // Recompute via the slow full-scan path, then store it tagged with the current epoch.
    let epoch = self.current_epoch();
    let balance = self.compute_total_active_balance_slow(spec)?;
    *self.total_active_balance_mut() = Some((epoch, balance));
    Ok(())
}
@@ -1452,8 +1573,10 @@ impl<E: EthSpec> BeaconState<E> {
pub fn get_epoch_participation_mut(
&mut self,
epoch: Epoch,
previous_epoch: Epoch,
current_epoch: Epoch,
) -> Result<&mut VariableList<ParticipationFlags, E::ValidatorRegistryLimit>, Error> {
if epoch == self.current_epoch() {
if epoch == current_epoch {
match self {
BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant),
BeaconState::Altair(state) => Ok(&mut state.current_epoch_participation),
@@ -1462,7 +1585,7 @@ impl<E: EthSpec> BeaconState<E> {
BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation),
BeaconState::Electra(state) => Ok(&mut state.current_epoch_participation),
}
} else if epoch == self.previous_epoch() {
} else if epoch == previous_epoch {
match self {
BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant),
BeaconState::Altair(state) => Ok(&mut state.previous_epoch_participation),
@@ -1494,6 +1617,7 @@ impl<E: EthSpec> BeaconState<E> {
self.build_all_committee_caches(spec)?;
self.update_pubkey_cache()?;
self.build_exit_cache(spec)?;
self.build_slashings_cache()?;
Ok(())
}
@@ -1514,6 +1638,20 @@ impl<E: EthSpec> BeaconState<E> {
Ok(())
}
/// Build the slashings cache if it needs to be built.
pub fn build_slashings_cache(&mut self) -> Result<(), Error> {
    let latest_block_slot = self.latest_block_header().slot;
    // Rebuild only when the existing cache is not initialized for the latest block slot.
    let cache_is_current = self.slashings_cache().is_initialized(latest_block_slot);
    if !cache_is_current {
        let rebuilt = SlashingsCache::new(latest_block_slot, self.validators());
        *self.slashings_cache_mut() = rebuilt;
    }
    Ok(())
}
/// Return `true` if the slashings cache is initialized for the latest block's slot.
pub fn slashings_cache_is_initialized(&self) -> bool {
    self.slashings_cache()
        .is_initialized(self.latest_block_header().slot)
}
/// Drop all caches on the state.
pub fn drop_all_caches(&mut self) -> Result<(), Error> {
self.drop_total_active_balance_cache();
@@ -1524,6 +1662,8 @@ impl<E: EthSpec> BeaconState<E> {
self.drop_tree_hash_cache();
self.drop_progressive_balances_cache();
*self.exit_cache_mut() = ExitCache::default();
*self.slashings_cache_mut() = SlashingsCache::default();
*self.epoch_cache_mut() = EpochCache::default();
Ok(())
}
@@ -1536,7 +1676,7 @@ impl<E: EthSpec> BeaconState<E> {
})
}
/// Build an epoch cache, unless it has already been built.
/// Build a committee cache, unless it has already been built.
pub fn build_committee_cache(
&mut self,
relative_epoch: RelativeEpoch,
@@ -1557,7 +1697,7 @@ impl<E: EthSpec> BeaconState<E> {
Ok(())
}
/// Always builds the previous epoch cache, even if it is already initialized.
/// Always builds the requested committee cache, even if it is already initialized.
pub fn force_build_committee_cache(
&mut self,
relative_epoch: RelativeEpoch,
@@ -1586,42 +1726,17 @@ impl<E: EthSpec> BeaconState<E> {
///
/// This should be used if the `slot` of this state is advanced beyond an epoch boundary.
///
/// Note: this function will not build any new committee caches, but will build the total
/// balance cache if the (new) current epoch cache is initialized.
pub fn advance_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
/// Note: this function will not build any new committee caches, nor will it update the total
/// active balance cache. The total active balance cache must be updated separately.
pub fn advance_caches(&mut self) -> Result<(), Error> {
self.committee_caches_mut().rotate_left(1);
// Re-compute total active balance for current epoch.
//
// This can only be computed once the state's effective balances have been updated
// for the current epoch. I.e. it is not possible to know this value with the same
// lookahead as the committee shuffling.
let curr = Self::committee_cache_index(RelativeEpoch::Current);
let curr_cache = mem::take(self.committee_cache_at_index_mut(curr)?);
// If current epoch cache is initialized, compute the total active balance from its
// indices. We check that the cache is initialized at the _next_ epoch because the slot has
// not yet been advanced.
let new_current_epoch = self.next_epoch()?;
if curr_cache.is_initialized_at(new_current_epoch) {
*self.total_active_balance_mut() = Some((
new_current_epoch,
self.get_total_balance(curr_cache.active_validator_indices(), spec)?,
));
}
// If the cache is not initialized, then the previous cached value for the total balance is
// wrong, so delete it.
else {
self.drop_total_active_balance_cache();
}
*self.committee_cache_at_index_mut(curr)? = curr_cache;
let next = Self::committee_cache_index(RelativeEpoch::Next);
*self.committee_cache_at_index_mut(next)? = CommitteeCache::default();
Ok(())
}
fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize {
pub(crate) fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize {
match relative_epoch {
RelativeEpoch::Previous => 0,
RelativeEpoch::Current => 1,
@@ -1795,6 +1910,9 @@ impl<E: EthSpec> BeaconState<E> {
if config.exit_cache {
*res.exit_cache_mut() = self.exit_cache().clone();
}
if config.slashings_cache {
*res.slashings_cache_mut() = self.slashings_cache().clone();
}
if config.tree_hash_cache {
*res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
}
@@ -1813,9 +1931,8 @@ impl<E: EthSpec> BeaconState<E> {
/// Return `true` if `val` should be considered during rewards & penalties processing:
/// either active in the previous epoch, or slashed but not yet withdrawable.
pub fn is_eligible_validator(
    &self,
    previous_epoch: Epoch,
    val: &Validator,
) -> Result<bool, Error> {
    Ok(val.is_active_at(previous_epoch)
        || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch))
}
@@ -1855,6 +1972,13 @@ impl<E: EthSpec> BeaconState<E> {
Ok(sync_committee)
}
/// Get the base reward for `validator_index` from the epoch cache.
///
/// This function will error if the epoch cache is not initialized.
pub fn get_base_reward(&self, validator_index: usize) -> Result<u64, EpochCacheError> {
    // Delegates to the cache; note the error type is `EpochCacheError`, not `Error`.
    self.epoch_cache().get_base_reward(validator_index)
}
pub fn compute_merkle_proof(
&mut self,
generalized_index: usize,

View File

@@ -4,6 +4,7 @@ pub struct CloneConfig {
pub committee_caches: bool,
pub pubkey_cache: bool,
pub exit_cache: bool,
pub slashings_cache: bool,
pub tree_hash_cache: bool,
pub progressive_balances_cache: bool,
}
@@ -14,6 +15,7 @@ impl CloneConfig {
committee_caches: true,
pubkey_cache: true,
exit_cache: true,
slashings_cache: true,
tree_hash_cache: true,
progressive_balances_cache: true,
}

View File

@@ -2,6 +2,7 @@
use crate::*;
use core::num::NonZeroUsize;
use derivative::Derivative;
use safe_arith::SafeArith;
use serde::{Deserialize, Serialize};
use ssz::{four_byte_option_impl, Decode, DecodeError, Encode};
@@ -18,16 +19,44 @@ four_byte_option_impl!(four_byte_option_non_zero_usize, NonZeroUsize);
/// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to
/// read the committees for the given epoch.
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)]
#[derive(Derivative, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)]
#[derivative(PartialEq)]
pub struct CommitteeCache {
#[ssz(with = "four_byte_option_epoch")]
initialized_epoch: Option<Epoch>,
shuffling: Vec<usize>,
#[derivative(PartialEq(compare_with = "compare_shuffling_positions"))]
shuffling_positions: Vec<NonZeroUsizeOption>,
committees_per_slot: u64,
slots_per_epoch: u64,
}
/// Equivalence function for `shuffling_positions` that ignores trailing `None` entries.
///
/// It can happen that states from different epochs computing the same cache have different
/// numbers of validators in `state.validators()` due to recent deposits. These new validators
/// cannot be active however and will always be omitted from the shuffling. This function checks
/// that two lists of shuffling positions are equivalent by ensuring that they are identical on all
/// common entries, and that new entries at the end are all `None`.
///
/// In practice this is only used in tests.
#[allow(clippy::indexing_slicing)]
fn compare_shuffling_positions(xs: &Vec<NonZeroUsizeOption>, ys: &Vec<NonZeroUsizeOption>) -> bool {
    // Compare the shared prefix directly; at most one of the two tails below is non-empty,
    // and that tail must consist entirely of `None` entries.
    let common = std::cmp::min(xs.len(), ys.len());
    let prefixes_match = xs[..common] == ys[..common];
    let tails_all_none = xs[common..]
        .iter()
        .chain(&ys[common..])
        .all(|extra| *extra == NonZeroUsizeOption(None));
    prefixes_match && tails_all_none
}
impl CommitteeCache {
/// Return a new, fully initialized cache.
///
@@ -321,17 +350,21 @@ pub fn epoch_committee_count(committees_per_slot: usize, slots_per_epoch: usize)
/// `epoch`.
///
/// Spec v0.12.1
pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
let mut active = Vec::with_capacity(validators.len());
pub fn get_active_validator_indices<'a, V, I>(validators: V, epoch: Epoch) -> Vec<usize>
where
V: IntoIterator<Item = &'a Validator, IntoIter = I>,
I: ExactSizeIterator + Iterator<Item = &'a Validator>,
{
let iter = validators.into_iter();
for (index, validator) in validators.iter().enumerate() {
let mut active = Vec::with_capacity(iter.len());
for (index, validator) in iter.enumerate() {
if validator.is_active_at(epoch) {
active.push(index)
}
}
active.shrink_to_fit();
active
}

View File

@@ -1,13 +1,17 @@
use super::{BeaconStateError, ChainSpec, Epoch, Validator};
use safe_arith::SafeArith;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::cmp::Ordering;
/// Cache of the maximum validator exit epoch and the number of validators exiting at it.
///
/// This compact representation replaces a full map from exit epoch to count: consensus code
/// only ever needs the churn at or beyond the current maximum exit epoch.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
pub struct ExitCache {
    /// True if the cache has been initialized.
    initialized: bool,
    /// Maximum `exit_epoch` of any validator.
    max_exit_epoch: Epoch,
    /// Number of validators known to be exiting at `max_exit_epoch`.
    max_exit_epoch_churn: u64,
}
impl ExitCache {
@@ -15,7 +19,8 @@ impl ExitCache {
pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result<Self, BeaconStateError> {
let mut exit_cache = ExitCache {
initialized: true,
..ExitCache::default()
max_exit_epoch: Epoch::new(0),
max_exit_epoch_churn: 0,
};
// Add all validators with a non-default exit epoch to the cache.
validators
@@ -37,27 +42,44 @@ impl ExitCache {
/// Record the exit epoch of a validator. Must be called only once per exiting validator.
pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> {
    self.check_initialized()?;
    match exit_epoch.cmp(&self.max_exit_epoch) {
        // Update churn for the current maximum epoch.
        Ordering::Equal => {
            self.max_exit_epoch_churn.safe_add_assign(1)?;
        }
        // Increase the max exit epoch, reset the churn to 1.
        Ordering::Greater => {
            self.max_exit_epoch = exit_epoch;
            self.max_exit_epoch_churn = 1;
        }
        // Older exit epochs are not relevant.
        Ordering::Less => (),
    }
    Ok(())
}
/// Get the largest exit epoch with a non-zero exit epoch count, or `None` if no exits
/// have been recorded.
pub fn max_epoch(&self) -> Result<Option<Epoch>, BeaconStateError> {
    self.check_initialized()?;
    // A zero churn count means no validator has been recorded as exiting yet.
    Ok((self.max_exit_epoch_churn > 0).then_some(self.max_exit_epoch))
}
/// Get number of validators with the given exit epoch. (Return 0 for the default exit epoch.)
pub fn get_churn_at(&self, exit_epoch: Epoch) -> Result<u64, BeaconStateError> {
    self.check_initialized()?;
    match exit_epoch.cmp(&self.max_exit_epoch) {
        // Epochs are equal, we know the churn exactly.
        Ordering::Equal => Ok(self.max_exit_epoch_churn),
        // If exiting at an epoch later than the cached epoch then the churn is 0. This is a
        // common case which happens when there are no exits for an epoch.
        Ordering::Greater => Ok(0),
        // Consensus code should never require the churn at an epoch prior to the cached epoch.
        // That's a bug.
        Ordering::Less => Err(BeaconStateError::ExitCacheInvalidEpoch {
            max_exit_epoch: self.max_exit_epoch,
            request_epoch: exit_epoch,
        }),
    }
}
}

View File

@@ -1,9 +1,13 @@
use crate::beacon_state::balance::Balance;
use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec};
use crate::{
consts::altair::{
NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
TIMELY_TARGET_FLAG_INDEX,
},
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags,
};
use arbitrary::Arbitrary;
use safe_arith::SafeArith;
use serde::{Deserialize, Serialize};
use strum::{Display, EnumString, EnumVariantNames};
/// This cache keeps track of the accumulated target attestation balance for the current & previous
/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification
@@ -17,21 +21,120 @@ pub struct ProgressiveBalancesCache {
/// Inner value of the progressive balances cache: per-epoch flag-balance totals, tagged with
/// the epoch they were computed for.
#[derive(Debug, PartialEq, Arbitrary, Clone)]
struct Inner {
    pub current_epoch: Epoch,
    pub previous_epoch_cache: EpochTotalBalances,
    pub current_epoch_cache: EpochTotalBalances,
}
/// Caches the participation values for one epoch (either the previous or current).
#[derive(PartialEq, Debug, Clone, Arbitrary)]
pub struct EpochTotalBalances {
    /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices`
    /// for all flags in `NUM_FLAG_INDICES`.
    ///
    /// A flag balance is only incremented if a validator is in that flag set.
    // Indexed by participation flag index (e.g. `TIMELY_TARGET_FLAG_INDEX`).
    pub total_flag_balances: [Balance; NUM_FLAG_INDICES],
}
impl EpochTotalBalances {
    /// Create a fresh set of totals, zeroed at the spec's effective balance increment.
    pub fn new(spec: &ChainSpec) -> Self {
        Self {
            total_flag_balances: [Balance::zero(spec.effective_balance_increment);
                NUM_FLAG_INDICES],
        }
    }

    /// Returns the total balance of attesters who have `flag_index` set.
    pub fn total_flag_balance(&self, flag_index: usize) -> Result<u64, BeaconStateError> {
        self.total_flag_balance_raw(flag_index).map(|b| b.get())
    }

    /// Returns the raw total balance of attesters who have `flag_index` set.
    pub fn total_flag_balance_raw(&self, flag_index: usize) -> Result<Balance, BeaconStateError> {
        match self.total_flag_balances.get(flag_index) {
            Some(balance) => Ok(*balance),
            None => Err(BeaconStateError::InvalidFlagIndex(flag_index)),
        }
    }

    /// Add `validator_effective_balance` to the total for `flag_index` when a new
    /// (unslashed) attestation sets that flag.
    pub fn on_new_attestation(
        &mut self,
        is_slashed: bool,
        flag_index: usize,
        validator_effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        // Slashed validators never contribute to participation totals.
        if is_slashed {
            return Ok(());
        }
        self.total_flag_balances
            .get_mut(flag_index)
            .ok_or(BeaconStateError::InvalidFlagIndex(flag_index))?
            .safe_add_assign(validator_effective_balance)?;
        Ok(())
    }

    /// Remove a newly-slashed validator's weight from every flag total it contributed to.
    pub fn on_slashing(
        &mut self,
        participation_flags: ParticipationFlags,
        validator_effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        for (flag_index, total) in self.total_flag_balances.iter_mut().enumerate() {
            if participation_flags.has_flag(flag_index)? {
                total.safe_sub_assign(validator_effective_balance)?;
            }
        }
        Ok(())
    }

    /// Adjust the totals when a validator's effective balance changes.
    pub fn on_effective_balance_change(
        &mut self,
        is_slashed: bool,
        current_epoch_participation_flags: ParticipationFlags,
        old_effective_balance: u64,
        new_effective_balance: u64,
    ) -> Result<(), BeaconStateError> {
        // If the validator is slashed then we should not update the effective balance, because
        // this validator's effective balance has already been removed from the totals.
        if is_slashed {
            return Ok(());
        }
        for (flag_index, total) in self.total_flag_balances.iter_mut().enumerate() {
            if !current_epoch_participation_flags.has_flag(flag_index)? {
                continue;
            }
            // Apply the delta in whichever direction the balance moved.
            if new_effective_balance > old_effective_balance {
                total.safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
            } else {
                total.safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
            }
        }
        Ok(())
    }
}
impl ProgressiveBalancesCache {
/// Initialize the cache with freshly computed per-epoch totals for `current_epoch`.
pub fn initialize(
    &mut self,
    current_epoch: Epoch,
    previous_epoch_cache: EpochTotalBalances,
    current_epoch_cache: EpochTotalBalances,
) {
    self.inner = Some(Inner {
        current_epoch,
        previous_epoch_cache,
        current_epoch_cache,
    });
}
@@ -39,24 +142,36 @@ impl ProgressiveBalancesCache {
self.inner.is_some()
}
/// Return `true` if the cache is initialized *and* was built for `epoch`.
pub fn is_initialized_at(&self, epoch: Epoch) -> bool {
    matches!(&self.inner, Some(inner) if inner.current_epoch == epoch)
}
/// When a new target attestation has been processed, we update the cached
/// `current_epoch_target_attesting_balance` to include the validator effective balance.
/// If the epoch is neither the current epoch nor the previous epoch, an error is returned.
pub fn on_new_target_attestation(
pub fn on_new_attestation(
&mut self,
epoch: Epoch,
is_slashed: bool,
flag_index: usize,
validator_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if epoch == cache.current_epoch {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
cache.current_epoch_cache.on_new_attestation(
is_slashed,
flag_index,
validator_effective_balance,
)?;
} else if epoch.safe_add(1)? == cache.current_epoch {
cache
.previous_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
cache.previous_epoch_cache.on_new_attestation(
is_slashed,
flag_index,
validator_effective_balance,
)?;
} else {
return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent);
}
@@ -68,21 +183,17 @@ impl ProgressiveBalancesCache {
/// validator's effective balance to exclude the validator weight.
pub fn on_slashing(
&mut self,
is_previous_epoch_target_attester: bool,
is_current_epoch_target_attester: bool,
previous_epoch_participation: ParticipationFlags,
current_epoch_participation: ParticipationFlags,
effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_previous_epoch_target_attester {
cache
.previous_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
if is_current_epoch_target_attester {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
cache
.previous_epoch_cache
.on_slashing(previous_epoch_participation, effective_balance)?;
cache
.current_epoch_cache
.on_slashing(current_epoch_participation, effective_balance)?;
Ok(())
}
@@ -90,22 +201,18 @@ impl ProgressiveBalancesCache {
/// its share of the target attesting balance in the cache.
pub fn on_effective_balance_change(
&mut self,
is_current_epoch_target_attester: bool,
is_slashed: bool,
current_epoch_participation: ParticipationFlags,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_current_epoch_target_attester {
if new_effective_balance > old_effective_balance {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
} else {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
}
}
cache.current_epoch_cache.on_effective_balance_change(
is_slashed,
current_epoch_participation,
old_effective_balance,
new_effective_balance,
)?;
Ok(())
}
@@ -114,25 +221,53 @@ impl ProgressiveBalancesCache {
pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
cache.current_epoch.safe_add_assign(1)?;
cache.previous_epoch_target_attesting_balance =
cache.current_epoch_target_attesting_balance;
cache.current_epoch_target_attesting_balance =
Balance::zero(spec.effective_balance_increment);
cache.previous_epoch_cache = std::mem::replace(
&mut cache.current_epoch_cache,
EpochTotalBalances::new(spec),
);
Ok(())
}
/// Total attesting balance in the *previous* epoch for the participation flag at `flag_index`.
///
/// Errors if the cache is uninitialized; presumably also errors for an out-of-range
/// `flag_index` — confirm against `EpochTotalBalances::total_flag_balance`.
pub fn previous_epoch_flag_attesting_balance(
    &self,
    flag_index: usize,
) -> Result<u64, BeaconStateError> {
    self.get_inner()?
        .previous_epoch_cache
        .total_flag_balance(flag_index)
}
/// Total attesting balance in the *current* epoch for the participation flag at `flag_index`.
///
/// Errors if the cache is uninitialized; presumably also errors for an out-of-range
/// `flag_index` — confirm against `EpochTotalBalances::total_flag_balance`.
pub fn current_epoch_flag_attesting_balance(
    &self,
    flag_index: usize,
) -> Result<u64, BeaconStateError> {
    self.get_inner()?
        .current_epoch_cache
        .total_flag_balance(flag_index)
}
/// Total balance attesting with the timely-source flag in the previous epoch.
pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, BeaconStateError> {
    self.previous_epoch_flag_attesting_balance(TIMELY_SOURCE_FLAG_INDEX)
}
pub fn previous_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.previous_epoch_target_attesting_balance
.get())
self.previous_epoch_flag_attesting_balance(TIMELY_TARGET_FLAG_INDEX)
}
/// Total balance attesting with the timely-head flag in the previous epoch.
pub fn previous_epoch_head_attesting_balance(&self) -> Result<u64, BeaconStateError> {
    self.previous_epoch_flag_attesting_balance(TIMELY_HEAD_FLAG_INDEX)
}
/// Total balance attesting with the timely-source flag in the current epoch.
pub fn current_epoch_source_attesting_balance(&self) -> Result<u64, BeaconStateError> {
    self.current_epoch_flag_attesting_balance(TIMELY_SOURCE_FLAG_INDEX)
}
pub fn current_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.current_epoch_target_attesting_balance
.get())
self.current_epoch_flag_attesting_balance(TIMELY_TARGET_FLAG_INDEX)
}
/// Total balance attesting with the timely-head flag in the current epoch.
pub fn current_epoch_head_attesting_balance(&self) -> Result<u64, BeaconStateError> {
    self.current_epoch_flag_attesting_balance(TIMELY_HEAD_FLAG_INDEX)
}
fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> {
@@ -148,34 +283,7 @@ impl ProgressiveBalancesCache {
}
}
#[derive(
Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames,
)]
#[strum(serialize_all = "lowercase")]
pub enum ProgressiveBalancesMode {
/// Disable the usage of progressive cache, and use the existing `ParticipationCache` calculation.
Disabled,
/// Enable the usage of progressive cache, with checks against the `ParticipationCache` and falls
/// back to the existing calculation if there is a balance mismatch.
Checked,
/// Enable the usage of progressive cache, with checks against the `ParticipationCache`. Errors
/// if there is a balance mismatch. Used in testing only.
Strict,
/// Enable the usage of progressive cache, with no comparative checks against the
/// `ParticipationCache`. This is fast but an experimental mode, use with caution.
Fast,
}
impl ProgressiveBalancesMode {
pub fn perform_comparative_checks(&self) -> bool {
match self {
Self::Disabled | Self::Fast => false,
Self::Checked | Self::Strict => true,
}
}
}
/// `ProgressiveBalancesCache` is only enabled from `Altair` as it requires `ParticipationCache`.
/// `ProgressiveBalancesCache` is only enabled from `Altair` as it uses Altair-specific logic.
pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool {
match state {
BeaconState::Base(_) => false,

View File

@@ -4,6 +4,7 @@ use std::collections::HashMap;
type ValidatorIndex = usize;
#[allow(clippy::len_without_is_empty)]
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
pub struct PubkeyCache {
/// Maintain the number of keys added to the map. It is not sufficient to just use the HashMap

View File

@@ -0,0 +1,63 @@
use crate::{BeaconStateError, Slot, Validator};
use arbitrary::Arbitrary;
use std::collections::HashSet;
/// Persistent (cheap to clone) cache of all slashed validator indices.
///
/// NOTE(review): cloning the `HashSet` is O(n) in the number of slashed validators — "cheap"
/// holds only while that set stays small; confirm this assumption.
#[derive(Debug, Default, Clone, PartialEq, Arbitrary)]
pub struct SlashingsCache {
    // Slot of the latest block applied on top of this cache; `None` until initialized.
    latest_block_slot: Option<Slot>,
    // Indices of all validators known to be slashed as of `latest_block_slot`.
    #[arbitrary(default)]
    slashed_validators: HashSet<usize>,
}
impl SlashingsCache {
    /// Build a cache for `validators`, recording every index whose validator is slashed.
    pub fn new<'a, V, I>(latest_block_slot: Slot, validators: V) -> Self
    where
        V: IntoIterator<Item = &'a Validator, IntoIter = I>,
        I: ExactSizeIterator + Iterator<Item = &'a Validator>,
    {
        let slashed_validators = validators
            .into_iter()
            .enumerate()
            .filter(|(_, validator)| validator.slashed)
            .map(|(index, _)| index)
            .collect();
        Self {
            latest_block_slot: Some(latest_block_slot),
            slashed_validators,
        }
    }

    /// Returns `true` iff the cache is initialized exactly at `slot`.
    pub fn is_initialized(&self, slot: Slot) -> bool {
        self.latest_block_slot.map_or(false, |s| s == slot)
    }

    /// Error unless the cache is initialized at `latest_block_slot`.
    pub fn check_initialized(&self, latest_block_slot: Slot) -> Result<(), BeaconStateError> {
        if !self.is_initialized(latest_block_slot) {
            return Err(BeaconStateError::SlashingsCacheUninitialized {
                initialized_slot: self.latest_block_slot,
                latest_block_slot,
            });
        }
        Ok(())
    }

    /// Mark `validator_index` as slashed, checking the cache matches `block_slot` first.
    pub fn record_validator_slashing(
        &mut self,
        block_slot: Slot,
        validator_index: usize,
    ) -> Result<(), BeaconStateError> {
        self.check_initialized(block_slot)?;
        self.slashed_validators.insert(validator_index);
        Ok(())
    }

    /// Returns `true` iff `validator_index` has been recorded as slashed.
    pub fn is_slashed(&self, validator_index: usize) -> bool {
        self.slashed_validators.contains(&validator_index)
    }

    /// Advance the cache's slot marker to `latest_block_slot`.
    pub fn update_latest_block_slot(&mut self, latest_block_slot: Slot) {
        self.latest_block_slot = Some(latest_block_slot);
    }
}

View File

@@ -223,13 +223,14 @@ async fn clone_config() {
.update_tree_hash_cache()
.expect("should update tree hash cache");
let num_caches = 5;
let num_caches = 6;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0,
tree_hash_cache: ((i >> 3) & 1) != 0,
progressive_balances_cache: ((i >> 4) & 1) != 0,
slashings_cache: ((i >> 3) & 1) != 0,
tree_hash_cache: ((i >> 4) & 1) != 0,
progressive_balances_cache: ((i >> 5) & 1) != 0,
});
for config in all_configs {

View File

@@ -2,8 +2,8 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER};
use crate::blob_sidecar::BlobIdentifier;
use crate::*;
use int_to_bytes::int_to_bytes4;
use serde::Deserialize;
use serde::{Deserializer, Serialize, Serializer};
use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_utils::quoted_u64::MaybeQuoted;
use ssz::Encode;
use std::fs::File;
@@ -331,15 +331,13 @@ impl ChainSpec {
}
}
/// For a given `BeaconState`, return the inactivity penalty quotient associated with its variant.
pub fn inactivity_penalty_quotient_for_state<E: EthSpec>(&self, state: &BeaconState<E>) -> u64 {
match state {
BeaconState::Base(_) => self.inactivity_penalty_quotient,
BeaconState::Altair(_) => self.inactivity_penalty_quotient_altair,
BeaconState::Merge(_) => self.inactivity_penalty_quotient_bellatrix,
BeaconState::Capella(_) => self.inactivity_penalty_quotient_bellatrix,
BeaconState::Deneb(_) => self.inactivity_penalty_quotient_bellatrix,
BeaconState::Electra(_) => self.inactivity_penalty_quotient_bellatrix,
/// Return the inactivity penalty quotient in force at the given fork.
///
/// Every fork from Bellatrix ("Merge") onwards uses the bellatrix quotient.
pub fn inactivity_penalty_quotient_for_fork(&self, fork_name: ForkName) -> u64 {
    match fork_name {
        ForkName::Base => self.inactivity_penalty_quotient,
        ForkName::Altair => self.inactivity_penalty_quotient_altair,
        ForkName::Merge | ForkName::Capella | ForkName::Deneb | ForkName::Electra => {
            self.inactivity_penalty_quotient_bellatrix
        }
    }
}
@@ -511,6 +509,13 @@ impl ChainSpec {
Hash256::from(domain)
}
/// Compute the epoch used for activations prior to Deneb, and for exits under all forks.
///
/// Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
pub fn compute_activation_exit_epoch(&self, epoch: Epoch) -> Result<Epoch, ArithError> {
    // A change initiated in `epoch` takes effect `1 + MAX_SEED_LOOKAHEAD` epochs later.
    let next_epoch = epoch.safe_add(1)?;
    next_epoch.safe_add(self.max_seed_lookahead)
}
pub fn maximum_gossip_clock_disparity(&self) -> Duration {
Duration::from_millis(self.maximum_gossip_clock_disparity_millis)
}

View File

@@ -0,0 +1,142 @@
use crate::{ActivationQueue, BeaconStateError, ChainSpec, Epoch, Hash256, Slot};
use safe_arith::{ArithError, SafeArith};
use std::sync::Arc;
/// Cache of values which are uniquely determined at the start of an epoch.
///
/// The values are fixed with respect to the last block of the _prior_ epoch, which we refer
/// to as the "decision block". This cache is very similar to the `BeaconProposerCache` in that
/// beacon proposers are determined at exactly the same time as the values in this cache, so
/// the keys for the two caches are identical.
#[derive(Debug, PartialEq, Eq, Clone, Default, arbitrary::Arbitrary)]
pub struct EpochCache {
    // `None` until initialized; `Arc` makes cloning the populated cache cheap.
    inner: Option<Arc<Inner>>,
}
#[derive(Debug, PartialEq, Eq, Clone, arbitrary::Arbitrary)]
struct Inner {
    /// Unique identifier for this cache, which can be used to check its validity before use
    /// with any `BeaconState`.
    key: EpochCacheKey,
    /// Effective balance for every validator in this epoch.
    effective_balances: Vec<u64>,
    /// Base rewards for every effective balance increment (currently 0..32 ETH).
    ///
    /// Keyed by `effective_balance / effective_balance_increment`.
    base_rewards: Vec<u64>,
    /// Validator activation queue.
    activation_queue: ActivationQueue,
    /// Effective balance increment, copied out of the `ChainSpec` at construction time.
    effective_balance_increment: u64,
}
/// Unique identifier for an `EpochCache`: the epoch it covers plus its decision block root.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, arbitrary::Arbitrary)]
pub struct EpochCacheKey {
    /// Epoch that the cached values apply to.
    pub epoch: Epoch,
    /// Root of the last block of the epoch prior to `epoch` (the "decision block").
    pub decision_block_root: Hash256,
}
/// Errors arising from construction or use of an `EpochCache`.
#[derive(Debug, PartialEq, Clone)]
pub enum EpochCacheError {
    /// Cache was built for a different epoch than the state's current epoch.
    IncorrectEpoch { cache: Epoch, state: Epoch },
    /// Cache was built upon a different decision block than the state's.
    IncorrectDecisionBlock { cache: Hash256, state: Hash256 },
    /// Requested validator index exceeds the cached effective balances.
    ValidatorIndexOutOfBounds { validator_index: usize },
    /// Effective balance (in increments) exceeds the range of pre-computed base rewards.
    EffectiveBalanceOutOfBounds { effective_balance_eth: usize },
    /// A slot was rejected — not constructed in this file; confirm semantics at call sites.
    InvalidSlot { slot: Slot },
    /// Arithmetic overflow/underflow from `safe_arith`.
    Arith(ArithError),
    /// Underlying `BeaconState` error.
    BeaconState(BeaconStateError),
    /// The cache has not been initialized.
    CacheNotInitialized,
}
// Enable `?` conversion of state errors when building or using the cache.
impl From<BeaconStateError> for EpochCacheError {
    fn from(e: BeaconStateError) -> Self {
        Self::BeaconState(e)
    }
}
// Enable `?` conversion of safe-arithmetic errors (e.g. from `safe_div` in `get_base_reward`).
impl From<ArithError> for EpochCacheError {
    fn from(e: ArithError) -> Self {
        Self::Arith(e)
    }
}
impl EpochCache {
pub fn new(
key: EpochCacheKey,
effective_balances: Vec<u64>,
base_rewards: Vec<u64>,
activation_queue: ActivationQueue,
spec: &ChainSpec,
) -> EpochCache {
Self {
inner: Some(Arc::new(Inner {
key,
effective_balances,
base_rewards,
activation_queue,
effective_balance_increment: spec.effective_balance_increment,
})),
}
}
pub fn check_validity(
&self,
current_epoch: Epoch,
state_decision_root: Hash256,
) -> Result<(), EpochCacheError> {
let cache = self
.inner
.as_ref()
.ok_or(EpochCacheError::CacheNotInitialized)?;
if cache.key.epoch != current_epoch {
return Err(EpochCacheError::IncorrectEpoch {
cache: cache.key.epoch,
state: current_epoch,
});
}
if cache.key.decision_block_root != state_decision_root {
return Err(EpochCacheError::IncorrectDecisionBlock {
cache: cache.key.decision_block_root,
state: state_decision_root,
});
}
Ok(())
}
#[inline]
pub fn get_effective_balance(&self, validator_index: usize) -> Result<u64, EpochCacheError> {
self.inner
.as_ref()
.ok_or(EpochCacheError::CacheNotInitialized)?
.effective_balances
.get(validator_index)
.copied()
.ok_or(EpochCacheError::ValidatorIndexOutOfBounds { validator_index })
}
#[inline]
pub fn get_base_reward(&self, validator_index: usize) -> Result<u64, EpochCacheError> {
let inner = self
.inner
.as_ref()
.ok_or(EpochCacheError::CacheNotInitialized)?;
let effective_balance = self.get_effective_balance(validator_index)?;
let effective_balance_eth =
effective_balance.safe_div(inner.effective_balance_increment)? as usize;
inner
.base_rewards
.get(effective_balance_eth)
.copied()
.ok_or(EpochCacheError::EffectiveBalanceOutOfBounds {
effective_balance_eth,
})
}
pub fn activation_queue(&self) -> Result<&ActivationQueue, EpochCacheError> {
let inner = self
.inner
.as_ref()
.ok_or(EpochCacheError::CacheNotInitialized)?;
Ok(&inner.activation_queue)
}
}

View File

@@ -74,6 +74,7 @@ pub mod voluntary_exit;
pub mod withdrawal_credentials;
#[macro_use]
pub mod slot_epoch_macros;
pub mod activation_queue;
pub mod config_and_preset;
pub mod execution_block_header;
pub mod fork_context;
@@ -94,6 +95,7 @@ mod tree_hash_impls;
pub mod validator_registration_data;
pub mod withdrawal;
pub mod epoch_cache;
pub mod slot_data;
#[cfg(feature = "sqlite")]
pub mod sqlite;
@@ -105,6 +107,7 @@ pub mod runtime_var_list;
use ethereum_types::{H160, H256};
pub use crate::activation_queue::ActivationQueue;
pub use crate::aggregate_and_proof::AggregateAndProof;
pub use crate::attestation::{Attestation, Error as AttestationError};
pub use crate::attestation_data::AttestationData;
@@ -136,6 +139,7 @@ pub use crate::deposit_data::DepositData;
pub use crate::deposit_message::DepositMessage;
pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock};
pub use crate::enr_fork_id::EnrForkId;
pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey};
pub use crate::eth1_data::Eth1Data;
pub use crate::eth_spec::EthSpecId;
pub use crate::execution_block_hash::ExecutionBlockHash;

View File

@@ -77,6 +77,22 @@ impl Validator {
&& self.activation_epoch == spec.far_future_epoch
}
/// Returns `true` if the validator *could* be eligible for activation at `epoch`.
///
/// Eligibility depends on finalization, so we assume best-possible finalization. This function
/// returning true is a necessary but *not sufficient* condition for a validator to activate in
/// the epoch transition at the end of `epoch`.
pub fn could_be_eligible_for_activation_at(&self, epoch: Epoch, spec: &ChainSpec) -> bool {
    // The validator has not yet been activated.
    let awaiting_activation = self.activation_epoch == spec.far_future_epoch;
    // Placement in the queue could be finalized. The epoch distance is 1 rather than 2
    // because we consider the activations that occur at the *end* of `epoch`, after
    // `process_justification_and_finalization` has already updated the state's checkpoint.
    let eligibility_could_be_finalized = self.activation_eligibility_epoch < epoch;
    awaiting_activation && eligibility_could_be_finalized
}
/// Returns `true` if the validator has eth1 withdrawal credential.
pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool {
self.withdrawal_credentials