In-memory tree states (#5533)

* Consensus changes

* EF tests

* lcli

* common and watch

* account manager

* cargo

* fork choice

* promise cache

* beacon chain

* interop genesis

* http api

* lighthouse

* op pool

* beacon chain misc

* parallel state cache

* store

* fix issues in store

* IT COMPILES

* Remove some unnecessary module qualification

* Revert Arced pubkey optimization (#5536)

* Merge remote-tracking branch 'origin/unstable' into tree-states-memory

* Fix caching, rebasing and some tests

* Remove unused deps

* Merge remote-tracking branch 'origin/unstable' into tree-states-memory

* Small cleanups

* Revert shuffling cache/promise cache changes

* Fix state advance bugs

* Fix shuffling tests

* Remove some resolved FIXMEs

* Remove StateProcessingStrategy

* Optimise withdrawals calculation

* Don't reorg if state cache is missed

* Remove inconsistent state func

* Fix beta compiler

* Rebase early, rebase often

* Fix state caching behaviour

* Update to milhouse release

* Fix on-disk consensus context format

* Merge remote-tracking branch 'origin/unstable' into tree-states-memory

* Squashed commit of the following:

commit 3a16649023
Author: Michael Sproul <michael@sigmaprime.io>
Date:   Thu Apr 18 14:26:09 2024 +1000

    Fix on-disk consensus context format

* Keep indexed attestations, thanks Sean

* Merge branch 'on-disk-consensus-context' into tree-states-memory

* Merge branch 'unstable' into tree-states-memory

* Address half of Sean's review

* More simplifications from Sean's review

* Cache state after get_advanced_hot_state
This commit is contained in:
Michael Sproul
2024-04-24 11:22:36 +10:00
committed by GitHub
parent 4cad1fcbbe
commit 61962898e2
108 changed files with 2038 additions and 2762 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,47 +0,0 @@
/// Configuration struct for controlling which caches of a `BeaconState` should be cloned.
///
/// Each flag enables cloning of the corresponding cache; any flag left `false`
/// causes the clone to start with that cache empty.
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
pub struct CloneConfig {
    pub committee_caches: bool,
    pub pubkey_cache: bool,
    pub exit_cache: bool,
    pub slashings_cache: bool,
    pub tree_hash_cache: bool,
    pub progressive_balances_cache: bool,
}

impl CloneConfig {
    /// Clone every cache.
    pub fn all() -> Self {
        let mut config = Self::none();
        config.committee_caches = true;
        config.pubkey_cache = true;
        config.exit_cache = true;
        config.slashings_cache = true;
        config.tree_hash_cache = true;
        config.progressive_balances_cache = true;
        config
    }

    /// Clone no caches (identical to the `Default` value).
    pub fn none() -> Self {
        Self::default()
    }

    /// Clone only the committee caches.
    pub fn committee_caches_only() -> Self {
        Self {
            committee_caches: true,
            ..Self::none()
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn sanity() {
        assert!(CloneConfig::all().pubkey_cache);
        assert!(!CloneConfig::none().tree_hash_cache);
        assert!(CloneConfig::committee_caches_only().committee_caches);
        assert!(!CloneConfig::committee_caches_only().exit_cache);
    }
}

View File

@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
use ssz::{four_byte_option_impl, Decode, DecodeError, Encode};
use ssz_derive::{Decode, Encode};
use std::ops::Range;
use std::sync::Arc;
use swap_or_not_shuffle::shuffle_list;
mod tests;
@@ -65,7 +66,7 @@ impl CommitteeCache {
state: &BeaconState<E>,
epoch: Epoch,
spec: &ChainSpec,
) -> Result<CommitteeCache, Error> {
) -> Result<Arc<CommitteeCache>, Error> {
// Check that the cache is being built for an in-range epoch.
//
// We allow caches to be constructed for historic epochs, per:
@@ -115,13 +116,13 @@ impl CommitteeCache {
.ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into();
}
Ok(CommitteeCache {
Ok(Arc::new(CommitteeCache {
initialized_epoch: Some(epoch),
shuffling,
shuffling_positions,
committees_per_slot,
slots_per_epoch: E::slots_per_epoch(),
})
}))
}
/// Returns `true` if the cache has been initialized at the supplied `epoch`.

View File

@@ -92,7 +92,7 @@ async fn shuffles_for_the_right_epoch() {
.map(|i| Hash256::from_low_u64_be(i as u64))
.collect();
*state.randao_mixes_mut() = FixedVector::from(distinct_hashes);
*state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap();
let previous_seed = state
.get_seed(state.previous_epoch(), Domain::BeaconAttester, spec)

View File

@@ -1,10 +1,9 @@
use super::{BeaconStateError, ChainSpec, Epoch, Validator};
use safe_arith::SafeArith;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
/// Map from exit epoch to the number of validators with that exit epoch.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, PartialEq)]
pub struct ExitCache {
/// True if the cache has been initialized.
initialized: bool,
@@ -16,7 +15,11 @@ pub struct ExitCache {
impl ExitCache {
/// Initialize a new cache for the given list of validators.
pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result<Self, BeaconStateError> {
pub fn new<'a, V, I>(validators: V, spec: &ChainSpec) -> Result<Self, BeaconStateError>
where
V: IntoIterator<Item = &'a Validator, IntoIter = I>,
I: ExactSizeIterator + Iterator<Item = &'a Validator>,
{
let mut exit_cache = ExitCache {
initialized: true,
max_exit_epoch: Epoch::new(0),
@@ -24,7 +27,7 @@ impl ExitCache {
};
// Add all validators with a non-default exit epoch to the cache.
validators
.iter()
.into_iter()
.filter(|validator| validator.exit_epoch != spec.far_future_epoch)
.try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?;
Ok(exit_cache)

View File

@@ -74,7 +74,7 @@ mod test {
let mut state: BeaconState<E> = BeaconState::new(0, <_>::default(), &spec);
for i in 0..state.block_roots().len() {
state.block_roots_mut()[i] = root_slot(i).1;
*state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1;
}
assert_eq!(
@@ -122,7 +122,7 @@ mod test {
let mut state: BeaconState<E> = BeaconState::new(0, <_>::default(), &spec);
for i in 0..state.block_roots().len() {
state.block_roots_mut()[i] = root_slot(i).1;
*state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1;
}
assert_eq!(

View File

@@ -1,21 +1,21 @@
use crate::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use rpds::HashTrieMapSync as HashTrieMap;
type ValidatorIndex = usize;
#[allow(clippy::len_without_is_empty)]
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
#[derive(Debug, PartialEq, Clone, Default)]
pub struct PubkeyCache {
/// Maintain the number of keys added to the map. It is not sufficient to just use the HashMap
/// len, as it does not increase when duplicate keys are added. Duplicate keys are used during
/// testing.
/// Maintain the number of keys added to the map. It is not sufficient to just use the
/// HashTrieMap len, as it does not increase when duplicate keys are added. Duplicate keys are
/// used during testing.
len: usize,
map: HashMap<PublicKeyBytes, ValidatorIndex>,
map: HashTrieMap<PublicKeyBytes, ValidatorIndex>,
}
impl PubkeyCache {
/// Returns the number of validator indices added to the map so far.
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> ValidatorIndex {
self.len
}
@@ -26,7 +26,7 @@ impl PubkeyCache {
/// that an index is never skipped.
pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool {
if index == self.len {
self.map.insert(pubkey, index);
self.map.insert_mut(pubkey, index);
self.len = self
.len
.checked_add(1)

View File

@@ -1,13 +1,13 @@
use crate::{BeaconStateError, Slot, Validator};
use arbitrary::Arbitrary;
use std::collections::HashSet;
use rpds::HashTrieSetSync as HashTrieSet;
/// Persistent (cheap to clone) cache of all slashed validator indices.
#[derive(Debug, Default, Clone, PartialEq, Arbitrary)]
pub struct SlashingsCache {
latest_block_slot: Option<Slot>,
#[arbitrary(default)]
slashed_validators: HashSet<usize>,
slashed_validators: HashTrieSet<usize>,
}
impl SlashingsCache {
@@ -49,7 +49,7 @@ impl SlashingsCache {
validator_index: usize,
) -> Result<(), BeaconStateError> {
self.check_initialized(block_slot)?;
self.slashed_validators.insert(validator_index);
self.slashed_validators.insert_mut(validator_index);
Ok(())
}

View File

@@ -1,20 +1,14 @@
#![cfg(test)]
use crate::test_utils::*;
use beacon_chain::test_utils::{
interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType,
DEFAULT_ETH1_BLOCK_HASH,
};
use crate::{test_utils::*, ForkName};
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
use beacon_chain::types::{
test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError,
ChainSpec, CloneConfig, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec,
MinimalEthSpec, RelativeEpoch, Slot,
test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella,
BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateMerge, ChainSpec, Domain,
Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, Vector,
};
use safe_arith::SafeArith;
use ssz::Encode;
use state_processing::per_slot_processing;
use std::ops::Mul;
use swap_or_not_shuffle::compute_shuffled_index;
use tree_hash::TreeHash;
pub const MAX_VALIDATOR_COUNT: usize = 129;
pub const SLOT_OFFSET: Slot = Slot::new(1);
@@ -101,7 +95,11 @@ async fn test_beacon_proposer_index<E: EthSpec>() {
// Test with two validators per slot, first validator has zero balance.
let mut state = build_state::<E>((E::slots_per_epoch() as usize).mul(2)).await;
let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec);
state.validators_mut()[slot0_candidate0].effective_balance = 0;
state
.validators_mut()
.get_mut(slot0_candidate0)
.unwrap()
.effective_balance = 0;
test(&state, Slot::new(0), 1);
for i in 1..E::slots_per_epoch() {
test(&state, Slot::from(i), 0);
@@ -159,85 +157,6 @@ async fn cache_initialization() {
test_cache_initialization(&mut state, RelativeEpoch::Next, &spec);
}
fn test_clone_config<E: EthSpec>(base_state: &BeaconState<E>, clone_config: CloneConfig) {
let state = base_state.clone_with(clone_config);
if clone_config.committee_caches {
state
.committee_cache(RelativeEpoch::Previous)
.expect("committee cache exists");
state
.committee_cache(RelativeEpoch::Current)
.expect("committee cache exists");
state
.committee_cache(RelativeEpoch::Next)
.expect("committee cache exists");
state
.total_active_balance()
.expect("total active balance exists");
} else {
state
.committee_cache(RelativeEpoch::Previous)
.expect_err("shouldn't exist");
state
.committee_cache(RelativeEpoch::Current)
.expect_err("shouldn't exist");
state
.committee_cache(RelativeEpoch::Next)
.expect_err("shouldn't exist");
}
if clone_config.pubkey_cache {
assert_ne!(state.pubkey_cache().len(), 0);
} else {
assert_eq!(state.pubkey_cache().len(), 0);
}
if clone_config.exit_cache {
state
.exit_cache()
.check_initialized()
.expect("exit cache exists");
} else {
state
.exit_cache()
.check_initialized()
.expect_err("exit cache doesn't exist");
}
if clone_config.tree_hash_cache {
assert!(state.tree_hash_cache().is_initialized());
} else {
assert!(
!state.tree_hash_cache().is_initialized(),
"{:?}",
clone_config
);
}
}
#[tokio::test]
async fn clone_config() {
let spec = MinimalEthSpec::default_spec();
let mut state = build_state::<MinimalEthSpec>(16).await;
state.build_caches(&spec).unwrap();
state
.update_tree_hash_cache()
.expect("should update tree hash cache");
let num_caches = 6;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0,
slashings_cache: ((i >> 3) & 1) != 0,
tree_hash_cache: ((i >> 4) & 1) != 0,
progressive_balances_cache: ((i >> 5) & 1) != 0,
});
for config in all_configs {
test_clone_config(&state, config);
}
}
/// Tests committee-specific components
#[cfg(test)]
mod committees {
@@ -328,10 +247,9 @@ mod committees {
let harness = get_harness::<E>(validator_count, slot).await;
let mut new_head_state = harness.get_current_state();
let distinct_hashes: Vec<Hash256> = (0..E::epochs_per_historical_vector())
.map(|i| Hash256::from_low_u64_be(i as u64))
.collect();
*new_head_state.randao_mixes_mut() = FixedVector::from(distinct_hashes);
let distinct_hashes =
(0..E::epochs_per_historical_vector()).map(|i| Hash256::from_low_u64_be(i as u64));
*new_head_state.randao_mixes_mut() = Vector::try_from_iter(distinct_hashes).unwrap();
new_head_state
.force_build_committee_cache(RelativeEpoch::Previous, spec)
@@ -487,120 +405,22 @@ fn decode_base_and_altair() {
}
#[test]
fn tree_hash_cache_linear_history() {
let mut rng = XorShiftRng::from_seed([42; 16]);
fn check_num_fields_pow2() {
use metastruct::NumFields;
pub type E = MainnetEthSpec;
let mut state: BeaconState<MainnetEthSpec> =
BeaconState::Base(BeaconStateBase::random_for_test(&mut rng));
let root = state.update_tree_hash_cache().unwrap();
assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]);
/*
* A cache should hash twice without updating the slot.
*/
assert_eq!(
state.update_tree_hash_cache().unwrap(),
root,
"tree hash result should be identical on the same slot"
);
/*
* A cache should not hash after updating the slot but not updating the state roots.
*/
// The tree hash cache needs to be rebuilt since it was dropped when it failed.
state
.update_tree_hash_cache()
.expect("should rebuild cache");
*state.slot_mut() += 1;
assert_eq!(
state.update_tree_hash_cache(),
Err(BeaconStateError::NonLinearTreeHashCacheHistory),
"should not build hash without updating the state root"
);
/*
* The cache should update if the slot and state root are updated.
*/
// The tree hash cache needs to be rebuilt since it was dropped when it failed.
let root = state
.update_tree_hash_cache()
.expect("should rebuild cache");
*state.slot_mut() += 1;
state
.set_state_root(state.slot() - 1, root)
.expect("should set state root");
let root = state.update_tree_hash_cache().unwrap();
assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]);
}
// Check how the cache behaves when there's a distance larger than `SLOTS_PER_HISTORICAL_ROOT`
// since its last update.
#[test]
fn tree_hash_cache_linear_history_long_skip() {
let validator_count = 128;
let keypairs = generate_deterministic_keypairs(validator_count);
let spec = &test_spec::<MinimalEthSpec>();
// This state has a cache that advances normally each slot.
let mut state: BeaconState<MinimalEthSpec> = interop_genesis_state_with_eth1(
&keypairs,
0,
Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
None,
spec,
)
.unwrap();
state.update_tree_hash_cache().unwrap();
// This state retains its original cache until it is updated after a long skip.
let mut original_cache_state = state.clone();
assert!(original_cache_state.tree_hash_cache().is_initialized());
// Advance the states to a slot beyond the historical state root limit, using the state root
// from the first state to avoid touching the original state's cache.
let start_slot = state.slot();
let target_slot = start_slot
.safe_add(MinimalEthSpec::slots_per_historical_root() as u64 + 1)
.unwrap();
let mut prev_state_root;
while state.slot() < target_slot {
prev_state_root = state.update_tree_hash_cache().unwrap();
per_slot_processing(&mut state, None, spec).unwrap();
per_slot_processing(&mut original_cache_state, Some(prev_state_root), spec).unwrap();
for fork_name in ForkName::list_all() {
let num_fields = match fork_name {
ForkName::Base => BeaconStateBase::<E>::NUM_FIELDS,
ForkName::Altair => BeaconStateAltair::<E>::NUM_FIELDS,
ForkName::Merge => BeaconStateMerge::<E>::NUM_FIELDS,
ForkName::Capella => BeaconStateCapella::<E>::NUM_FIELDS,
ForkName::Deneb => BeaconStateDeneb::<E>::NUM_FIELDS,
ForkName::Electra => BeaconStateElectra::<E>::NUM_FIELDS,
};
assert_eq!(
num_fields.next_power_of_two(),
BeaconState::<E>::NUM_FIELDS_POW2
);
}
// The state with the original cache should still be initialized at the starting slot.
assert_eq!(
original_cache_state
.tree_hash_cache()
.initialized_slot()
.unwrap(),
start_slot
);
// Updating the tree hash cache should be successful despite the long skip.
assert_eq!(
original_cache_state.update_tree_hash_cache().unwrap(),
state.update_tree_hash_cache().unwrap()
);
assert_eq!(
original_cache_state
.tree_hash_cache()
.initialized_slot()
.unwrap(),
target_slot
);
}

View File

@@ -1,645 +0,0 @@
#![allow(clippy::arithmetic_side_effects)]
#![allow(clippy::disallowed_methods)]
#![allow(clippy::indexing_slicing)]
use super::Error;
use crate::historical_summary::HistoricalSummaryCache;
use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
use rayon::prelude::*;
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use std::cmp::Ordering;
use tree_hash::{mix_in_length, MerkleHasher, TreeHash};
/// The number of leaves (including padding) on the `BeaconState` Merkle tree.
///
/// ## Note
///
/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the
/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.**
pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32;

/// The number of nodes in the Merkle tree of a validator record.
const NODES_PER_VALIDATOR: usize = 15;

/// The number of validator record tree hash caches stored in each arena.
///
/// This is primarily used for concurrency; if we have 16 validators and set `VALIDATORS_PER_ARENA
/// == 8` then it is possible to do a 2-core concurrent hash.
///
/// Do not set to 0.
const VALIDATORS_PER_ARENA: usize = 4_096;
/// Caching tree hasher for `state.eth1_data_votes`.
///
/// The cache is rebuilt from scratch whenever the eth1 voting period changes or
/// the vote list shrinks; within a voting period the list only ever grows, so
/// incremental updates just append new vote roots.
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
pub struct Eth1DataVotesTreeHashCache<E: EthSpec> {
    arena: CacheArena,
    tree_hash_cache: TreeHashCache,
    // The eth1 voting period (`slot / SlotsPerEth1VotingPeriod`) this cache was built for.
    voting_period: u64,
    // Tree hash root of each `Eth1Data` vote seen so far this voting period.
    roots: VariableList<Hash256, E::SlotsPerEth1VotingPeriod>,
}

impl<E: EthSpec> Eth1DataVotesTreeHashCache<E> {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are
    /// hashed, leaving the internal nodes as all-zeros.
    pub fn new(state: &BeaconState<E>) -> Self {
        let mut arena = CacheArena::default();
        let roots: VariableList<_, _> = state
            .eth1_data_votes()
            .iter()
            .map(|eth1_data| eth1_data.tree_hash_root())
            .collect::<Vec<_>>()
            .into();
        let tree_hash_cache = roots.new_tree_hash_cache(&mut arena);

        Self {
            arena,
            tree_hash_cache,
            voting_period: Self::voting_period(state.slot()),
            roots,
        }
    }

    /// Map a slot to its eth1 voting period index.
    fn voting_period(slot: Slot) -> u64 {
        slot.as_u64() / E::SlotsPerEth1VotingPeriod::to_u64()
    }

    /// Update the cache for `state` and return the tree hash root of its
    /// `eth1_data_votes` list.
    pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState<E>) -> Result<Hash256, Error> {
        // Rebuild if the list shrank (it only grows within a period) or if a
        // new voting period has begun.
        if state.eth1_data_votes().len() < self.roots.len()
            || Self::voting_period(state.slot()) != self.voting_period
        {
            *self = Self::new(state);
        }

        // Append roots for any votes added since the last call.
        state
            .eth1_data_votes()
            .iter()
            .skip(self.roots.len())
            .try_for_each(|eth1_data| self.roots.push(eth1_data.tree_hash_root()))?;

        self.roots
            .recalculate_tree_hash_root(&mut self.arena, &mut self.tree_hash_cache)
            .map_err(Into::into)
    }
}
/// A cache that performs a caching tree hash of the entire `BeaconState` struct.
///
/// This type is a wrapper around the inner cache, which does all the work.
#[derive(Debug, Default, PartialEq, Clone)]
pub struct BeaconTreeHashCache<E: EthSpec> {
    // `None` when uninitialized (e.g. after `uninitialize` or while `take`n).
    inner: Option<BeaconTreeHashCacheInner<E>>,
}

impl<E: EthSpec> BeaconTreeHashCache<E> {
    /// Build an initialized cache for `state`.
    pub fn new(state: &BeaconState<E>) -> Self {
        Self {
            inner: Some(BeaconTreeHashCacheInner::new(state)),
        }
    }

    /// Returns `true` if the inner cache is present.
    pub fn is_initialized(&self) -> bool {
        self.inner.is_some()
    }

    /// Move the inner cache out so that the containing `BeaconState` can be borrowed.
    pub fn take(&mut self) -> Option<BeaconTreeHashCacheInner<E>> {
        self.inner.take()
    }

    /// Restore the inner cache after using `take`.
    pub fn restore(&mut self, inner: BeaconTreeHashCacheInner<E>) {
        self.inner = Some(inner);
    }

    /// Make the cache empty.
    pub fn uninitialize(&mut self) {
        self.inner = None;
    }

    /// Return the slot at which the cache was last updated.
    ///
    /// This should probably only be used during testing.
    pub fn initialized_slot(&self) -> Option<Slot> {
        Some(self.inner.as_ref()?.previous_state?.1)
    }
}
/// The inner tree hash cache: one sub-cache per mutable `BeaconState` field,
/// plus tracking of the last root produced so linear history can be enforced.
#[derive(Debug, PartialEq, Clone)]
pub struct BeaconTreeHashCacheInner<E: EthSpec> {
    /// Tracks the previously generated state root to ensure the next state root provided descends
    /// directly from this state.
    previous_state: Option<(Hash256, Slot)>,
    // Validators cache
    validators: ValidatorsListTreeHashCache,
    // Arenas
    fixed_arena: CacheArena,
    balances_arena: CacheArena,
    slashings_arena: CacheArena,
    // Caches
    block_roots: TreeHashCache,
    state_roots: TreeHashCache,
    historical_roots: TreeHashCache,
    historical_summaries: OptionalTreeHashCache,
    balances: TreeHashCache,
    randao_mixes: TreeHashCache,
    slashings: TreeHashCache,
    eth1_data_votes: Eth1DataVotesTreeHashCache<E>,
    inactivity_scores: OptionalTreeHashCache,
    // Participation caches
    previous_epoch_participation: OptionalTreeHashCache,
    current_epoch_participation: OptionalTreeHashCache,
}

impl<E: EthSpec> BeaconTreeHashCacheInner<E> {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are
    /// hashed, leaving the internal nodes as all-zeros.
    pub fn new(state: &BeaconState<E>) -> Self {
        let mut fixed_arena = CacheArena::default();
        let block_roots = state.block_roots().new_tree_hash_cache(&mut fixed_arena);
        let state_roots = state.state_roots().new_tree_hash_cache(&mut fixed_arena);
        let historical_roots = state
            .historical_roots()
            .new_tree_hash_cache(&mut fixed_arena);
        // Post-Capella fields are optional: build their caches only if present.
        let historical_summaries = OptionalTreeHashCache::new(
            state
                .historical_summaries()
                .ok()
                .map(HistoricalSummaryCache::new)
                .as_ref(),
        );
        let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena);

        let validators = ValidatorsListTreeHashCache::new::<E>(state.validators());

        // Balances and slashings get their own arenas so they can be updated
        // independently of the fixed-size-field arena.
        let mut balances_arena = CacheArena::default();
        let balances = state.balances().new_tree_hash_cache(&mut balances_arena);

        let mut slashings_arena = CacheArena::default();
        let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena);

        let inactivity_scores = OptionalTreeHashCache::new(state.inactivity_scores().ok());

        let previous_epoch_participation = OptionalTreeHashCache::new(
            state
                .previous_epoch_participation()
                .ok()
                .map(ParticipationList::new)
                .as_ref(),
        );
        let current_epoch_participation = OptionalTreeHashCache::new(
            state
                .current_epoch_participation()
                .ok()
                .map(ParticipationList::new)
                .as_ref(),
        );

        Self {
            previous_state: None,
            validators,
            fixed_arena,
            balances_arena,
            slashings_arena,
            block_roots,
            state_roots,
            historical_roots,
            historical_summaries,
            balances,
            randao_mixes,
            slashings,
            inactivity_scores,
            eth1_data_votes: Eth1DataVotesTreeHashCache::new(state),
            previous_epoch_participation,
            current_epoch_participation,
        }
    }

    /// Re-hash every field of `state` and return the flat list of leaves to be
    /// merkleized into the state root. Leaf order matches the SSZ field order
    /// of the `BeaconState` variant in use.
    pub fn recalculate_tree_hash_leaves(
        &mut self,
        state: &BeaconState<E>,
    ) -> Result<Vec<Hash256>, Error> {
        let mut leaves = vec![
            // Genesis data leaves.
            state.genesis_time().tree_hash_root(),
            state.genesis_validators_root().tree_hash_root(),
            // Current fork data leaves.
            state.slot().tree_hash_root(),
            state.fork().tree_hash_root(),
            state.latest_block_header().tree_hash_root(),
            // Roots leaves.
            state
                .block_roots()
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?,
            state
                .state_roots()
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?,
            state
                .historical_roots()
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?,
            // Eth1 Data leaves.
            state.eth1_data().tree_hash_root(),
            self.eth1_data_votes.recalculate_tree_hash_root(state)?,
            state.eth1_deposit_index().tree_hash_root(),
            // Validator leaves.
            self.validators
                .recalculate_tree_hash_root(state.validators())?,
            state
                .balances()
                .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?,
            state
                .randao_mixes()
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?,
            state
                .slashings()
                .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?,
        ];

        // Participation: phase0 uses pending attestations, Altair+ uses
        // epoch participation flags.
        if let BeaconState::Base(state) = state {
            leaves.push(state.previous_epoch_attestations.tree_hash_root());
            leaves.push(state.current_epoch_attestations.tree_hash_root());
        } else {
            leaves.push(
                self.previous_epoch_participation
                    .recalculate_tree_hash_root(&ParticipationList::new(
                        state.previous_epoch_participation()?,
                    ))?,
            );
            leaves.push(
                self.current_epoch_participation
                    .recalculate_tree_hash_root(&ParticipationList::new(
                        state.current_epoch_participation()?,
                    ))?,
            );
        }

        // Checkpoint leaves
        leaves.push(state.justification_bits().tree_hash_root());
        leaves.push(state.previous_justified_checkpoint().tree_hash_root());
        leaves.push(state.current_justified_checkpoint().tree_hash_root());
        leaves.push(state.finalized_checkpoint().tree_hash_root());

        // Inactivity & light-client sync committees (Altair and later).
        if let Ok(inactivity_scores) = state.inactivity_scores() {
            leaves.push(
                self.inactivity_scores
                    .recalculate_tree_hash_root(inactivity_scores)?,
            );
        }
        if let Ok(current_sync_committee) = state.current_sync_committee() {
            leaves.push(current_sync_committee.tree_hash_root());
        }
        if let Ok(next_sync_committee) = state.next_sync_committee() {
            leaves.push(next_sync_committee.tree_hash_root());
        }

        // Execution payload (merge and later).
        if let Ok(payload_header) = state.latest_execution_payload_header() {
            leaves.push(payload_header.tree_hash_root());
        }

        // Withdrawal indices (Capella and later).
        if let Ok(next_withdrawal_index) = state.next_withdrawal_index() {
            leaves.push(next_withdrawal_index.tree_hash_root());
        }
        if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() {
            leaves.push(next_withdrawal_validator_index.tree_hash_root());
        }

        // Historical roots/summaries (Capella and later).
        if let Ok(historical_summaries) = state.historical_summaries() {
            leaves.push(
                self.historical_summaries.recalculate_tree_hash_root(
                    &HistoricalSummaryCache::new(historical_summaries),
                )?,
            );
        }

        Ok(leaves)
    }

    /// Updates the cache and returns the tree hash root for the given `state`.
    ///
    /// The provided `state` should be a descendant of the last `state` given to this function, or
    /// the `Self::new` function. If the state is more than `SLOTS_PER_HISTORICAL_ROOT` slots
    /// after `self.previous_state` then the whole cache will be re-initialized.
    pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState<E>) -> Result<Hash256, Error> {
        // If this cache has previously produced a root, ensure that it is in the state root
        // history of this state.
        //
        // This ensures that the states applied have a linear history, this
        // allows us to make assumptions about how the state changes over times and produce a more
        // efficient algorithm.
        if let Some((previous_root, previous_slot)) = self.previous_state {
            // The previously-hashed state must not be newer than `state`.
            if previous_slot > state.slot() {
                return Err(Error::TreeHashCacheSkippedSlot {
                    cache: previous_slot,
                    state: state.slot(),
                });
            }

            // If the state is newer, the previous root must be in the history of the given state.
            // If the previous slot is out of range of the `state_roots` array (indicating a long
            // gap between the cache's last use and the current state) then we re-initialize.
            match state.get_state_root(previous_slot) {
                Ok(state_previous_root) if *state_previous_root == previous_root => {}
                Ok(_) => return Err(Error::NonLinearTreeHashCacheHistory),
                Err(Error::SlotOutOfBounds) => {
                    *self = Self::new(state);
                }
                Err(e) => return Err(e),
            }
        }

        let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES);

        let leaves = self.recalculate_tree_hash_leaves(state)?;
        for leaf in leaves {
            hasher.write(leaf.as_bytes())?;
        }

        let root = hasher.finish()?;

        // Record (root, slot) so the next call can verify linear history.
        self.previous_state = Some((root, state.slot()));

        Ok(root)
    }

    /// Updates the cache and provides the root of the given `validators`.
    pub fn recalculate_validators_tree_hash_root(
        &mut self,
        validators: &[Validator],
    ) -> Result<Hash256, Error> {
        self.validators.recalculate_tree_hash_root(validators)
    }
}
/// A specialized cache for computing the tree hash root of `state.validators`.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
struct ValidatorsListTreeHashCache {
    list_arena: CacheArena,
    list_cache: TreeHashCache,
    values: ParallelValidatorTreeHash,
}

impl ValidatorsListTreeHashCache {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees but does not
    /// perform any hashing.
    fn new<E: EthSpec>(validators: &[Validator]) -> Self {
        let mut list_arena = CacheArena::default();
        Self {
            list_cache: TreeHashCache::new(
                &mut list_arena,
                int_log(E::ValidatorRegistryLimit::to_usize()),
                validators.len(),
            ),
            list_arena,
            values: ParallelValidatorTreeHash::new(validators),
        }
    }

    /// Updates the cache and returns the tree hash root for the given `state`.
    ///
    /// This function makes assumptions that the `validators` list will only change in accordance
    /// with valid per-block/per-slot state transitions.
    fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result<Hash256, Error> {
        // Take the arena out of `self` so it can be mutably borrowed alongside
        // `self.list_cache` below; it is restored before returning.
        let mut list_arena = std::mem::take(&mut self.list_arena);

        // Hash every validator (in parallel) and flatten the per-arena leaves.
        let leaves = self.values.leaves(validators)?;
        let num_leaves = leaves.iter().map(|arena| arena.len()).sum();
        let leaves_iter = ForcedExactSizeIterator {
            iter: leaves.into_iter().flatten().map(|h| h.to_fixed_bytes()),
            len: num_leaves,
        };

        let list_root = self
            .list_cache
            .recalculate_merkle_root(&mut list_arena, leaves_iter)?;
        self.list_arena = list_arena;

        // Mix in the list length, per SSZ list hashing rules.
        Ok(mix_in_length(&list_root, validators.len()))
    }
}
/// Provides a wrapper around some `iter` if the number of items in the iterator is known to the
/// programmer but not the compiler. This allows use of `ExactSizeIterator` in some occasions.
///
/// Care should be taken to ensure `len` is accurate.
struct ForcedExactSizeIterator<I> {
    iter: I,
    len: usize,
}

impl<I> Iterator for ForcedExactSizeIterator<I>
where
    I: Iterator,
{
    type Item = I::Item;

    // Delegate straight to the wrapped iterator; `len` is never consulted here.
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }
}

impl<I> ExactSizeIterator for ForcedExactSizeIterator<I>
where
    I: Iterator,
{
    // Report the caller-asserted length rather than anything derived from `iter`.
    fn len(&self) -> usize {
        self.len
    }
}
/// Provides a cache for each of the `Validator` objects in `state.validators` and computes the
/// roots of these using Rayon parallelization.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct ParallelValidatorTreeHash {
    /// Each arena and its associated sub-trees.
    arenas: Vec<(CacheArena, Vec<TreeHashCache>)>,
}

impl ParallelValidatorTreeHash {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees but does not
    /// perform any hashing.
    fn new(validators: &[Validator]) -> Self {
        // Number of arenas, rounding up so a partial final arena is counted;
        // at least one arena even for an empty validator list.
        let num_arenas = std::cmp::max(
            1,
            (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA,
        );

        // NOTE(review): when `validators.len()` is a non-zero exact multiple of
        // `VALIDATORS_PER_ARENA`, `len % VALIDATORS_PER_ARENA` is 0 and the final
        // arena's pre-allocated capacity computes to 0. This appears to affect
        // only pre-allocation, not correctness — TODO confirm `with_capacity`
        // semantics in `cached_tree_hash`.
        let mut arenas = (1..=num_arenas)
            .map(|i| {
                let num_validators = if i == num_arenas {
                    validators.len() % VALIDATORS_PER_ARENA
                } else {
                    VALIDATORS_PER_ARENA
                };
                NODES_PER_VALIDATOR * num_validators
            })
            .map(|capacity| (CacheArena::with_capacity(capacity), vec![]))
            .collect::<Vec<_>>();

        // Assign each validator's sub-cache to its arena by index.
        validators.iter().enumerate().for_each(|(i, v)| {
            let (arena, caches) = &mut arenas[i / VALIDATORS_PER_ARENA];
            caches.push(v.new_tree_hash_cache(arena))
        });

        Self { arenas }
    }

    /// Returns the number of validators stored in self.
    fn len(&self) -> usize {
        self.arenas.last().map_or(0, |last| {
            // Subtraction cannot underflow because `.last()` ensures the `.len() > 0`.
            (self.arenas.len() - 1) * VALIDATORS_PER_ARENA + last.1.len()
        })
    }

    /// Updates the caches for each `Validator` in `validators` and returns a list that maps 1:1
    /// with `validators` to the hash of each validator.
    ///
    /// This function makes assumptions that the `validators` list will only change in accordance
    /// with valid per-block/per-slot state transitions.
    fn leaves(&mut self, validators: &[Validator]) -> Result<Vec<Vec<Hash256>>, Error> {
        // Grow (never shrink) the per-validator caches to match `validators`.
        match self.len().cmp(&validators.len()) {
            Ordering::Less => validators.iter().skip(self.len()).for_each(|v| {
                if self
                    .arenas
                    .last()
                    .map_or(true, |last| last.1.len() >= VALIDATORS_PER_ARENA)
                {
                    // The last arena is full (or there are none yet): open a new one.
                    let mut arena = CacheArena::default();
                    let cache = v.new_tree_hash_cache(&mut arena);
                    self.arenas.push((arena, vec![cache]))
                } else {
                    let (arena, caches) = &mut self
                        .arenas
                        .last_mut()
                        .expect("Cannot reach this block if arenas is empty.");
                    caches.push(v.new_tree_hash_cache(arena))
                }
            }),
            Ordering::Greater => {
                // The registry never shrinks during valid state transitions.
                return Err(Error::ValidatorRegistryShrunk);
            }
            Ordering::Equal => (),
        }

        // Re-hash each arena's validators, with Rayon parallelism across arenas.
        self.arenas
            .par_iter_mut()
            .enumerate()
            .map(|(arena_index, (arena, caches))| {
                caches
                    .iter_mut()
                    .enumerate()
                    .map(move |(cache_index, cache)| {
                        let val_index = (arena_index * VALIDATORS_PER_ARENA) + cache_index;
                        let validator = validators
                            .get(val_index)
                            .ok_or(Error::TreeHashCacheInconsistent)?;
                        validator
                            .recalculate_tree_hash_root(arena, cache)
                            .map_err(Error::CachedTreeHashError)
                    })
                    .collect()
            })
            .collect()
    }
}
/// A tree hash cache for an optional `BeaconState` field (e.g. one that only exists after a
/// fork activates).
#[derive(Debug, PartialEq, Clone)]
pub struct OptionalTreeHashCache {
    /// `None` until the cached field first exists; populated lazily on first recalculation.
    inner: Option<OptionalTreeHashCacheInner>,
}
/// The arena/cache pair backing an initialized `OptionalTreeHashCache`.
#[derive(Debug, PartialEq, Clone)]
pub struct OptionalTreeHashCacheInner {
    /// Backing storage for the nodes of `tree_hash_cache`.
    arena: CacheArena,
    /// Cached Merkle tree for the wrapped item.
    tree_hash_cache: TreeHashCache,
}
impl OptionalTreeHashCache {
/// Initialize a new cache if `item.is_some()`.
fn new<C: CachedTreeHash<TreeHashCache>>(item: Option<&C>) -> Self {
let inner = item.map(OptionalTreeHashCacheInner::new);
Self { inner }
}
/// Compute the tree hash root for the given `item`.
///
/// This function will initialize the inner cache if necessary (e.g. when crossing the fork).
fn recalculate_tree_hash_root<C: CachedTreeHash<TreeHashCache>>(
&mut self,
item: &C,
) -> Result<Hash256, Error> {
let cache = self
.inner
.get_or_insert_with(|| OptionalTreeHashCacheInner::new(item));
item.recalculate_tree_hash_root(&mut cache.arena, &mut cache.tree_hash_cache)
.map_err(Into::into)
}
}
impl OptionalTreeHashCacheInner {
    /// Allocate a fresh arena and build the tree hash cache for `item` inside it.
    fn new<C: CachedTreeHash<TreeHashCache>>(item: &C) -> Self {
        let mut arena = CacheArena::default();
        OptionalTreeHashCacheInner {
            // The cache is built first, borrowing the arena; the arena is then moved in.
            tree_hash_cache: item.new_tree_hash_cache(&mut arena),
            arena,
        }
    }
}
// `arbitrary` support for fuzzing: the input bytes are ignored and a default (empty) cache is
// always produced, since the cache is derived state rather than fuzz-relevant data.
impl<E: EthSpec> arbitrary::Arbitrary<'_> for BeaconTreeHashCache<E> {
    fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        Ok(Self::default())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{MainnetEthSpec, ParticipationFlags};

    /// A freshly-built validator cache should allocate exactly `NODES_PER_VALIDATOR` nodes in
    /// the arena.
    #[test]
    fn validator_node_count() {
        let mut arena = CacheArena::default();
        let v = Validator::default();
        let _cache = v.new_tree_hash_cache(&mut arena);
        assert_eq!(arena.backing_len(), NODES_PER_VALIDATOR);
    }

    /// `OptionalTreeHashCache` should initialize lazily, be stable across repeated
    /// recalculations, and agree with the uncached `tree_hash_root`.
    #[test]
    fn participation_flags() {
        type N = <MainnetEthSpec as EthSpec>::ValidatorRegistryLimit;
        // 65 flags; presumably enough to span multiple packed leaves — confirm against the
        // SSZ chunk size if this test is extended.
        let len = 65;
        let mut test_flag = ParticipationFlags::default();
        test_flag.add_flag(0).unwrap();
        let epoch_participation = VariableList::<_, N>::new(vec![test_flag; len]).unwrap();

        // Start uninitialized; the first call must populate the inner cache.
        let mut cache = OptionalTreeHashCache { inner: None };

        let cache_root = cache
            .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation))
            .unwrap();
        let recalc_root = cache
            .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation))
            .unwrap();

        assert_eq!(cache_root, recalc_root, "recalculated root should match");
        assert_eq!(
            cache_root,
            epoch_participation.tree_hash_root(),
            "cached root should match uncached"
        );
    }
}

View File

@@ -1,7 +1,7 @@
use crate::test_utils::TestRandom;
use crate::{
beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob,
EthSpec, Hash256, SignedBeaconBlockHeader, Slot,
EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList,
};
use crate::{KzgProofs, SignedBeaconBlock};
use bls::Signature;
@@ -16,7 +16,6 @@ use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Serialize};
use ssz::Encode;
use ssz_derive::{Decode, Encode};
use ssz_types::{FixedVector, VariableList};
use std::fmt::Debug;
use std::hash::Hash;
use std::sync::Arc;

View File

@@ -32,7 +32,8 @@ use tree_hash_derive::TreeHash;
tree_hash(enum_behaviour = "transparent")
),
cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"),
partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant")
partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"),
map_ref_into(ExecutionPayloadHeader)
)]
#[derive(
Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary,
@@ -364,6 +365,27 @@ impl<E: EthSpec> TryFrom<ExecutionPayloadHeader<E>> for ExecutionPayloadHeaderDe
}
}
impl<'a, E: EthSpec> ExecutionPayloadHeaderRefMut<'a, E> {
    /// Overwrite the referenced header in place with `header`.
    ///
    /// Each arm converts via `try_into`, so this errors (with a `BeaconStateError`) if
    /// `header` cannot be converted to the referenced fork's variant — presumably
    /// `IncorrectStateVariant`; confirm against the `TryFrom` impls.
    pub fn replace(self, header: ExecutionPayloadHeader<E>) -> Result<(), BeaconStateError> {
        match self {
            ExecutionPayloadHeaderRefMut::Merge(mut_ref) => {
                *mut_ref = header.try_into()?;
            }
            ExecutionPayloadHeaderRefMut::Capella(mut_ref) => {
                *mut_ref = header.try_into()?;
            }
            ExecutionPayloadHeaderRefMut::Deneb(mut_ref) => {
                *mut_ref = header.try_into()?;
            }
            ExecutionPayloadHeaderRefMut::Electra(mut_ref) => {
                *mut_ref = header.try_into()?;
            }
        }
        Ok(())
    }
}
impl<E: EthSpec> TryFrom<ExecutionPayloadHeader<E>> for ExecutionPayloadHeaderElectra<E> {
type Error = BeaconStateError;
fn try_from(header: ExecutionPayloadHeader<E>) -> Result<Self, Self::Error> {

View File

@@ -23,8 +23,10 @@ use tree_hash_derive::TreeHash;
)]
#[arbitrary(bound = "E: EthSpec")]
pub struct HistoricalBatch<E: EthSpec> {
pub block_roots: FixedVector<Hash256, E::SlotsPerHistoricalRoot>,
pub state_roots: FixedVector<Hash256, E::SlotsPerHistoricalRoot>,
#[test_random(default)]
pub block_roots: Vector<Hash256, E::SlotsPerHistoricalRoot>,
#[test_random(default)]
pub state_roots: Vector<Hash256, E::SlotsPerHistoricalRoot>,
}
#[cfg(test)]

View File

@@ -98,7 +98,6 @@ pub mod sync_committee_contribution;
pub mod sync_committee_message;
pub mod sync_selection_proof;
pub mod sync_subnet_id;
mod tree_hash_impls;
pub mod validator_registration_data;
pub mod withdrawal;
@@ -132,7 +131,7 @@ pub use crate::beacon_block_body::{
};
pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
pub use crate::beacon_state::{Error as BeaconStateError, *};
pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList};
pub use crate::bls_to_execution_change::BlsToExecutionChange;
pub use crate::chain_spec::{ChainSpec, Config, Domain};
@@ -257,8 +256,7 @@ pub use bls::{
AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey,
Signature, SignatureBytes,
};
pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG};
pub use milhouse::{self, List, Vector};
pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList};
pub use superstruct::superstruct;

View File

@@ -1,8 +1,7 @@
use super::{BeaconState, EthSpec, FixedVector, Hash256, LightClientHeader, SyncCommittee};
use crate::{
light_client_update::*, test_utils::TestRandom, ChainSpec, ForkName, ForkVersionDeserialize,
LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock,
Slot,
light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector,
ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair,
LightClientHeaderCapella, LightClientHeaderDeneb, SignedBeaconBlock, Slot, SyncCommittee,
};
use derivative::Derivative;
use serde::{Deserialize, Deserializer, Serialize};

View File

@@ -58,6 +58,7 @@ pub struct LightClientFinalityUpdate<E: EthSpec> {
#[superstruct(only(Deneb), partial_getter(rename = "finalized_header_deneb"))]
pub finalized_header: LightClientHeaderDeneb<E>,
/// Merkle proof attesting finalized header.
#[test_random(default)]
pub finality_branch: FixedVector<Hash256, FinalizedRootProofLen>,
/// current sync aggregate
pub sync_aggregate: SyncAggregate<E>,

View File

@@ -37,6 +37,7 @@ pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4;
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
SszTypesError(ssz_types::Error),
MilhouseError(milhouse::Error),
BeaconStateError(beacon_state::Error),
ArithError(ArithError),
AltairForkNotActive,
@@ -65,6 +66,12 @@ impl From<ArithError> for Error {
}
}
impl From<milhouse::Error> for Error {
fn from(e: milhouse::Error) -> Error {
Error::MilhouseError(e)
}
}
/// A LightClientUpdate is the update we request solely to either complete the bootstrapping process,
/// or to sync up to the last committee period, we need to have one ready for each ALTAIR period
/// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD].

View File

@@ -87,7 +87,7 @@ where
}
}
impl<T, N: Unsigned> TestRandom for FixedVector<T, N>
impl<T, N: Unsigned> TestRandom for ssz_types::FixedVector<T, N>
where
T: TestRandom,
{

View File

@@ -1,165 +0,0 @@
//! This module contains custom implementations of `CachedTreeHash` for ETH2-specific types.
//!
//! It makes some assumptions about the layouts and update patterns of other structs in this
//! crate, and should be updated carefully whenever those structs are changed.
use crate::{Epoch, Hash256, PublicKeyBytes, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache};
use int_to_bytes::int_to_fixed_bytes32;
use tree_hash::merkle_root;
/// Number of struct fields on `Validator`.
///
/// Used to size the per-validator Merkle cache in `new_tree_hash_cache` and as the bound for
/// `process_field_by_index`.
const NUM_VALIDATOR_FIELDS: usize = 8;
impl CachedTreeHash<TreeHashCache> for Validator {
    /// Allocate a cache with one leaf per `Validator` field.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        TreeHashCache::new(arena, int_log(NUM_VALIDATOR_FIELDS), NUM_VALIDATOR_FIELDS)
    }

    /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition.
    ///
    /// Specifically, we assume that the `pubkey` field is constant.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        // Update each leaf in place, collecting the indices of those that changed so only
        // the affected internal nodes are re-hashed. On the first run (`!cache.initialized`)
        // every field is written unconditionally.
        let dirty_indices = cache
            .leaves()
            .iter_mut(arena)?
            .enumerate()
            .flat_map(|(i, leaf)| {
                // Pubkey field (index 0) is constant.
                if i == 0 && cache.initialized {
                    None
                } else if process_field_by_index(self, i, leaf, !cache.initialized) {
                    Some(i)
                } else {
                    None
                }
            })
            .collect();
        cache.update_merkle_root(arena, dirty_indices)
    }
}
/// Recompute the leaf for field `index` of `validator`, writing the result into `leaf`.
///
/// Returns `true` when `leaf` was rewritten (i.e. the field is dirty or `force_update` is
/// set). Panics if `index` is not a valid `Validator` field index.
fn process_field_by_index(
    validator: &Validator,
    index: usize,
    leaf: &mut Hash256,
    force_update: bool,
) -> bool {
    // Arms mirror the declaration order of the `Validator` struct fields.
    match index {
        0 => process_pubkey_bytes_field(&validator.pubkey, leaf, force_update),
        1 => process_slice_field(
            validator.withdrawal_credentials.as_bytes(),
            leaf,
            force_update,
        ),
        2 => process_u64_field(validator.effective_balance, leaf, force_update),
        3 => process_bool_field(validator.slashed, leaf, force_update),
        4 => process_epoch_field(validator.activation_eligibility_epoch, leaf, force_update),
        5 => process_epoch_field(validator.activation_epoch, leaf, force_update),
        6 => process_epoch_field(validator.exit_epoch, leaf, force_update),
        7 => process_epoch_field(validator.withdrawable_epoch, leaf, force_update),
        _ => panic!(
            "Validator type only has {} fields, {} out of bounds",
            NUM_VALIDATOR_FIELDS, index
        ),
    }
}
/// Merkleize the serialized pubkey and write it into `leaf` if it differs (or on
/// `force_update`). Returns whether the leaf changed.
fn process_pubkey_bytes_field(
    val: &PublicKeyBytes,
    leaf: &mut Hash256,
    force_update: bool,
) -> bool {
    let hashed = merkle_root(val.as_serialized(), 0);
    process_slice_field(hashed.as_bytes(), leaf, force_update)
}
/// Overwrite `leaf` with `new_tree_hash` when they differ or `force_update` is set.
///
/// Returns `true` iff `leaf` was written, i.e. the leaf is dirty.
fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool {
    let dirty = force_update || leaf.as_bytes() != new_tree_hash;
    if dirty {
        leaf.assign_from_slice(new_tree_hash);
    }
    dirty
}
/// Encode a `u64` field as a fixed 32-byte leaf (via `int_to_fixed_bytes32`) and update
/// `leaf` if it changed. Returns whether the leaf changed.
fn process_u64_field(val: u64, leaf: &mut Hash256, force_update: bool) -> bool {
    let encoded = int_to_fixed_bytes32(val);
    process_slice_field(&encoded[..], leaf, force_update)
}
/// Tree hash an `Epoch` field by delegating to its underlying `u64` representation.
fn process_epoch_field(val: Epoch, leaf: &mut Hash256, force_update: bool) -> bool {
    process_u64_field(val.as_u64(), leaf, force_update)
}
/// Tree hash a `bool` field by encoding it as `0` or `1` in a `u64` leaf.
fn process_bool_field(val: bool, leaf: &mut Hash256, force_update: bool) -> bool {
    process_u64_field(val as u64, leaf, force_update)
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::TestRandom;
    use rand::SeedableRng;
    use rand_xorshift::XorShiftRng;
    use tree_hash::TreeHash;

    /// Assert that the cached root matches the uncached `tree_hash_root`, both from a fresh
    /// cache and after a no-op recalculation.
    fn test_validator_tree_hash(v: &Validator) {
        let arena = &mut CacheArena::default();
        let mut cache = v.new_tree_hash_cache(arena);
        // With a fresh cache
        assert_eq!(
            &v.tree_hash_root()[..],
            v.recalculate_tree_hash_root(arena, &mut cache)
                .unwrap()
                .as_bytes(),
            "{:?}",
            v
        );
        // With a completely up-to-date cache
        assert_eq!(
            &v.tree_hash_root()[..],
            v.recalculate_tree_hash_root(arena, &mut cache)
                .unwrap()
                .as_bytes(),
            "{:?}",
            v
        );
    }

    /// All-default fields: the degenerate case.
    #[test]
    fn default_validator() {
        test_validator_tree_hash(&Validator::default());
    }

    /// Explicitly-zeroed epochs (distinct from the default's far-future epochs).
    #[test]
    fn zeroed_validator() {
        let v = Validator {
            activation_eligibility_epoch: Epoch::from(0u64),
            activation_epoch: Epoch::from(0u64),
            ..Default::default()
        };
        test_validator_tree_hash(&v);
    }

    /// Fuzz-style coverage with a fixed seed for reproducibility.
    #[test]
    fn random_validators() {
        let mut rng = XorShiftRng::from_seed([0xf1; 16]);
        let num_validators = 1000;
        (0..num_validators)
            .map(|_| Validator::random_for_test(&mut rng))
            .for_each(|v| test_validator_tree_hash(&v));
    }

    #[test]
    #[allow(clippy::assertions_on_constants)]
    pub fn smallvec_size_check() {
        // If this test fails we need to go and reassess the length of the `SmallVec` in
        // `cached_tree_hash::TreeHashCache`. If the size of the `SmallVec` is too slow we're going
        // to start doing heap allocations for each validator, this will fragment memory and slow
        // us down.
        assert!(NUM_VALIDATOR_FIELDS <= 8,);
    }
}