Tree hash cache arena (#836)

* Start adding interop genesis state to lcli

* Use more efficient method to generate genesis state

* Remove duplicate int_to_bytes32

* Add lcli command to change state genesis time

* Add option to allow VC to start with unsynced BN

* Set VC to do parallel key loading

* Don't default to dummy eth1 backend

* Add endpoint to dump operation pool

* Add metrics for op pool

* Remove state clone for slot notifier

* Add mem size approximation for tree hash cache

* Avoid cloning tree hash when getting head

* Avoid cloning tree hash when getting head

* Add working arena-based cached tree hash

* Add another benchmark

* Add pre-allocation for caches

* Make cache nullable

* Fix bugs in cache tree hash

* Add validator tree hash optimization

* Optimize hash_concat

* Make hash32_concat return fixed-len array

* Fix failing API tests

* Add new beacon state cache struct

* Add validator-specific cache

* Separate list and values arenas

* Add parallel validator registry hashing

* Remove MultiTreeHashCache

* Remove cached tree hash macro

* Fix failing tree hash test

* Address Michael's comments

* Add CachedTreeHash impl for ef tests

* Fix messy merge conflict

* Rename cache struct, add comments

* Rename cache struct, add comments

* Remove unnecessary mutability

* Wrap iter in result

* Tidy cached tree hash

* Address Michael's comments

* Address more comments

* Use ring::Context
This commit is contained in:
Paul Hauner
2020-02-07 12:42:49 +11:00
committed by GitHub
parent f267bf2afe
commit c3182e3c1c
20 changed files with 1341 additions and 378 deletions

View File

@@ -2,7 +2,7 @@ use self::committee_cache::get_active_validator_indices;
use self::exit_cache::ExitCache;
use crate::test_utils::TestRandom;
use crate::*;
use cached_tree_hash::{CachedTreeHash, MultiTreeHashCache, TreeHashCache};
use cached_tree_hash::{CacheArena, CachedTreeHash};
use compare_fields_derive::CompareFields;
use eth2_hashing::hash;
use int_to_bytes::{int_to_bytes4, int_to_bytes8};
@@ -14,16 +14,18 @@ use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
use swap_or_not_shuffle::compute_shuffled_index;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
use tree_hash_derive::{CachedTreeHash, TreeHash};
use tree_hash_derive::TreeHash;
pub use self::committee_cache::CommitteeCache;
pub use eth_spec::*;
pub use tree_hash_cache::BeaconTreeHashCache;
#[macro_use]
mod committee_cache;
mod exit_cache;
mod pubkey_cache;
mod tests;
mod tree_hash_cache;
pub const CACHED_EPOCHS: usize = 3;
const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1;
@@ -61,8 +63,11 @@ pub enum Error {
ExitCacheUninitialized,
CommitteeCacheUninitialized(Option<RelativeEpoch>),
SszTypesError(ssz_types::Error),
TreeHashCacheNotInitialized,
CachedTreeHashError(cached_tree_hash::Error),
InvalidValidatorPubkey(ssz::DecodeError),
ValidatorRegistryShrunk,
TreeHashCacheInconsistent,
}
/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@@ -81,39 +86,6 @@ impl AllowNextEpoch {
}
}
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct BeaconTreeHashCache {
initialized: bool,
block_roots: TreeHashCache,
state_roots: TreeHashCache,
historical_roots: TreeHashCache,
validators: MultiTreeHashCache,
balances: TreeHashCache,
randao_mixes: TreeHashCache,
slashings: TreeHashCache,
}
impl BeaconTreeHashCache {
pub fn is_initialized(&self) -> bool {
self.initialized
}
/// Returns the approximate size of the cache in bytes.
///
/// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers.
/// We focus instead on the lists of hashes, which should massively outweigh the items that we
/// ignore.
pub fn approx_mem_size(&self) -> usize {
self.block_roots.approx_mem_size()
+ self.state_roots.approx_mem_size()
+ self.historical_roots.approx_mem_size()
+ self.validators.approx_mem_size()
+ self.balances.approx_mem_size()
+ self.randao_mixes.approx_mem_size()
+ self.slashings.approx_mem_size()
}
}
/// The state of the `BeaconChain` at some slot.
///
/// Spec v0.9.1
@@ -127,11 +99,9 @@ impl BeaconTreeHashCache {
Encode,
Decode,
TreeHash,
CachedTreeHash,
CompareFields,
)]
#[serde(bound = "T: EthSpec")]
#[cached_tree_hash(type = "BeaconTreeHashCache")]
pub struct BeaconState<T>
where
T: EthSpec,
@@ -144,12 +114,9 @@ where
// History
pub latest_block_header: BeaconBlockHeader,
#[compare_fields(as_slice)]
#[cached_tree_hash(block_roots)]
pub block_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
#[compare_fields(as_slice)]
#[cached_tree_hash(state_roots)]
pub state_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
#[cached_tree_hash(historical_roots)]
pub historical_roots: VariableList<Hash256, T::HistoricalRootsLimit>,
// Ethereum 1.0 chain data
@@ -159,18 +126,14 @@ where
// Registry
#[compare_fields(as_slice)]
#[cached_tree_hash(validators)]
pub validators: VariableList<Validator, T::ValidatorRegistryLimit>,
#[compare_fields(as_slice)]
#[cached_tree_hash(balances)]
pub balances: VariableList<u64, T::ValidatorRegistryLimit>,
// Randomness
#[cached_tree_hash(randao_mixes)]
pub randao_mixes: FixedVector<Hash256, T::EpochsPerHistoricalVector>,
// Slashings
#[cached_tree_hash(slashings)]
pub slashings: FixedVector<u64, T::EpochsPerSlashingsVector>,
// Attestations
@@ -208,7 +171,7 @@ where
#[ssz(skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
pub tree_hash_cache: BeaconTreeHashCache,
pub tree_hash_cache: Option<BeaconTreeHashCache>,
}
impl<T: EthSpec> BeaconState<T> {
@@ -263,7 +226,7 @@ impl<T: EthSpec> BeaconState<T> {
],
pubkey_cache: PubkeyCache::default(),
exit_cache: ExitCache::default(),
tree_hash_cache: BeaconTreeHashCache::default(),
tree_hash_cache: None,
}
}
@@ -928,8 +891,8 @@ impl<T: EthSpec> BeaconState<T> {
/// Initialize but don't fill the tree hash cache, if it isn't already initialized.
pub fn initialize_tree_hash_cache(&mut self) {
if !self.tree_hash_cache.initialized {
self.tree_hash_cache = Self::new_tree_hash_cache();
if self.tree_hash_cache.is_none() {
self.tree_hash_cache = Some(BeaconTreeHashCache::new(self))
}
}
@@ -940,7 +903,7 @@ impl<T: EthSpec> BeaconState<T> {
/// Build the tree hash cache, with blatant disregard for any existing cache.
pub fn force_build_tree_hash_cache(&mut self) -> Result<(), Error> {
self.tree_hash_cache.initialized = false;
self.tree_hash_cache = None;
self.build_tree_hash_cache()
}
@@ -950,16 +913,22 @@ impl<T: EthSpec> BeaconState<T> {
pub fn update_tree_hash_cache(&mut self) -> Result<Hash256, Error> {
self.initialize_tree_hash_cache();
let mut cache = std::mem::replace(&mut self.tree_hash_cache, <_>::default());
let result = self.recalculate_tree_hash_root(&mut cache);
std::mem::replace(&mut self.tree_hash_cache, cache);
let cache = self.tree_hash_cache.take();
Ok(result?)
if let Some(mut cache) = cache {
// Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
// None. There's no need to keep a cache that fails.
let root = cache.recalculate_tree_hash_root(self)?;
self.tree_hash_cache = Some(cache);
Ok(root)
} else {
Err(Error::TreeHashCacheNotInitialized)
}
}
/// Completely drops the tree hash cache, replacing it with a new, empty cache.
pub fn drop_tree_hash_cache(&mut self) {
self.tree_hash_cache = BeaconTreeHashCache::default();
self.tree_hash_cache = None;
}
/// Iterate through all validators and decompress their public key, unless it has already been
@@ -1008,7 +977,7 @@ impl<T: EthSpec> BeaconState<T> {
],
pubkey_cache: PubkeyCache::default(),
exit_cache: ExitCache::default(),
tree_hash_cache: BeaconTreeHashCache::default(),
tree_hash_cache: None,
}
}
@@ -1019,6 +988,24 @@ impl<T: EthSpec> BeaconState<T> {
}
}
/// This implementation primarily exists to satisfy some testing requirements (ef_tests). It is
/// recommended to use the methods directly on the beacon state instead.
impl<T: EthSpec> CachedTreeHash<BeaconTreeHashCache> for BeaconState<T> {
    fn new_tree_hash_cache(&self, _arena: &mut CacheArena) -> BeaconTreeHashCache {
        // The supplied arena is ignored: `BeaconTreeHashCache` allocates and manages its
        // own arenas internally (see `BeaconTreeHashCache::new`).
        BeaconTreeHashCache::new(self)
    }

    fn recalculate_tree_hash_root(
        &self,
        _arena: &mut CacheArena,
        cache: &mut BeaconTreeHashCache,
    ) -> Result<Hash256, cached_tree_hash::Error> {
        // The specific `beacon_state::Error` detail is discarded here and collapsed into
        // the generic `CacheInconsistent` variant, since this trait's error type cannot
        // carry it.
        cache
            .recalculate_tree_hash_root(self)
            .map_err(|_| cached_tree_hash::Error::CacheInconsistent)
    }
}
impl From<RelativeEpochError> for Error {
fn from(e: RelativeEpochError) -> Error {
Error::RelativeEpochError(e)

View File

@@ -0,0 +1,274 @@
use super::Error;
use crate::{BeaconState, EthSpec, Hash256, Unsigned, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
use rayon::prelude::*;
use ssz_derive::{Decode, Encode};
use tree_hash::{mix_in_length, TreeHash};
/// The number of validator record tree hash caches stored in each arena.
///
/// This is primarily used for concurrency; if we have 16 validators and set `VALIDATORS_PER_ARENA
/// == 8` then it is possible to do a 2-core concurrent hash.
///
/// Do not set to 0: it is used as a divisor when partitioning validators into arenas.
const VALIDATORS_PER_ARENA: usize = 4_096;
/// A cache that performs a caching tree hash of the entire `BeaconState` struct.
///
/// Holds one `TreeHashCache` per cached field of the state, plus the arenas that back
/// those caches. Field order is significant for the `Encode`/`Decode` derives.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct BeaconTreeHashCache {
    // Validators cache (manages its own arenas; see `ValidatorsListTreeHashCache`).
    validators: ValidatorsListTreeHashCache,
    // Arenas: backing storage for the hashes of the caches below.
    fixed_arena: CacheArena,
    balances_arena: CacheArena,
    slashings_arena: CacheArena,
    // Caches: one per tree-hashed vector/list field of `BeaconState`.
    block_roots: TreeHashCache,
    state_roots: TreeHashCache,
    historical_roots: TreeHashCache,
    balances: TreeHashCache,
    randao_mixes: TreeHashCache,
    slashings: TreeHashCache,
}
impl BeaconTreeHashCache {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees but does not
    /// perform any hashing.
    pub fn new<T: EthSpec>(state: &BeaconState<T>) -> Self {
        // The fixed-length vector fields share a single arena.
        let mut fixed_arena = CacheArena::default();
        let block_roots = state.block_roots.new_tree_hash_cache(&mut fixed_arena);
        let state_roots = state.state_roots.new_tree_hash_cache(&mut fixed_arena);
        let historical_roots = state.historical_roots.new_tree_hash_cache(&mut fixed_arena);
        let randao_mixes = state.randao_mixes.new_tree_hash_cache(&mut fixed_arena);

        // The validators cache allocates and manages its own arenas.
        let validators = ValidatorsListTreeHashCache::new::<T>(&state.validators[..]);

        // `balances` and `slashings` each get a dedicated arena.
        let mut balances_arena = CacheArena::default();
        let balances = state.balances.new_tree_hash_cache(&mut balances_arena);

        let mut slashings_arena = CacheArena::default();
        let slashings = state.slashings.new_tree_hash_cache(&mut slashings_arena);

        Self {
            validators,
            fixed_arena,
            balances_arena,
            slashings_arena,
            block_roots,
            state_roots,
            historical_roots,
            balances,
            randao_mixes,
            slashings,
        }
    }

    /// Updates the cache and returns the tree hash root for the given `state`.
    ///
    /// The provided `state` should be a descendant of the last `state` given to this function, or
    /// the `Self::new` function.
    pub fn recalculate_tree_hash_root<T: EthSpec>(
        &mut self,
        state: &BeaconState<T>,
    ) -> Result<Hash256, Error> {
        // Collect the root of each `BeaconState` field, in field order, then Merkleize the
        // collected leaves to obtain the state root. Uncached fields are hashed afresh via
        // `tree_hash_root()`; cached fields go through their `TreeHashCache`.
        let mut leaves = vec![];

        leaves.append(&mut state.genesis_time.tree_hash_root());
        leaves.append(&mut state.slot.tree_hash_root());
        leaves.append(&mut state.fork.tree_hash_root());
        leaves.append(&mut state.latest_block_header.tree_hash_root());
        leaves.extend_from_slice(
            state
                .block_roots
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?
                .as_bytes(),
        );
        leaves.extend_from_slice(
            state
                .state_roots
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?
                .as_bytes(),
        );
        leaves.extend_from_slice(
            state
                .historical_roots
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?
                .as_bytes(),
        );
        leaves.append(&mut state.eth1_data.tree_hash_root());
        leaves.append(&mut state.eth1_data_votes.tree_hash_root());
        leaves.append(&mut state.eth1_deposit_index.tree_hash_root());
        leaves.extend_from_slice(
            self.validators
                .recalculate_tree_hash_root(&state.validators[..])?
                .as_bytes(),
        );
        leaves.extend_from_slice(
            state
                .balances
                .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?
                .as_bytes(),
        );
        leaves.extend_from_slice(
            state
                .randao_mixes
                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?
                .as_bytes(),
        );
        leaves.extend_from_slice(
            state
                .slashings
                .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?
                .as_bytes(),
        );
        leaves.append(&mut state.previous_epoch_attestations.tree_hash_root());
        leaves.append(&mut state.current_epoch_attestations.tree_hash_root());
        leaves.append(&mut state.justification_bits.tree_hash_root());
        leaves.append(&mut state.previous_justified_checkpoint.tree_hash_root());
        leaves.append(&mut state.current_justified_checkpoint.tree_hash_root());
        leaves.append(&mut state.finalized_checkpoint.tree_hash_root());

        Ok(Hash256::from_slice(&tree_hash::merkle_root(&leaves, 0)))
    }
}
/// A specialized cache for computing the tree hash root of `state.validators`.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
struct ValidatorsListTreeHashCache {
    // Arena backing the outer list cache.
    list_arena: CacheArena,
    // Cache for the Merkle tree built over the per-validator roots.
    list_cache: TreeHashCache,
    // Per-validator caches, updated in parallel (see `ParallelValidatorTreeHash`).
    values: ParallelValidatorTreeHash,
}
impl ValidatorsListTreeHashCache {
    /// Instantiates a new cache.
    ///
    /// Allocates the necessary memory to store all of the cached Merkle trees but does not
    /// perform any hashing.
    fn new<E: EthSpec>(validators: &[Validator]) -> Self {
        let mut list_arena = CacheArena::default();
        Self {
            list_cache: TreeHashCache::new(
                &mut list_arena,
                int_log(E::ValidatorRegistryLimit::to_usize()),
                validators.len(),
            ),
            list_arena,
            values: ParallelValidatorTreeHash::new::<E>(validators),
        }
    }

    /// Updates the cache and returns the tree hash root for the given `validators` list.
    ///
    /// This function makes assumptions that the `validators` list will only change in accordance
    /// with valid per-block/per-slot state transitions.
    fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result<Hash256, Error> {
        // Hash each validator (in parallel) and flatten the per-arena results into one
        // contiguous list of 32-byte leaves for the outer list tree.
        let leaves = self
            .values
            .leaves(validators)?
            .into_iter()
            .flatten()
            .map(|h| h.to_fixed_bytes())
            .collect::<Vec<_>>();

        // `list_cache` and `list_arena` are disjoint fields, so they may be borrowed
        // simultaneously; no `mem::replace` dance is required.
        let list_root = self
            .list_cache
            .recalculate_merkle_root(&mut self.list_arena, leaves.into_iter())?;

        // Mix in the list length to complete the list hash.
        Ok(Hash256::from_slice(&mix_in_length(
            list_root.as_bytes(),
            validators.len(),
        )))
    }
}
/// Provides a cache for each of the `Validator` objects in `state.validators` and computes the
/// roots of these using Rayon parallelization.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct ParallelValidatorTreeHash {
    /// Each arena and its associated sub-trees (up to `VALIDATORS_PER_ARENA` caches per arena;
    /// only the last arena may be partially full).
    arenas: Vec<(CacheArena, Vec<TreeHashCache>)>,
}
impl ParallelValidatorTreeHash {
/// Instantiates a new cache.
///
/// Allocates the necessary memory to store all of the cached Merkle trees but does perform any
/// hashing.
fn new<E: EthSpec>(validators: &[Validator]) -> Self {
let num_arenas = (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA;
let mut arenas = vec![(CacheArena::default(), vec![]); num_arenas];
validators.iter().enumerate().for_each(|(i, v)| {
let (arena, caches) = &mut arenas[i / VALIDATORS_PER_ARENA];
caches.push(v.new_tree_hash_cache(arena))
});
Self { arenas }
}
/// Returns the number of validators stored in self.
fn len(&self) -> usize {
self.arenas.last().map_or(0, |last| {
// Subtraction cannot underflow because `.last()` ensures the `.len() > 0`.
(self.arenas.len() - 1) * VALIDATORS_PER_ARENA + last.1.len()
})
}
/// Updates the caches for each `Validator` in `validators` and returns a list that maps 1:1
/// with `validators` to the hash of each validator.
///
/// This function makes assumptions that the `validators` list will only change in accordance
/// with valid per-block/per-slot state transitions.
fn leaves(&mut self, validators: &[Validator]) -> Result<Vec<Vec<Hash256>>, Error> {
if self.len() < validators.len() {
validators.iter().skip(self.len()).for_each(|v| {
if self
.arenas
.last()
.map_or(true, |last| last.1.len() >= VALIDATORS_PER_ARENA)
{
let mut arena = CacheArena::default();
let cache = v.new_tree_hash_cache(&mut arena);
self.arenas.push((arena, vec![cache]))
} else {
let (arena, caches) = &mut self
.arenas
.last_mut()
.expect("Cannot reach this block if arenas is empty.");
caches.push(v.new_tree_hash_cache(arena))
}
})
} else if validators.len() < self.len() {
return Err(Error::ValidatorRegistryShrunk);
}
self.arenas
.par_iter_mut()
.enumerate()
.map(|(arena_index, (arena, caches))| {
caches
.iter_mut()
.enumerate()
.map(move |(cache_index, cache)| {
let val_index = (arena_index * VALIDATORS_PER_ARENA) + cache_index;
let validator = validators
.get(val_index)
.ok_or_else(|| Error::TreeHashCacheInconsistent)?;
validator
.recalculate_tree_hash_root(arena, cache)
.map_err(Error::CachedTreeHashError)
})
.collect()
})
.collect()
}
}

View File

@@ -47,7 +47,7 @@ pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
pub use crate::beacon_state::{Error as BeaconStateError, *};
pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
pub use crate::chain_spec::{ChainSpec, Domain, YamlConfig};
pub use crate::checkpoint::Checkpoint;
pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH};

View File

@@ -2,40 +2,38 @@
//!
//! It makes some assumptions about the layouts and update patterns of other structs in this
//! crate, and should be updated carefully whenever those structs are changed.
use crate::{Hash256, Validator};
use cached_tree_hash::{int_log, CachedTreeHash, Error, TreeHashCache};
use crate::{Epoch, Hash256, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache};
use int_to_bytes::int_to_fixed_bytes32;
use tree_hash::TreeHash;
/// Number of struct fields on `Validator`.
const NUM_VALIDATOR_FIELDS: usize = 8;
impl CachedTreeHash<TreeHashCache> for Validator {
fn new_tree_hash_cache() -> TreeHashCache {
TreeHashCache::new(int_log(NUM_VALIDATOR_FIELDS))
fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
TreeHashCache::new(arena, int_log(NUM_VALIDATOR_FIELDS), NUM_VALIDATOR_FIELDS)
}
/// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition.
///
/// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant.
fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result<Hash256, Error> {
// If the cache is empty, hash every field to fill it.
if cache.leaves().is_empty() {
return cache.recalculate_merkle_root(field_tree_hash_iter(self));
}
fn recalculate_tree_hash_root(
&self,
arena: &mut CacheArena,
cache: &mut TreeHashCache,
) -> Result<Hash256, Error> {
// Otherwise just check the fields which might have changed.
let dirty_indices = cache
.leaves()
.iter_mut()
.iter_mut(arena)?
.enumerate()
.flat_map(|(i, leaf)| {
// Fields pubkey and withdrawal_credentials are constant
if i == 0 || i == 1 {
if (i == 0 || i == 1) && cache.initialized {
None
} else {
let new_tree_hash = field_tree_hash_by_index(self, i);
if leaf.as_bytes() != &new_tree_hash[..] {
leaf.assign_from_slice(&new_tree_hash);
if process_field_by_index(self, i, leaf, !cache.initialized) {
Some(i)
} else {
None
@@ -44,21 +42,25 @@ impl CachedTreeHash<TreeHashCache> for Validator {
})
.collect();
cache.update_merkle_root(dirty_indices)
cache.update_merkle_root(arena, dirty_indices)
}
}
/// Get the tree hash root of a validator field by its position/index in the struct.
fn field_tree_hash_by_index(v: &Validator, field_idx: usize) -> Vec<u8> {
fn process_field_by_index(
v: &Validator,
field_idx: usize,
leaf: &mut Hash256,
force_update: bool,
) -> bool {
match field_idx {
0 => v.pubkey.tree_hash_root(),
1 => v.withdrawal_credentials.tree_hash_root(),
2 => v.effective_balance.tree_hash_root(),
3 => v.slashed.tree_hash_root(),
4 => v.activation_eligibility_epoch.tree_hash_root(),
5 => v.activation_epoch.tree_hash_root(),
6 => v.exit_epoch.tree_hash_root(),
7 => v.withdrawable_epoch.tree_hash_root(),
0 => process_vec_field(v.pubkey.tree_hash_root(), leaf, force_update),
1 => process_slice_field(v.withdrawal_credentials.as_bytes(), leaf, force_update),
2 => process_u64_field(v.effective_balance, leaf, force_update),
3 => process_bool_field(v.slashed, leaf, force_update),
4 => process_epoch_field(v.activation_eligibility_epoch, leaf, force_update),
5 => process_epoch_field(v.activation_epoch, leaf, force_update),
6 => process_epoch_field(v.exit_epoch, leaf, force_update),
7 => process_epoch_field(v.withdrawable_epoch, leaf, force_update),
_ => panic!(
"Validator type only has {} fields, {} out of bounds",
NUM_VALIDATOR_FIELDS, field_idx
@@ -66,17 +68,35 @@ fn field_tree_hash_by_index(v: &Validator, field_idx: usize) -> Vec<u8> {
}
}
/// Iterator over the tree hash roots of `Validator` fields.
fn field_tree_hash_iter<'a>(
v: &'a Validator,
) -> impl Iterator<Item = [u8; 32]> + ExactSizeIterator + 'a {
(0..NUM_VALIDATOR_FIELDS)
.map(move |i| field_tree_hash_by_index(v, i))
.map(|tree_hash_root| {
let mut res = [0; 32];
res.copy_from_slice(&tree_hash_root[0..32]);
res
})
/// Stores `new_tree_hash` in `leaf`, returning `true` iff the leaf changed (or the update
/// was forced).
fn process_vec_field(new_tree_hash: Vec<u8>, leaf: &mut Hash256, force_update: bool) -> bool {
    let dirty = force_update || leaf.as_bytes() != &new_tree_hash[..];
    if dirty {
        leaf.assign_from_slice(&new_tree_hash);
    }
    dirty
}
/// Stores `new_tree_hash` in `leaf`, returning `true` iff the leaf changed (or the update
/// was forced).
fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool {
    if force_update || leaf.as_bytes() != new_tree_hash {
        // `new_tree_hash` is already a `&[u8]`; the extra borrow was redundant.
        leaf.assign_from_slice(new_tree_hash);
        true
    } else {
        false
    }
}
/// Tree-hashes `val` as a `u64` leaf (via `int_to_fixed_bytes32`) and stores it in `leaf`,
/// returning whether the leaf was modified.
fn process_u64_field(val: u64, leaf: &mut Hash256, force_update: bool) -> bool {
    let new_tree_hash = int_to_fixed_bytes32(val);
    process_slice_field(&new_tree_hash, leaf, force_update)
}
/// Tree-hashes an `Epoch` leaf by delegating to the `u64` handler.
fn process_epoch_field(val: Epoch, leaf: &mut Hash256, force_update: bool) -> bool {
    let raw = val.as_u64();
    process_u64_field(raw, leaf, force_update)
}
/// Tree-hashes a `bool` leaf (0 or 1) by delegating to the `u64` handler.
fn process_bool_field(val: bool, leaf: &mut Hash256, force_update: bool) -> bool {
    process_u64_field(u64::from(val), leaf, force_update)
}
#[cfg(test)]
@@ -88,18 +108,24 @@ mod test {
use rand_xorshift::XorShiftRng;
fn test_validator_tree_hash(v: &Validator) {
let mut cache = Validator::new_tree_hash_cache();
let arena = &mut CacheArena::default();
let mut cache = v.new_tree_hash_cache(arena);
// With a fresh cache
assert_eq!(
&v.tree_hash_root()[..],
v.recalculate_tree_hash_root(&mut cache).unwrap().as_bytes(),
v.recalculate_tree_hash_root(arena, &mut cache)
.unwrap()
.as_bytes(),
"{:?}",
v
);
// With a completely up-to-date cache
assert_eq!(
&v.tree_hash_root()[..],
v.recalculate_tree_hash_root(&mut cache).unwrap().as_bytes(),
v.recalculate_tree_hash_root(arena, &mut cache)
.unwrap()
.as_bytes(),
"{:?}",
v
);