Delete cached_tree_hash (#6060)

* Delete `cached_tree_hash`
This commit is contained in:
Michael Sproul
2024-07-08 20:33:50 +10:00
committed by GitHub
parent 5b2edfa0bd
commit 48c55ae295
13 changed files with 1 addition and 1224 deletions

16
Cargo.lock generated
View File

@@ -1169,21 +1169,6 @@ dependencies = [
"libc",
]
[[package]]
name = "cached_tree_hash"
version = "0.1.0"
dependencies = [
"ethereum-types 0.14.1",
"ethereum_hashing",
"ethereum_ssz",
"ethereum_ssz_derive",
"quickcheck",
"quickcheck_macros",
"smallvec",
"ssz_types",
"tree_hash",
]
[[package]]
name = "camino"
version = "1.1.7"
@@ -8776,7 +8761,6 @@ dependencies = [
"arbitrary",
"beacon_chain",
"bls",
"cached_tree_hash",
"compare_fields",
"compare_fields_derive",
"criterion",

View File

@@ -51,7 +51,6 @@ members = [
"database_manager",
"consensus/cached_tree_hash",
"consensus/int_to_bytes",
"consensus/fork_choice",
"consensus/proto_array",
@@ -188,7 +187,6 @@ beacon_chain = { path = "beacon_node/beacon_chain" }
beacon_node = { path = "beacon_node" }
beacon_processor = { path = "beacon_node/beacon_processor" }
bls = { path = "crypto/bls" }
cached_tree_hash = { path = "consensus/cached_tree_hash" }
clap_utils = { path = "common/clap_utils" }
compare_fields = { path = "common/compare_fields" }
deposit_contract = { path = "common/deposit_contract" }

View File

@@ -1,21 +0,0 @@
[package]
name = "cached_tree_hash"
version = "0.1.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = { workspace = true }
[dependencies]
ethereum-types = { workspace = true }
ssz_types = { workspace = true }
ethereum_hashing = { workspace = true }
ethereum_ssz_derive = { workspace = true }
ethereum_ssz = { workspace = true }
tree_hash = { workspace = true }
smallvec = { workspace = true }
[dev-dependencies]
quickcheck = { workspace = true }
quickcheck_macros = { workspace = true }
[features]
arbitrary = ["ethereum-types/arbitrary"]

View File

@@ -1,237 +0,0 @@
use crate::cache_arena;
use crate::SmallVec8;
use crate::{Error, Hash256};
use ethereum_hashing::{hash32_concat, ZERO_HASHES};
use smallvec::smallvec;
use ssz_derive::{Decode, Encode};
use tree_hash::BYTES_PER_CHUNK;
type CacheArena = cache_arena::CacheArena<Hash256>;
type CacheArenaAllocation = cache_arena::CacheArenaAllocation<Hash256>;
/// Sparse Merkle tree suitable for tree hashing vectors and lists.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct TreeHashCache {
    /// Becomes `true` after the first full root computation; while `false`,
    /// every leaf is treated as dirty so the first pass hashes the whole tree.
    pub initialized: bool,
    /// Depth is such that the tree has a capacity for 2^depth leaves
    depth: usize,
    /// Sparse layers.
    ///
    /// The leaves are contained in `self.layers[self.depth]`, and each other layer `i`
    /// contains the parents of the nodes in layer `i + 1`.
    layers: SmallVec8<CacheArenaAllocation>,
}
impl TreeHashCache {
    /// Create a new cache with the given `depth` with enough nodes allocated to suit `leaves`. All
    /// leaves are set to `Hash256::zero()`.
    pub fn new(arena: &mut CacheArena, depth: usize, leaves: usize) -> Self {
        let mut layers = SmallVec8::with_capacity(depth + 1);

        // One sub-arena allocation per layer, sized by `nodes_per_layer` for the
        // number of nodes implied by `leaves` at that height.
        for i in 0..=depth {
            let vec = arena.alloc();
            vec.extend_with_vec(
                arena,
                smallvec![Hash256::zero(); nodes_per_layer(i, depth, leaves)],
            )
            .expect("A newly allocated sub-arena cannot fail unless it has reached max capacity");
            layers.push(vec)
        }

        TreeHashCache {
            // `false` forces the first `update_leaves` call to mark every leaf
            // dirty (see the `!self.initialized` check there).
            initialized: false,
            depth,
            layers,
        }
    }

    /// Compute the updated Merkle root for the given `leaves`.
    pub fn recalculate_merkle_root(
        &mut self,
        arena: &mut CacheArena,
        leaves: impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]>,
    ) -> Result<Hash256, Error> {
        let dirty_indices = self.update_leaves(arena, leaves)?;
        self.update_merkle_root(arena, dirty_indices)
    }

    /// Phase 1 of the algorithm: compute the indices of all dirty leaves.
    ///
    /// Overwrites changed leaves in place, appends new leaves, and returns the
    /// (ascending) indices of all leaves that changed. Errors if `leaves` would
    /// shrink the cache or exceed its 2^depth capacity.
    pub fn update_leaves(
        &mut self,
        arena: &mut CacheArena,
        mut leaves: impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]>,
    ) -> Result<SmallVec8<usize>, Error> {
        let new_leaf_count = leaves.len();

        if new_leaf_count < self.leaves().len(arena)? {
            return Err(Error::CannotShrink);
        } else if new_leaf_count > 2usize.pow(self.depth as u32) {
            return Err(Error::TooManyLeaves);
        }

        let mut dirty = SmallVec8::new();

        // Update the existing leaves
        self.leaves()
            .iter_mut(arena)?
            .enumerate()
            .zip(&mut leaves)
            .for_each(|((i, leaf), new_leaf)| {
                // An uninitialized cache treats every leaf as dirty so the whole
                // tree gets hashed on first use.
                if !self.initialized || leaf.as_bytes() != new_leaf {
                    leaf.assign_from_slice(&new_leaf);
                    dirty.push(i);
                }
            });

        // Push the rest of the new leaves (if any)
        dirty.extend(self.leaves().len(arena)?..new_leaf_count);
        self.leaves()
            .extend_with_vec(arena, leaves.map(|l| Hash256::from_slice(&l)).collect())?;

        Ok(dirty)
    }

    /// Phase 2: propagate changes upwards from the leaves of the tree, and compute the root.
    ///
    /// Returns an error if `dirty_indices` is inconsistent with the cache.
    pub fn update_merkle_root(
        &mut self,
        arena: &mut CacheArena,
        mut dirty_indices: SmallVec8<usize>,
    ) -> Result<Hash256, Error> {
        if dirty_indices.is_empty() {
            return Ok(self.root(arena));
        }

        let mut depth = self.depth;

        // Walk from the leaf layer up to the root, re-hashing only the parents
        // of dirty nodes at each level.
        while depth > 0 {
            let new_dirty_indices = lift_dirty(&dirty_indices);

            for &idx in &new_dirty_indices {
                let left_idx = 2 * idx;
                let right_idx = left_idx + 1;

                let left = self.layers[depth]
                    .get(arena, left_idx)?
                    .ok_or(Error::MissingLeftIdx(left_idx))?;
                // A missing right sibling means that subtree is empty: substitute
                // the precomputed hash of an all-zero subtree of this height.
                let right = self.layers[depth]
                    .get(arena, right_idx)?
                    .copied()
                    .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth - depth]));

                let new_hash = hash32_concat(left.as_bytes(), right.as_bytes());

                match self.layers[depth - 1].get_mut(arena, idx)? {
                    Some(hash) => {
                        hash.assign_from_slice(&new_hash);
                    }
                    None => {
                        // Parent layer should already contain nodes for all non-dirty indices
                        if idx != self.layers[depth - 1].len(arena)? {
                            return Err(Error::CacheInconsistent);
                        }
                        self.layers[depth - 1].push(arena, Hash256::from_slice(&new_hash))?;
                    }
                }
            }

            dirty_indices = new_dirty_indices;
            depth -= 1;
        }

        self.initialized = true;

        Ok(self.root(arena))
    }

    /// Get the root of this cache, without doing any updates/computation.
    pub fn root(&self, arena: &CacheArena) -> Hash256 {
        self.layers[0]
            .get(arena, 0)
            .expect("cached tree should have a root layer")
            .copied()
            .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth]))
    }

    /// The leaf layer of the tree: `layers[depth]`.
    pub fn leaves(&mut self) -> &mut CacheArenaAllocation {
        &mut self.layers[self.depth]
    }
}
/// Compute the dirty indices for one layer up.
fn lift_dirty(dirty_indices: &[usize]) -> SmallVec8<usize> {
let mut new_dirty = SmallVec8::with_capacity(dirty_indices.len());
for index in dirty_indices {
new_dirty.push(index / 2)
}
new_dirty.dedup();
new_dirty
}
/// Returns the number of nodes that should be at each layer of a tree with the given `depth` and
/// number of `leaves`.
///
/// Note: the top-most layer is `0` and a tree that has 8 leaves (4 layers) has a depth of 3 (_not_
/// a depth of 4).
///
/// ## Example
///
/// Consider the following tree that has `depth = 3` and `leaves = 5`.
///
///```ignore
/// 0 o <-- height 0 has 1 node
/// / \
/// 1 o o <-- height 1 has 2 nodes
/// / \ /
/// 2 o o o <-- height 2 has 3 nodes
/// /\ /\ /
/// 3 o o o o o <-- height 3 have 5 nodes
/// ```
fn nodes_per_layer(layer: usize, depth: usize, leaves: usize) -> usize {
    // The leaf layer holds exactly the supplied number of leaves.
    if layer == depth {
        return leaves;
    }
    // Every node at this layer spans 2^(depth - layer) leaves; divide rounding
    // up so partially-covered nodes are still counted.
    let span = 1usize << (depth - layer);
    (leaves + span - 1) / span
}
#[cfg(test)]
mod test {
    use super::*;

    /// A cache built with zero leaves should still compute a root successfully.
    #[test]
    fn zero_leaves() {
        let arena = &mut CacheArena::default();
        let depth = 3;
        let num_leaves = 0;
        let mut cache = TreeHashCache::new(arena, depth, num_leaves);
        let leaves: Vec<[u8; BYTES_PER_CHUNK]> = vec![];
        cache
            .recalculate_merkle_root(arena, leaves.into_iter())
            .expect("should calculate root");
    }

    /// Layer sizes for a partially-filled tree (5 of 8 possible leaves).
    #[test]
    fn test_node_per_layer_unbalanced_tree() {
        assert_eq!(nodes_per_layer(0, 3, 5), 1);
        assert_eq!(nodes_per_layer(1, 3, 5), 2);
        assert_eq!(nodes_per_layer(2, 3, 5), 3);
        assert_eq!(nodes_per_layer(3, 3, 5), 5);
    }

    /// Layer sizes for a fully-populated tree (8 of 8 leaves).
    #[test]
    fn test_node_per_layer_balanced_tree() {
        assert_eq!(nodes_per_layer(0, 3, 8), 1);
        assert_eq!(nodes_per_layer(1, 3, 8), 2);
        assert_eq!(nodes_per_layer(2, 3, 8), 4);
        assert_eq!(nodes_per_layer(3, 3, 8), 8);
    }
}

View File

@@ -1,498 +0,0 @@
use crate::SmallVec8;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::cmp::Ordering;
use std::marker::PhantomData;
use std::ops::Range;
/// Errors that can arise when manipulating a `CacheArena` or one of its allocations.
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    /// The given allocation id is not known to this arena.
    UnknownAllocId(usize),
    /// Shifting an allocation offset overflowed `usize`.
    OffsetOverflow,
    /// Shifting an allocation offset underflowed `usize`.
    OffsetUnderflow,
    /// Translating an allocation-relative range into the backing array overflowed.
    RangeOverFlow,
}
/// Inspired by the `TypedArena` crate, the `CachedArena` provides a single contiguous memory
/// allocation from which smaller allocations can be produced. In effect this allows for having
/// many `Vec<T>`-like objects all stored contiguously on the heap with the aim of reducing memory
/// fragmentation.
///
/// Because all of the allocations are stored in one big `Vec`, resizing any of the allocations
/// will mean all items to the right of that allocation will be moved.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct CacheArena<T: Encode + Decode> {
    /// The backing array, storing cached values.
    backing: Vec<T>,
    /// A list of offsets indicating the start of each allocation.
    ///
    /// Allocation `i` occupies `backing[offsets[i]..offsets[i + 1]]` (or up to
    /// `backing.len()` for the last allocation) — see `len`/`range` below.
    offsets: Vec<usize>,
}
impl<T: Encode + Decode> CacheArena<T> {
    /// Instantiate self with a backing array of the given `capacity`.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            backing: Vec::with_capacity(capacity),
            offsets: vec![],
        }
    }

    /// Produce an allocation of zero length at the end of the backing array.
    pub fn alloc(&mut self) -> CacheArenaAllocation<T> {
        let alloc_id = self.offsets.len();
        self.offsets.push(self.backing.len());

        CacheArenaAllocation {
            alloc_id,
            _phantom: PhantomData,
        }
    }

    /// Update `self.offsets` to reflect an allocation increasing in size.
    ///
    /// Shifts the start offset of every allocation *after* `alloc_id` rightwards.
    fn grow(&mut self, alloc_id: usize, grow_by: usize) -> Result<(), Error> {
        if alloc_id < self.offsets.len() {
            self.offsets
                .iter_mut()
                .skip(alloc_id + 1)
                .try_for_each(|offset| {
                    *offset = offset.checked_add(grow_by).ok_or(Error::OffsetOverflow)?;
                    Ok(())
                })
        } else {
            Err(Error::UnknownAllocId(alloc_id))
        }
    }

    /// Update `self.offsets` to reflect an allocation decreasing in size.
    ///
    /// Shifts the start offset of every allocation *after* `alloc_id` leftwards.
    fn shrink(&mut self, alloc_id: usize, shrink_by: usize) -> Result<(), Error> {
        if alloc_id < self.offsets.len() {
            self.offsets
                .iter_mut()
                .skip(alloc_id + 1)
                .try_for_each(|offset| {
                    *offset = offset
                        .checked_sub(shrink_by)
                        .ok_or(Error::OffsetUnderflow)?;
                    Ok(())
                })
        } else {
            Err(Error::UnknownAllocId(alloc_id))
        }
    }

    /// Similar to `Vec::splice`, however the range is relative to some allocation (`alloc_id`) and
    /// the replaced items are not returned (i.e., it is forgetful).
    ///
    /// To reiterate, the given `range` should be relative to the given `alloc_id`, not
    /// `self.backing`. E.g., if the allocation has an offset of `20` and the range is `0..1`, then
    /// the splice will translate to `self.backing[20..21]`.
    fn splice_forgetful<I: IntoIterator<Item = T>>(
        &mut self,
        alloc_id: usize,
        range: Range<usize>,
        replace_with: I,
    ) -> Result<(), Error> {
        let offset = *self
            .offsets
            .get(alloc_id)
            .ok_or(Error::UnknownAllocId(alloc_id))?;
        // Translate the allocation-relative range into backing-array indices.
        let start = range
            .start
            .checked_add(offset)
            .ok_or(Error::RangeOverFlow)?;
        let end = range.end.checked_add(offset).ok_or(Error::RangeOverFlow)?;

        let prev_len = self.backing.len();

        self.backing.splice(start..end, replace_with);

        // Keep downstream offsets in sync with however much the backing array
        // grew or shrank as a result of the splice.
        match prev_len.cmp(&self.backing.len()) {
            Ordering::Greater => self.shrink(alloc_id, prev_len - self.backing.len())?,
            Ordering::Less => self.grow(alloc_id, self.backing.len() - prev_len)?,
            Ordering::Equal => {}
        }

        Ok(())
    }

    /// Returns the length of the specified allocation.
    fn len(&self, alloc_id: usize) -> Result<usize, Error> {
        let start = self
            .offsets
            .get(alloc_id)
            .ok_or(Error::UnknownAllocId(alloc_id))?;
        // An allocation ends where the next one starts, or at the end of the
        // backing array if it is the last allocation.
        let end = self
            .offsets
            .get(alloc_id + 1)
            .copied()
            .unwrap_or(self.backing.len());
        Ok(end - start)
    }

    /// Get the value at position `i`, relative to the offset at `alloc_id`.
    fn get(&self, alloc_id: usize, i: usize) -> Result<Option<&T>, Error> {
        if i < self.len(alloc_id)? {
            let offset = self
                .offsets
                .get(alloc_id)
                .ok_or(Error::UnknownAllocId(alloc_id))?;
            Ok(self.backing.get(i + offset))
        } else {
            // Out-of-bounds within a known allocation is `None`, not an error.
            Ok(None)
        }
    }

    /// Mutably get the value at position `i`, relative to the offset at `alloc_id`.
    fn get_mut(&mut self, alloc_id: usize, i: usize) -> Result<Option<&mut T>, Error> {
        if i < self.len(alloc_id)? {
            let offset = self
                .offsets
                .get(alloc_id)
                .ok_or(Error::UnknownAllocId(alloc_id))?;
            Ok(self.backing.get_mut(i + offset))
        } else {
            Ok(None)
        }
    }

    /// Returns the range in `self.backing` that is occupied by some allocation.
    fn range(&self, alloc_id: usize) -> Result<Range<usize>, Error> {
        let start = *self
            .offsets
            .get(alloc_id)
            .ok_or(Error::UnknownAllocId(alloc_id))?;
        let end = self
            .offsets
            .get(alloc_id + 1)
            .copied()
            .unwrap_or(self.backing.len());
        Ok(start..end)
    }

    /// Iterate through all values in some allocation.
    fn iter(&self, alloc_id: usize) -> Result<impl Iterator<Item = &T>, Error> {
        Ok(self.backing[self.range(alloc_id)?].iter())
    }

    /// Mutably iterate through all values in some allocation.
    fn iter_mut(&mut self, alloc_id: usize) -> Result<impl Iterator<Item = &mut T>, Error> {
        let range = self.range(alloc_id)?;
        Ok(self.backing[range].iter_mut())
    }

    /// Returns the total number of items stored in the arena, the sum of all values in all
    /// allocations.
    pub fn backing_len(&self) -> usize {
        self.backing.len()
    }
}
/// An allocation from a `CacheArena` that behaves like a `Vec<T>`.
///
/// All functions will modify the given `arena` instead of `self`. As such, it is safe to have
/// multiple instances of this allocation at once.
///
/// For all functions that accept a `CacheArena<T>` parameter, that arena should always be the one
/// that created `Self`. I.e., do not mix-and-match allocations and arenas unless you _really_ know
/// what you're doing (or want to have a bad time).
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct CacheArenaAllocation<T> {
    /// Index of this allocation within the arena's `offsets` list.
    alloc_id: usize,
    /// Records `T` for type-safety only; holds no data, so it is skipped during
    /// SSZ (de)serialization.
    #[ssz(skip_serializing, skip_deserializing)]
    _phantom: PhantomData<T>,
}
impl<T: Encode + Decode> CacheArenaAllocation<T> {
    /// Grow the allocation in `arena`, appending `vec` to the current values.
    pub fn extend_with_vec(
        &self,
        arena: &mut CacheArena<T>,
        vec: SmallVec8<T>,
    ) -> Result<(), Error> {
        // Splicing over the empty range at the end appends without removing anything.
        let len = arena.len(self.alloc_id)?;
        arena.splice_forgetful(self.alloc_id, len..len, vec)?;
        Ok(())
    }

    /// Push `item` to the end of the current allocation in `arena`.
    ///
    /// An error is returned if this allocation is not known to the given `arena`.
    pub fn push(&self, arena: &mut CacheArena<T>, item: T) -> Result<(), Error> {
        let len = arena.len(self.alloc_id)?;
        arena.splice_forgetful(self.alloc_id, len..len, vec![item])?;
        Ok(())
    }

    /// Get the i'th item in the `arena` (relative to this allocation).
    ///
    /// An error is returned if this allocation is not known to the given `arena`.
    pub fn get<'a>(&self, arena: &'a CacheArena<T>, i: usize) -> Result<Option<&'a T>, Error> {
        arena.get(self.alloc_id, i)
    }

    /// Mutably get the i'th item in the `arena` (relative to this allocation).
    ///
    /// An error is returned if this allocation is not known to the given `arena`.
    pub fn get_mut<'a>(
        &self,
        arena: &'a mut CacheArena<T>,
        i: usize,
    ) -> Result<Option<&'a mut T>, Error> {
        arena.get_mut(self.alloc_id, i)
    }

    /// Iterate through all items in the `arena` (relative to this allocation).
    pub fn iter<'a>(&self, arena: &'a CacheArena<T>) -> Result<impl Iterator<Item = &'a T>, Error> {
        arena.iter(self.alloc_id)
    }

    /// Mutably iterate through all items in the `arena` (relative to this allocation).
    pub fn iter_mut<'a>(
        &self,
        arena: &'a mut CacheArena<T>,
    ) -> Result<impl Iterator<Item = &'a mut T>, Error> {
        arena.iter_mut(self.alloc_id)
    }

    /// Return the number of items stored in this allocation.
    pub fn len(&self, arena: &CacheArena<T>) -> Result<usize, Error> {
        arena.len(self.alloc_id)
    }

    /// Returns true if this allocation is empty.
    pub fn is_empty(&self, arena: &CacheArena<T>) -> Result<bool, Error> {
        self.len(arena).map(|len| len == 0)
    }
}
#[cfg(test)]
mod tests {
    //! Tests that interleave pushes/extends across multiple allocations to
    //! verify that offset bookkeeping stays consistent when earlier
    //! allocations grow and shift later ones.
    use crate::Hash256;
    use smallvec::smallvec;

    type CacheArena = super::CacheArena<Hash256>;
    type CacheArenaAllocation = super::CacheArenaAllocation<Hash256>;

    /// Deterministic hash fixture derived from an index.
    fn hash(i: usize) -> Hash256 {
        Hash256::from_low_u64_be(i as u64)
    }

    /// Push twice and extend once, then check every element is still readable
    /// (via `get`, `iter` and `iter_mut`) and equals its index hash.
    fn test_routine(arena: &mut CacheArena, sub: &mut CacheArenaAllocation) {
        let mut len = sub.len(arena).expect("should exist");

        sub.push(arena, hash(len)).expect("should push");
        len += 1;

        assert_eq!(
            sub.len(arena).expect("should exist"),
            len,
            "after first push sub should have len {}",
            len
        );

        assert!(
            !sub.is_empty(arena).expect("should exist"),
            "new sub should not be empty"
        );

        sub.push(arena, hash(len)).expect("should push again");
        len += 1;

        assert_eq!(
            sub.len(arena).expect("should exist"),
            len,
            "after second push sub should have len {}",
            len
        );

        sub.extend_with_vec(arena, smallvec![hash(len), hash(len + 1)])
            .expect("should extend with vec");
        len += 2;

        assert_eq!(
            sub.len(arena).expect("should exist"),
            len,
            "after extend sub should have len {}",
            len
        );

        let collected = sub
            .iter(arena)
            .expect("should get iter")
            .cloned()
            .collect::<Vec<_>>();
        let collected_mut = sub
            .iter_mut(arena)
            .expect("should get mut iter")
            .map(|v| *v)
            .collect::<Vec<_>>();

        for i in 0..len {
            assert_eq!(
                *sub.get(arena, i)
                    .expect("should exist")
                    .expect("should get sub index"),
                hash(i),
                "get({}) should be hash({})",
                i,
                i
            );

            assert_eq!(
                collected[i],
                hash(i),
                "collected[{}] should be hash({})",
                i,
                i
            );

            assert_eq!(
                collected_mut[i],
                hash(i),
                "collected_mut[{}] should be hash({})",
                i,
                i
            );
        }
    }

    /// A single allocation behaves like an independent `Vec`.
    #[test]
    fn single() {
        let arena = &mut CacheArena::default();

        assert_eq!(arena.backing.len(), 0, "should start with an empty backing");
        assert_eq!(arena.offsets.len(), 0, "should start without any offsets");

        let mut sub = arena.alloc();
        assert_eq!(
            sub.len(arena).expect("should exist"),
            0,
            "new sub should have len 0"
        );
        assert!(
            sub.is_empty(arena).expect("should exist"),
            "new sub should be empty"
        );

        test_routine(arena, &mut sub);
    }

    /// Two allocations created up-front, then mutated in order.
    #[test]
    fn double() {
        let arena = &mut CacheArena::default();

        assert_eq!(arena.backing.len(), 0, "should start with an empty backing");
        assert_eq!(arena.offsets.len(), 0, "should start without any offsets");

        let mut sub_01 = arena.alloc();
        assert_eq!(
            sub_01.len(arena).expect("should exist"),
            0,
            "new sub should have len 0"
        );
        assert!(
            sub_01.is_empty(arena).expect("should exist"),
            "new sub should be empty"
        );

        let mut sub_02 = arena.alloc();
        assert_eq!(
            sub_02.len(arena).expect("should exist"),
            0,
            "new sub should have len 0"
        );
        assert!(
            sub_02.is_empty(arena).expect("should exist"),
            "new sub should be empty"
        );

        test_routine(arena, &mut sub_01);
        test_routine(arena, &mut sub_02);
    }

    /// Growing the first allocation after the second exists must shift the
    /// second's offset without corrupting either.
    #[test]
    fn one_then_other() {
        let arena = &mut CacheArena::default();

        assert_eq!(arena.backing.len(), 0, "should start with an empty backing");
        assert_eq!(arena.offsets.len(), 0, "should start without any offsets");

        let mut sub_01 = arena.alloc();
        assert_eq!(
            sub_01.len(arena).expect("should exist"),
            0,
            "new sub should have len 0"
        );
        assert!(
            sub_01.is_empty(arena).expect("should exist"),
            "new sub should be empty"
        );

        test_routine(arena, &mut sub_01);

        let mut sub_02 = arena.alloc();
        assert_eq!(
            sub_02.len(arena).expect("should exist"),
            0,
            "new sub should have len 0"
        );
        assert!(
            sub_02.is_empty(arena).expect("should exist"),
            "new sub should be empty"
        );

        test_routine(arena, &mut sub_02);
        test_routine(arena, &mut sub_01);
        test_routine(arena, &mut sub_02);
    }

    /// Many allocations with interleaved growth, then a final pass over all.
    #[test]
    fn many() {
        let arena = &mut CacheArena::default();

        assert_eq!(arena.backing.len(), 0, "should start with an empty backing");
        assert_eq!(arena.offsets.len(), 0, "should start without any offsets");

        let mut subs = vec![];

        for i in 0..50 {
            if i == 0 {
                let sub = arena.alloc();
                assert_eq!(
                    sub.len(arena).expect("should exist"),
                    0,
                    "new sub should have len 0"
                );
                assert!(
                    sub.is_empty(arena).expect("should exist"),
                    "new sub should be empty"
                );
                subs.push(sub);
                continue;
            } else if i % 2 == 0 {
                test_routine(arena, &mut subs[i - 1]);
            }

            let sub = arena.alloc();
            assert_eq!(
                sub.len(arena).expect("should exist"),
                0,
                "new sub should have len 0"
            );
            assert!(
                sub.is_empty(arena).expect("should exist"),
                "new sub should be empty"
            );
            subs.push(sub);
        }

        for sub in subs.iter_mut() {
            test_routine(arena, sub);
        }
    }
}

View File

@@ -1,138 +0,0 @@
use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache};
use ssz_types::{typenum::Unsigned, FixedVector, VariableList};
use std::mem::size_of;
use tree_hash::{mix_in_length, BYTES_PER_CHUNK};
/// Compute ceil(log2(n)).
///
/// Returns the smallest number of bits `d` such that `n <= 2^d`, saturating at
/// the bit width of `usize` when `n` exceeds the largest representable power
/// of two.
pub fn int_log(n: usize) -> usize {
    if let Some(pow) = n.checked_next_power_of_two() {
        pow.trailing_zeros() as usize
    } else {
        // `n` is above the largest power of two that fits in a usize.
        usize::BITS as usize
    }
}
/// Number of Merkle-tree leaves required for `len` `Hash256` values: each hash
/// occupies exactly one leaf chunk.
pub fn hash256_leaf_count(len: usize) -> usize {
    len
}
/// Number of `BYTES_PER_CHUNK`-byte leaves needed to pack `len` `u64` values.
pub fn u64_leaf_count(len: usize) -> usize {
    let vals_per_chunk = BYTES_PER_CHUNK / size_of::<u64>();
    // Ceiling division so a partially-filled final chunk still counts.
    len / vals_per_chunk + usize::from(len % vals_per_chunk != 0)
}
/// Iterate over `values`, yielding each hash as a fixed `BYTES_PER_CHUNK`-byte
/// leaf chunk.
pub fn hash256_iter(
    values: &[Hash256],
) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
    values.iter().map(|value| value.to_fixed_bytes())
}
/// Iterate over `values`, packing consecutive little-endian `u64`s into
/// `BYTES_PER_CHUNK`-byte chunks; the final chunk is zero-padded on the right.
pub fn u64_iter(values: &[u64]) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
    let type_size = size_of::<u64>();
    let vals_per_chunk = BYTES_PER_CHUNK / type_size;
    values.chunks(vals_per_chunk).map(move |xs| {
        // Start from an all-zero chunk so missing trailing values stay zero.
        let mut chunk = [0; BYTES_PER_CHUNK];
        for (i, x) in xs.iter().enumerate() {
            chunk[i * type_size..(i + 1) * type_size].copy_from_slice(&x.to_le_bytes());
        }
        chunk
    })
}
impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<Hash256, N> {
    /// Build a cache deep enough for the vector's full capacity `N`, with one
    /// leaf per current element.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        TreeHashCache::new(
            arena,
            int_log(N::to_usize()),
            hash256_leaf_count(self.len()),
        )
    }

    /// Fixed-length vectors hash their leaves directly (no length mix-in,
    /// unlike the `VariableList` impls below).
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        cache.recalculate_merkle_root(arena, hash256_iter(self))
    }
}
impl<N: Unsigned> CachedTreeHash<TreeHashCache> for FixedVector<u64, N> {
    /// Build a cache sized for `N` `u64`s packed several-per-chunk (hence the
    /// shallower depth than the `Hash256` variant).
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        let vals_per_chunk = BYTES_PER_CHUNK / size_of::<u64>();
        TreeHashCache::new(
            arena,
            int_log(N::to_usize() / vals_per_chunk),
            u64_leaf_count(self.len()),
        )
    }

    /// Fixed-length vectors hash their packed leaves directly (no length mix-in).
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        cache.recalculate_merkle_root(arena, u64_iter(self))
    }
}
impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<Hash256, N> {
    /// Build a cache deep enough for the list's maximum capacity `N`.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        TreeHashCache::new(
            arena,
            int_log(N::to_usize()),
            hash256_leaf_count(self.len()),
        )
    }

    /// Lists mix the current length into the Merkle root, per SSZ list hashing.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        Ok(mix_in_length(
            &cache.recalculate_merkle_root(arena, hash256_iter(self))?,
            self.len(),
        ))
    }
}
impl<N: Unsigned> CachedTreeHash<TreeHashCache> for VariableList<u64, N> {
    /// Build a cache sized for `N` `u64`s packed several-per-chunk.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        let vals_per_chunk = BYTES_PER_CHUNK / size_of::<u64>();
        TreeHashCache::new(
            arena,
            int_log(N::to_usize() / vals_per_chunk),
            u64_leaf_count(self.len()),
        )
    }

    /// Lists mix the current length into the Merkle root, per SSZ list hashing.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        Ok(mix_in_length(
            &cache.recalculate_merkle_root(arena, u64_iter(self))?,
            self.len(),
        ))
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// `int_log` is exact on powers of two and rounds up in between.
    #[test]
    fn test_int_log() {
        for i in 0..63 {
            assert_eq!(int_log(2usize.pow(i)), i as usize);
        }
        assert_eq!(int_log(10), 4);
    }
}

View File

@@ -1,46 +0,0 @@
mod cache;
mod cache_arena;
mod impls;
#[cfg(test)]
mod test;
use smallvec::SmallVec;
type SmallVec8<T> = SmallVec<[T; 8]>;
pub type CacheArena = cache_arena::CacheArena<Hash256>;
pub use crate::cache::TreeHashCache;
pub use crate::impls::int_log;
use ethereum_types::H256 as Hash256;
/// Top-level error type for cached tree hashing.
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    /// Attempting to provide more than 2^depth leaves to a Merkle tree is disallowed.
    TooManyLeaves,
    /// Shrinking a Merkle tree cache by providing it with less leaves than it currently has is
    /// disallowed (for simplicity).
    CannotShrink,
    /// Cache is inconsistent with the list of dirty indices provided.
    CacheInconsistent,
    /// An error raised by the underlying `CacheArena` storage.
    CacheArenaError(cache_arena::Error),
    /// Unable to find left index in Merkle tree.
    MissingLeftIdx(usize),
}
impl From<cache_arena::Error> for Error {
    /// Wrap arena errors so `?` can propagate them as the crate-level `Error`.
    fn from(e: cache_arena::Error) -> Error {
        Error::CacheArenaError(e)
    }
}
/// Trait for types which can make use of a cache to accelerate calculation of their tree hash root.
///
/// `Cache` is the cache type used by the implementation (typically `TreeHashCache`);
/// callers supply the `CacheArena` that backs the cache's storage.
pub trait CachedTreeHash<Cache> {
    /// Create a new cache appropriate for use with values of this type.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> Cache;

    /// Update the cache and use it to compute the tree hash root for `self`.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut Cache,
    ) -> Result<Hash256, Error>;
}

View File

@@ -1,153 +0,0 @@
use crate::impls::hash256_iter;
use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache};
use ethereum_hashing::ZERO_HASHES;
use quickcheck_macros::quickcheck;
use ssz_types::{
typenum::{Unsigned, U16, U255, U256, U257},
FixedVector, VariableList,
};
use tree_hash::TreeHash;
/// Build deterministic `Hash256` fixtures from the integers in `start..end`
/// (little-endian encoding).
fn int_hashes(start: u64, end: u64) -> Vec<Hash256> {
    (start..end).map(Hash256::from_low_u64_le).collect()
}
type List16 = VariableList<Hash256, U16>;
type Vector16 = FixedVector<Hash256, U16>;
type Vector16u64 = FixedVector<u64, U16>;
/// A cache of depth `d` accepts up to 2^d leaves and rejects anything more.
#[test]
fn max_leaves() {
    let arena = &mut CacheArena::default();
    let depth = 4;
    let max_len = 2u64.pow(depth as u32);
    let mut cache = TreeHashCache::new(arena, depth, 2);
    // Growing up to exactly 2^depth leaves succeeds...
    assert!(cache
        .recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len - 1)))
        .is_ok());
    assert!(cache
        .recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len)))
        .is_ok());
    // ...while exceeding the capacity fails.
    assert_eq!(
        cache.recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len + 1))),
        Err(Error::TooManyLeaves)
    );
    assert_eq!(
        cache.recalculate_merkle_root(arena, hash256_iter(&int_hashes(0, max_len * 2))),
        Err(Error::TooManyLeaves)
    );
}
/// Re-hashing with fewer leaves than the cache already holds must fail.
#[test]
fn cannot_shrink() {
    let arena = &mut CacheArena::default();
    let init_len = 12;
    let list1 = List16::new(int_hashes(0, init_len)).unwrap();
    let list2 = List16::new(int_hashes(0, init_len - 1)).unwrap();
    let mut cache = list1.new_tree_hash_cache(arena);
    assert!(list1.recalculate_tree_hash_root(arena, &mut cache).is_ok());
    assert_eq!(
        list2.recalculate_tree_hash_root(arena, &mut cache),
        Err(Error::CannotShrink)
    );
}
/// An empty cache's root equals the precomputed all-zero-subtree hash at its depth.
#[test]
fn empty_leaves() {
    let arena = &mut CacheArena::default();
    let depth = 20;
    let mut cache = TreeHashCache::new(arena, depth, 0);
    assert_eq!(
        cache
            .recalculate_merkle_root(arena, vec![].into_iter())
            .unwrap()
            .as_bytes(),
        &ZERO_HASHES[depth][..]
    );
}
/// Cached root must match the non-cached `tree_hash_root` for a full `FixedVector<Hash256, _>`.
#[test]
fn fixed_vector_hash256() {
    let arena = &mut CacheArena::default();
    let len = 16;
    let vec = Vector16::new(int_hashes(0, len)).unwrap();
    let mut cache = vec.new_tree_hash_cache(arena);
    assert_eq!(
        vec.tree_hash_root(),
        vec.recalculate_tree_hash_root(arena, &mut cache).unwrap()
    );
}
/// Cached root must match the non-cached `tree_hash_root` for a packed `FixedVector<u64, _>`.
#[test]
fn fixed_vector_u64() {
    let arena = &mut CacheArena::default();
    let len = 16;
    let vec = Vector16u64::new((0..len).collect()).unwrap();
    let mut cache = vec.new_tree_hash_cache(arena);
    assert_eq!(
        vec.tree_hash_root(),
        vec.recalculate_tree_hash_root(arena, &mut cache).unwrap()
    );
}
/// Cached root must match the non-cached `tree_hash_root` for a partially-full list.
#[test]
fn variable_list_hash256() {
    let arena = &mut CacheArena::default();
    let len = 13;
    let list = List16::new(int_hashes(0, len)).unwrap();
    let mut cache = list.new_tree_hash_cache(arena);
    assert_eq!(
        list.tree_hash_root(),
        list.recalculate_tree_hash_root(arena, &mut cache).unwrap()
    );
}
/// Randomized check at a power-of-two list capacity (256).
#[quickcheck]
fn quickcheck_variable_list_h256_256(leaves_and_skips: Vec<(u64, bool)>) -> bool {
    variable_list_h256_test::<U256>(leaves_and_skips)
}
/// Randomized check at a capacity one below a power of two (255).
#[quickcheck]
fn quickcheck_variable_list_h256_255(leaves_and_skips: Vec<(u64, bool)>) -> bool {
    variable_list_h256_test::<U255>(leaves_and_skips)
}
/// Randomized check at a capacity one above a power of two (257).
#[quickcheck]
fn quickcheck_variable_list_h256_257(leaves_and_skips: Vec<(u64, bool)>) -> bool {
    variable_list_h256_test::<U257>(leaves_and_skips)
}
/// Drive a list through a sequence of growing prefixes, optionally skipping
/// cache updates (to exercise multi-step diffs), and check that the cached
/// root matches the freshly-computed root whenever the cache is updated.
fn variable_list_h256_test<Len: Unsigned>(leaves_and_skips: Vec<(u64, bool)>) -> bool {
    let arena = &mut CacheArena::default();
    // Leaves are capped at `Len`; the skip flags are not.
    let leaves: Vec<_> = leaves_and_skips
        .iter()
        .map(|(l, _)| Hash256::from_low_u64_be(*l))
        .take(Len::to_usize())
        .collect();

    let mut list: VariableList<Hash256, Len>;
    let init: VariableList<Hash256, Len> = VariableList::new(vec![]).unwrap();

    let mut cache = init.new_tree_hash_cache(arena);

    // NOTE(review): if `leaves_and_skips.len()` exceeds `Len + 1`, `end` can
    // outgrow `leaves.len()` and the slice below would panic — presumably the
    // quickcheck input sizes never reach `Len` in practice; confirm.
    for (end, (_, update_cache)) in leaves_and_skips.into_iter().enumerate() {
        list = VariableList::new(leaves[..end].to_vec()).unwrap();

        if update_cache
            && list
                .recalculate_tree_hash_root(arena, &mut cache)
                .unwrap()
                .as_bytes()
                != &list.tree_hash_root()[..]
        {
            return false;
        }
    }
    true
}

View File

@@ -33,7 +33,6 @@ test_random_derive = { path = "../../common/test_random_derive" }
tree_hash = { workspace = true, features = ["arbitrary"] }
tree_hash_derive = { workspace = true }
rand_xorshift = "0.3.0"
cached_tree_hash = { workspace = true }
serde_yaml = { workspace = true }
tempfile = { workspace = true }
derivative = { workspace = true }

View File

@@ -121,7 +121,6 @@ pub enum Error {
state: Slot,
},
TreeHashError(tree_hash::Error),
CachedTreeHashError(cached_tree_hash::Error),
InvalidValidatorPubkey(ssz::DecodeError),
ValidatorRegistryShrunk,
TreeHashCacheInconsistent,
@@ -2560,12 +2559,6 @@ impl From<bls::Error> for Error {
}
}
impl From<cached_tree_hash::Error> for Error {
fn from(e: cached_tree_hash::Error) -> Error {
Error::CachedTreeHashError(e)
}
}
impl From<tree_hash::Error> for Error {
fn from(e: tree_hash::Error) -> Error {
Error::TreeHashError(e)

View File

@@ -1,14 +1,10 @@
use crate::test_utils::TestRandom;
use crate::Unsigned;
use crate::{BeaconState, EthSpec, Hash256};
use cached_tree_hash::Error;
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
use compare_fields_derive::CompareFields;
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use test_random_derive::TestRandom;
use tree_hash::{mix_in_length, TreeHash, BYTES_PER_CHUNK};
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;
/// `HistoricalSummary` matches the components of the phase0 `HistoricalBatch`
@@ -44,46 +40,3 @@ impl HistoricalSummary {
}
}
}
/// Wrapper type allowing the implementation of `CachedTreeHash`.
#[derive(Debug)]
pub struct HistoricalSummaryCache<'a, N: Unsigned> {
    /// Borrowed list of summaries; no copy is made to build this view.
    pub inner: &'a VariableList<HistoricalSummary, N>,
}
impl<'a, N: Unsigned> HistoricalSummaryCache<'a, N> {
    /// Wrap a borrowed list of historical summaries.
    pub fn new(inner: &'a VariableList<HistoricalSummary, N>) -> Self {
        Self { inner }
    }

    /// Length of the wrapped list.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.inner.len()
    }
}
impl<'a, N: Unsigned> CachedTreeHash<TreeHashCache> for HistoricalSummaryCache<'a, N> {
    /// Build a cache deep enough for the list's maximum capacity `N`, with one
    /// leaf per summary.
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        TreeHashCache::new(arena, int_log(N::to_usize()), self.len())
    }

    /// Lists mix the current length into the Merkle root, per SSZ list hashing.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        Ok(mix_in_length(
            &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?,
            self.len(),
        ))
    }
}
/// Iterate over `values`, yielding each summary's tree hash root as one leaf chunk.
///
/// Note: `tree_hash_root` is recomputed for every element on each call — the
/// caching here is at the Merkle-tree level, not per element.
pub fn leaf_iter(
    values: &[HistoricalSummary],
) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
    values
        .iter()
        .map(|value| value.tree_hash_root())
        .map(Hash256::to_fixed_bytes)
}

View File

@@ -84,7 +84,6 @@ pub mod config_and_preset;
pub mod execution_block_header;
pub mod fork_context;
pub mod participation_flags;
pub mod participation_list;
pub mod payload;
pub mod preset;
pub mod slot_epoch;
@@ -200,7 +199,6 @@ pub use crate::light_client_update::{
LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra,
};
pub use crate::participation_flags::ParticipationFlags;
pub use crate::participation_list::ParticipationList;
pub use crate::payload::{
AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella,
BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadRef, BlockType, ExecPayload,

View File

@@ -1,55 +0,0 @@
#![allow(clippy::arithmetic_side_effects)]
use crate::{Hash256, ParticipationFlags, Unsigned, VariableList};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache};
use tree_hash::{mix_in_length, BYTES_PER_CHUNK};
/// Wrapper type allowing the implementation of `CachedTreeHash`.
#[derive(Debug)]
pub struct ParticipationList<'a, N: Unsigned> {
    /// Borrowed list of flags; no copy is made to build this view.
    pub inner: &'a VariableList<ParticipationFlags, N>,
}
impl<'a, N: Unsigned> ParticipationList<'a, N> {
    /// Wrap a borrowed list of participation flags.
    pub fn new(inner: &'a VariableList<ParticipationFlags, N>) -> Self {
        Self { inner }
    }
}
impl<'a, N: Unsigned> CachedTreeHash<TreeHashCache> for ParticipationList<'a, N> {
    /// Build a cache sized for `N` flags packed one byte each into
    /// `BYTES_PER_CHUNK`-byte leaves (see `leaf_count`/`leaf_iter`).
    fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache {
        TreeHashCache::new(
            arena,
            int_log(N::to_usize() / BYTES_PER_CHUNK),
            leaf_count(self.inner.len()),
        )
    }

    /// Lists mix the current length into the Merkle root, per SSZ list hashing.
    fn recalculate_tree_hash_root(
        &self,
        arena: &mut CacheArena,
        cache: &mut TreeHashCache,
    ) -> Result<Hash256, Error> {
        Ok(mix_in_length(
            &cache.recalculate_merkle_root(arena, leaf_iter(self.inner))?,
            self.inner.len(),
        ))
    }
}
/// Number of `BYTES_PER_CHUNK`-byte leaves needed to hold `len` participation
/// flags at one byte each, rounding up for a partial final chunk.
pub fn leaf_count(len: usize) -> usize {
    len / BYTES_PER_CHUNK + usize::from(len % BYTES_PER_CHUNK != 0)
}
/// Iterate over `values`, packing one flag byte per position into
/// `BYTES_PER_CHUNK`-byte chunks; the final chunk is zero-padded on the right.
pub fn leaf_iter(
    values: &[ParticipationFlags],
) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
    values.chunks(BYTES_PER_CHUNK).map(|xs| {
        // Start from an all-zero chunk so missing trailing flags stay zero.
        let mut chunk = [0u8; BYTES_PER_CHUNK];
        for (i, flags) in xs.iter().enumerate() {
            chunk[i] = flags.into_u8();
        }
        chunk
    })
}