Move tree_hash from ssz into own crate

This commit is contained in:
Paul Hauner
2019-04-15 11:14:30 +10:00
parent 7132ee59c0
commit 0b5c10212d
10 changed files with 461 additions and 443 deletions

View File

@@ -0,0 +1,11 @@
[package]
name = "tree_hash"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
ethereum-types = "0.5"
hashing = { path = "../hashing" }
int_to_bytes = { path = "../int_to_bytes" }
ssz = { path = "../ssz" }

View File

@@ -0,0 +1,193 @@
use super::*;
/// A cached merkle tree: the concatenated 32-byte chunks of every tree node, plus
/// one dirty flag per chunk recording which nodes changed since the last update.
#[derive(Debug, PartialEq, Clone)]
pub struct TreeHashCache {
    // Concatenated tree nodes; each node occupies `BYTES_PER_CHUNK` bytes.
    cache: Vec<u8>,
    // One flag per chunk; `true` means the chunk has been modified.
    chunk_modified: Vec<bool>,
}
impl Into<Vec<u8>> for TreeHashCache {
fn into(self) -> Vec<u8> {
self.cache
}
}
impl TreeHashCache {
pub fn new<T>(item: &T) -> Result<Self, Error>
where
T: CachedTreeHash<T>,
{
item.build_tree_hash_cache()
}
pub fn from_elems(cache: Vec<u8>, chunk_modified: Vec<bool>) -> Self {
Self {
cache,
chunk_modified,
}
}
pub fn from_leaves_and_subtrees<T>(
item: &T,
leaves_and_subtrees: Vec<Self>,
) -> Result<Self, Error>
where
T: CachedTreeHash<T>,
{
let offset_handler = BTreeOverlay::new(item, 0)?;
// Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out
// later.
let num_provided_leaf_nodes = leaves_and_subtrees.len();
// Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
// all the to-be-built internal nodes with zeros and append the leaves and subtrees.
let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK;
let leaves_and_subtrees_bytes = leaves_and_subtrees
.iter()
.fold(0, |acc, t| acc + t.bytes_len());
let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes);
cache.resize(internal_node_bytes, 0);
// Allocate enough bytes to store all the leaves.
let mut leaves = Vec::with_capacity(offset_handler.num_leaf_nodes * HASHSIZE);
// Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
// concatenating their merkle trees.
for t in leaves_and_subtrees {
leaves.append(&mut t.root()?);
cache.append(&mut t.into_merkle_tree());
}
// Pad the leaves to an even power-of-two, using zeros.
pad_for_leaf_count(num_provided_leaf_nodes, &mut cache);
// Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
// internal nodes created earlier with the internal nodes generated by `merkleize`.
let mut merkleized = merkleize(leaves);
merkleized.split_off(internal_node_bytes);
cache.splice(0..internal_node_bytes, merkleized);
Ok(Self {
chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK],
cache,
})
}
pub fn from_bytes(bytes: Vec<u8>, initial_modified_state: bool) -> Result<Self, Error> {
if bytes.len() % BYTES_PER_CHUNK > 0 {
return Err(Error::BytesAreNotEvenChunks(bytes.len()));
}
Ok(Self {
chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
cache: bytes,
})
}
pub fn bytes_len(&self) -> usize {
self.cache.len()
}
pub fn root(&self) -> Result<Vec<u8>, Error> {
self.cache
.get(0..HASHSIZE)
.ok_or_else(|| Error::NoBytesForRoot)
.and_then(|slice| Ok(slice.to_vec()))
}
pub fn splice(&mut self, chunk_range: Range<usize>, replace_with: Self) {
let (bytes, bools) = replace_with.into_components();
// Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
self.chunk_modified.splice(chunk_range.clone(), bools);
self.cache
.splice(node_range_to_byte_range(&chunk_range), bytes);
}
pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
if !self.chunk_equals(chunk, to)? {
self.cache
.get_mut(start..end)
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
.copy_from_slice(to);
self.chunk_modified[chunk] = true;
}
Ok(())
}
pub fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
Some((
self.cache.get(node_range_to_byte_range(&chunk_range))?,
self.chunk_modified.get(chunk_range)?,
))
}
pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
self.cache
.get_mut(start..end)
.ok_or_else(|| Error::NoBytesForChunk(chunk))?
.copy_from_slice(to);
self.chunk_modified[chunk] = true;
Ok(())
}
pub fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
Ok(self
.cache
.get(start..end)
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
}
pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
Ok(self.get_chunk(chunk)? == other)
}
pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
self.chunk_modified
.get(chunk)
.cloned()
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
}
pub fn either_modified(&self, children: (&usize, &usize)) -> Result<bool, Error> {
Ok(self.changed(*children.0)? | self.changed(*children.1)?)
}
pub fn hash_children(&self, children: (&usize, &usize)) -> Result<Vec<u8>, Error> {
let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
child_bytes.append(&mut self.get_chunk(*children.0)?.to_vec());
child_bytes.append(&mut self.get_chunk(*children.1)?.to_vec());
Ok(hash(&child_bytes))
}
pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result<Vec<u8>, Error> {
let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK);
bytes.append(&mut self.get_chunk(chunk)?.to_vec());
bytes.append(&mut int_to_bytes32(length as u64));
Ok(hash(&bytes))
}
pub fn into_merkle_tree(self) -> Vec<u8> {
self.cache
}
pub fn into_components(self) -> (Vec<u8>, Vec<bool>) {
(self.cache, self.chunk_modified)
}
}

View File

@@ -0,0 +1,239 @@
use super::resize::{grow_merkle_cache, shrink_merkle_cache};
use super::*;
use ssz::ssz_encode;
impl CachedTreeHash<u64> for u64 {
    fn item_type() -> ItemType {
        ItemType::Basic
    }

    fn build_tree_hash_cache(&self) -> Result<TreeHashCache, Error> {
        // `from_bytes` already returns the right `Result`; the previous
        // `Ok(... ?)` re-wrap was redundant.
        TreeHashCache::from_bytes(merkleize(ssz_encode(self)), false)
    }

    /// A `u64` is 8 bytes of raw SSZ.
    fn num_bytes(&self) -> usize {
        8
    }

    fn offsets(&self) -> Result<Vec<usize>, Error> {
        // Basic types are packed leaves; they never have a tree overlay of their own.
        Err(Error::ShouldNotProduceBTreeOverlay)
    }

    fn num_child_nodes(&self) -> usize {
        0
    }

    fn packed_encoding(&self) -> Vec<u8> {
        ssz_encode(self)
    }

    /// Four 8-byte values fit in one 32-byte chunk.
    fn packing_factor() -> usize {
        32 / 8
    }

    fn cached_hash_tree_root(
        &self,
        other: &Self,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error> {
        // Only re-hash and write the chunk if the value actually changed.
        if self != other {
            let leaf = merkleize(ssz_encode(self));
            cache.modify_chunk(chunk, &leaf)?;
        }

        Ok(chunk + 1)
    }
}
impl<T> CachedTreeHash<Vec<T>> for Vec<T>
where
    T: CachedTreeHash<T>,
{
    fn item_type() -> ItemType {
        ItemType::List
    }

    fn build_tree_hash_cache(&self) -> Result<TreeHashCache, Error> {
        match T::item_type() {
            // Basic items pack several values per chunk; merkleize them directly.
            ItemType::Basic => TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)), false),
            // Composite/list items each contribute a whole subtree as a leaf.
            ItemType::Composite | ItemType::List => {
                let subtrees = self
                    .iter()
                    .map(|item| TreeHashCache::new(item))
                    .collect::<Result<Vec<TreeHashCache>, _>>()?;

                TreeHashCache::from_leaves_and_subtrees(self, subtrees)
            }
        }
    }

    fn offsets(&self) -> Result<Vec<usize>, Error> {
        let offsets = match T::item_type() {
            // Each chunk of packed basic values occupies exactly one node.
            ItemType::Basic => vec![1; self.len() / T::packing_factor()],
            // Each composite/list item occupies its whole subtree's node count.
            ItemType::Composite | ItemType::List => {
                let mut offsets = vec![];

                for item in self {
                    offsets.push(BTreeOverlay::new(item, 0)?.total_nodes())
                }

                offsets
            }
        };

        Ok(offsets)
    }

    fn num_child_nodes(&self) -> usize {
        // TODO
        42
    }

    fn num_bytes(&self) -> usize {
        self.iter().fold(0, |acc, item| acc + item.num_bytes())
    }

    fn packed_encoding(&self) -> Vec<u8> {
        panic!("List should never be packed")
    }

    fn packing_factor() -> usize {
        1
    }

    fn cached_hash_tree_root(
        &self,
        other: &Vec<T>,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error> {
        let offset_handler = BTreeOverlay::new(self, chunk)?;
        let old_offset_handler = BTreeOverlay::new(other, chunk)?;

        // If the leaf count changed, resize the cached tree before diffing.
        // (A redundant second `BTreeOverlay::new(other, chunk)` that shadowed
        // `old_offset_handler` has been removed; it produced an identical value.)
        if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes {
            // Get slices of the existing tree from the cache.
            let (old_bytes, old_flags) = cache
                .slices(old_offset_handler.chunk_range())
                .ok_or_else(|| Error::UnableToObtainSlices)?;

            let (new_bytes, new_flags) =
                if offset_handler.num_leaf_nodes > old_offset_handler.num_leaf_nodes {
                    grow_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_offset_handler.height(),
                        offset_handler.height(),
                    )
                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
                } else {
                    shrink_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_offset_handler.height(),
                        offset_handler.height(),
                        offset_handler.total_chunks(),
                    )
                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
                };

            // Create a `TreeHashCache` from the raw elements.
            let modified_cache = TreeHashCache::from_elems(new_bytes, new_flags);

            // Splice the newly created `TreeHashCache` over the existing elements.
            cache.splice(old_offset_handler.chunk_range(), modified_cache);
        }

        match T::item_type() {
            ItemType::Basic => {
                // Update each packed chunk that changed, then splice in the whole
                // (possibly resized) leaf region.
                let leaves = get_packed_leaves(self);

                for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() {
                    if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) {
                        cache.maybe_update_chunk(*chunk, latest)?;
                    }
                }
                let first_leaf_chunk = offset_handler.first_leaf_node()?;
                cache.splice(
                    first_leaf_chunk..offset_handler.next_node,
                    TreeHashCache::from_bytes(leaves, true)?,
                );
            }
            ItemType::Composite | ItemType::List => {
                // Walk leaves back-to-front so splices don't invalidate chunk
                // indices of leaves not yet visited.
                let mut i = offset_handler.num_leaf_nodes;
                for &start_chunk in offset_handler.iter_leaf_nodes().rev() {
                    i -= 1;
                    match (other.get(i), self.get(i)) {
                        // The item existed in the previous list and exists in the current list.
                        (Some(old), Some(new)) => {
                            new.cached_hash_tree_root(old, cache, start_chunk)?;
                        }
                        // The item existed in the previous list but does not exist in this list.
                        //
                        // I.e., the list has been shortened.
                        (Some(old), None) => {
                            // Splice out the entire tree of the removed node, replacing it with a
                            // single padding node.
                            let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node();
                            cache.splice(
                                start_chunk..end_chunk,
                                TreeHashCache::from_bytes(vec![0; HASHSIZE], true)?,
                            );
                        }
                        // The item did not exist in the previous list but does exist in this list.
                        //
                        // I.e., the list has been lengthened.
                        (None, Some(new)) => {
                            let bytes: Vec<u8> = TreeHashCache::new(new)?.into();
                            cache.splice(
                                start_chunk..start_chunk + 1,
                                TreeHashCache::from_bytes(bytes, true)?,
                            );
                        }
                        // The item didn't exist in the old list and doesn't exist in the new list,
                        // nothing to do.
                        (None, None) => {}
                    };
                }
            }
        }

        // Re-hash any internal node whose children changed, bottom-up.
        for (&parent, children) in offset_handler.iter_internal_nodes().rev() {
            if cache.either_modified(children)? {
                cache.modify_chunk(parent, &cache.hash_children(children)?)?;
            }
        }

        // If the root node or the length has changed, mix in the length of the list.
        let root_node = offset_handler.root();
        if cache.changed(root_node)? | (self.len() != other.len()) {
            cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
        }

        Ok(offset_handler.next_node())
    }
}
/// Concatenates the packed SSZ encodings of every element in `vec`, padded out to
/// whole, power-of-two-count chunks via `sanitise_bytes`.
fn get_packed_leaves<T>(vec: &Vec<T>) -> Vec<u8>
where
    T: CachedTreeHash<T>,
{
    // Reserve enough for the final (sanitised) size up front.
    let capacity = num_sanitized_leaves(vec.num_bytes()) * HASHSIZE;

    let mut packed = Vec::with_capacity(capacity);
    for item in vec {
        packed.extend_from_slice(&item.packed_encoding());
    }

    sanitise_bytes(packed)
}

View File

@@ -0,0 +1,249 @@
use hashing::hash;
use int_to_bytes::int_to_bytes32;
use std::fmt::Debug;
use std::iter::Iterator;
use std::ops::Range;
mod cached_tree_hash;
mod impls;
mod resize;
pub use cached_tree_hash::TreeHashCache;
/// Size of a single merkle-tree node, in bytes.
pub const BYTES_PER_CHUNK: usize = 32;
/// Size of a hash digest, in bytes.
pub const HASHSIZE: usize = 32;
/// Input size of one hashing round: two concatenated child chunks.
// NOTE(review): "CHUNCK" is a typo for "CHUNK"; this constant is `pub`, so renaming
// it would break external users — left as-is.
pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK;
/// Errors produced while building or updating a `TreeHashCache`.
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    /// `offsets()` was called on a basic (leaf) type.
    ShouldNotProduceBTreeOverlay,
    /// A `BTreeOverlay` had no leaf offsets.
    NoFirstNode,
    /// The cache was too short to contain a root chunk.
    NoBytesForRoot,
    /// A requested chunk range fell outside the cache.
    UnableToObtainSlices,
    UnableToGrowMerkleTree,
    UnableToShrinkMerkleTree,
    /// Byte length was not a multiple of `BYTES_PER_CHUNK` (carries the length).
    BytesAreNotEvenChunks(usize),
    /// No dirty flag / bytes exist for the given chunk index.
    NoModifiedFieldForChunk(usize),
    NoBytesForChunk(usize),
}
/// Classifies how a type is laid out in the merkle tree.
#[derive(Debug, PartialEq, Clone)]
pub enum ItemType {
    /// Fixed-size value packed (possibly several-per-chunk) into leaves.
    Basic,
    /// Variable-length collection; its length is mixed into the root.
    List,
    /// Fixed-shape container whose fields are subtrees.
    Composite,
}
// TODO: remove debug requirement.
/// A type whose tree-hash root can be computed incrementally via a `TreeHashCache`.
pub trait CachedTreeHash<Item>: Debug {
    /// How this type is laid out in the tree (basic, list or composite).
    fn item_type() -> ItemType;
    /// Builds a fresh cache containing this item's full merkle tree.
    fn build_tree_hash_cache(&self) -> Result<TreeHashCache, Error>;
    /// Return the number of bytes when this element is encoded as raw SSZ _without_ length
    /// prefixes.
    fn num_bytes(&self) -> usize;
    /// Per-leaf subtree sizes (in nodes), used to build a `BTreeOverlay`.
    /// Errors for basic types, which have no overlay of their own.
    fn offsets(&self) -> Result<Vec<usize>, Error>;
    /// Number of non-root nodes in this item's subtree.
    fn num_child_nodes(&self) -> usize;
    /// Raw SSZ encoding used when packing this value into a chunk.
    fn packed_encoding(&self) -> Vec<u8>;
    /// How many values of this type fit into one 32-byte chunk.
    fn packing_factor() -> usize;
    /// Updates `cache` to reflect the change from `other` (old value) to `self`,
    /// starting at chunk index `chunk`; returns the first chunk past this subtree.
    fn cached_hash_tree_root(
        &self,
        other: &Item,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error>;
}
/// Left and right child indices of `parent` in a breadth-first tree layout.
fn children(parent: usize) -> (usize, usize) {
    let left = 2 * parent + 1;
    (left, left + 1)
}
/// Total node count of a complete binary tree with `num_leaves` leaves.
fn num_nodes(num_leaves: usize) -> usize {
    // Internal nodes number one fewer than the leaves.
    num_leaves + (num_leaves - 1)
}
/// Converts a range of chunk indices into the corresponding byte range.
fn node_range_to_byte_range(node_range: &Range<usize>) -> Range<usize> {
    let start = node_range.start * HASHSIZE;
    let end = node_range.end * HASHSIZE;
    start..end
}
/// Maps the logical nodes of an item's binary merkle tree onto chunk indices
/// within a `TreeHashCache`.
#[derive(Debug)]
pub struct BTreeOverlay {
    // Count of internal (non-leaf) nodes.
    num_internal_nodes: usize,
    pub num_leaf_nodes: usize,
    // Chunk index of the root node.
    first_node: usize,
    // First chunk index past this tree.
    next_node: usize,
    // Chunk index of every node: internal nodes first, then leaves.
    offsets: Vec<usize>,
}
impl BTreeOverlay {
pub fn new<T>(item: &T, initial_offset: usize) -> Result<Self, Error>
where
T: CachedTreeHash<T>,
{
Self::from_lengths(initial_offset, item.offsets()?)
}
fn from_lengths(offset: usize, mut lengths: Vec<usize>) -> Result<Self, Error> {
// Extend it to the next power-of-two, if it is not already.
let num_leaf_nodes = if lengths.len().is_power_of_two() {
lengths.len()
} else {
let num_leaf_nodes = lengths.len().next_power_of_two();
lengths.resize(num_leaf_nodes, 1);
num_leaf_nodes
};
let num_nodes = num_nodes(num_leaf_nodes);
let num_internal_nodes = num_nodes - num_leaf_nodes;
let mut offsets = Vec::with_capacity(num_nodes);
offsets.append(&mut (offset..offset + num_internal_nodes).collect());
let mut next_node = num_internal_nodes + offset;
for i in 0..num_leaf_nodes {
offsets.push(next_node);
next_node += lengths[i];
}
Ok(Self {
num_internal_nodes,
num_leaf_nodes,
offsets,
first_node: offset,
next_node,
})
}
pub fn root(&self) -> usize {
self.first_node
}
pub fn height(&self) -> usize {
self.num_leaf_nodes.trailing_zeros() as usize
}
pub fn chunk_range(&self) -> Range<usize> {
self.first_node..self.next_node
}
pub fn total_chunks(&self) -> usize {
self.next_node - self.first_node
}
pub fn total_nodes(&self) -> usize {
self.num_internal_nodes + self.num_leaf_nodes
}
pub fn first_leaf_node(&self) -> Result<usize, Error> {
self.offsets
.get(self.num_internal_nodes)
.cloned()
.ok_or_else(|| Error::NoFirstNode)
}
pub fn next_node(&self) -> usize {
self.next_node
}
/// Returns an iterator visiting each internal node, providing the left and right child chunks
/// for the node.
pub fn iter_internal_nodes<'a>(
&'a self,
) -> impl DoubleEndedIterator<Item = (&'a usize, (&'a usize, &'a usize))> {
let internal_nodes = &self.offsets[0..self.num_internal_nodes];
internal_nodes.iter().enumerate().map(move |(i, parent)| {
let children = children(i);
(
parent,
(&self.offsets[children.0], &self.offsets[children.1]),
)
})
}
/// Returns an iterator visiting each leaf node, providing the chunk for that node.
pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator<Item = &'a usize> {
let leaf_nodes = &self.offsets[self.num_internal_nodes..];
leaf_nodes.iter()
}
}
/// Split `values` into power-of-two, identical-length chunks (padding with `0`) and merkleize
/// them, returning the entire merkle tree.
///
/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`.
///
/// # Panics
///
/// Panics (defensively) if the sanitised input yields zero leaves or a
/// non-power-of-two leaf count.
pub fn merkleize(values: Vec<u8>) -> Vec<u8> {
    let mut values = sanitise_bytes(values);

    let leaves = values.len() / HASHSIZE;

    if leaves == 0 {
        panic!("No full leaves");
    }

    if !leaves.is_power_of_two() {
        panic!("leaves is not power of two");
    }

    // Remember the leaf length before `values` is moved into the output buffer;
    // this lets us `append` (move) instead of the previous `to_vec()` clone.
    let values_len = values.len();

    // Zeroed internal nodes first, then the leaves.
    let mut o: Vec<u8> = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE];
    o.append(&mut values);

    // Hash pairs of nodes from the back of the buffer towards the front, writing
    // each parent into the slot immediately before the region already processed.
    let mut i = o.len();
    let mut j = o.len() - values_len;

    while i >= MERKLE_HASH_CHUNCK {
        i -= MERKLE_HASH_CHUNCK;
        let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]);

        j -= HASHSIZE;
        o[j..j + HASHSIZE].copy_from_slice(&hash);
    }

    o
}
/// Zero-pads `bytes` so it fills a power-of-two number of whole `HASHSIZE` leaves.
pub fn sanitise_bytes(mut bytes: Vec<u8>) -> Vec<u8> {
    let present_leaves = num_unsanitized_leaves(bytes.len());
    let required_leaves = present_leaves.next_power_of_two();

    // Grow when either the leaf count must round up to a power of two, or the
    // final leaf is only partially filled.
    let needs_padding = present_leaves != required_leaves || last_leaf_needs_padding(bytes.len());
    if needs_padding {
        bytes.resize(num_bytes(required_leaves), 0);
    }

    bytes
}
/// Appends one zeroed chunk per missing leaf so `num_leaves` rounds up to a power of two.
fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec<u8>) {
    let required_leaves = num_leaves.next_power_of_two();
    let padding_bytes = (required_leaves - num_leaves) * BYTES_PER_CHUNK;
    bytes.resize(bytes.len() + padding_bytes, 0);
}
/// `true` when the byte count does not fill a whole number of leaves.
fn last_leaf_needs_padding(num_bytes: usize) -> bool {
    num_bytes % HASHSIZE > 0
}
/// Number of leaves needed to hold `num_bytes`, rounding up.
fn num_unsanitized_leaves(num_bytes: usize) -> usize {
    let whole = num_bytes / HASHSIZE;
    if num_bytes % HASHSIZE == 0 {
        whole
    } else {
        whole + 1
    }
}
/// Number of leaves after sanitising: bytes rounded up to whole leaves, then the
/// leaf count rounded up to a power of two.
fn num_sanitized_leaves(num_bytes: usize) -> usize {
    let unpadded_leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE;
    unpadded_leaves.next_power_of_two()
}
/// Byte count occupied by `num_leaves` whole leaves.
fn num_bytes(num_leaves: usize) -> usize {
    HASHSIZE * num_leaves
}

View File

@@ -0,0 +1,276 @@
use super::*;
/// New vec is bigger than old vec.
///
/// Copies a cached tree of height `from_height` into a freshly-allocated tree of
/// height `to_height`, level by level. Old level `i` lands at new level
/// `i + (to_height - from_height)`, occupying the first slots; all newly-created
/// nodes are zeroed bytes flagged as modified (`true`).
pub fn grow_merkle_cache(
    old_bytes: &[u8],
    old_flags: &[bool],
    from_height: usize,
    to_height: usize,
) -> Option<(Vec<u8>, Vec<bool>)> {
    // Determine the size of our new tree. It is not just a simple `1 << to_height` as there can be
    // an arbitrary number of nodes in `old_bytes` leaves if those leaves are subtrees.
    let to_nodes = {
        let old_nodes = old_bytes.len() / HASHSIZE;
        let additional_nodes = old_nodes - nodes_in_tree_of_height(from_height);
        nodes_in_tree_of_height(to_height) + additional_nodes
    };

    // New nodes default to zeroed bytes, flagged as modified.
    let mut bytes = vec![0; to_nodes * HASHSIZE];
    let mut flags = vec![true; to_nodes];

    let leaf_level = from_height;

    for i in 0..=from_height as usize {
        // If we're on the leaf slice, grab the first byte and all of the bytes after that.
        // This is required because we can have an arbitrary number of bytes at the leaf level
        // (e.g., the case where there are subtrees as leaves).
        //
        // If we're not on a leaf level, the number of nodes is fixed and known.
        let (old_byte_slice, old_flag_slice) = if i == leaf_level {
            (
                old_bytes.get(first_byte_at_height(i)..)?,
                old_flags.get(first_node_at_height(i)..)?,
            )
        } else {
            (
                old_bytes.get(byte_range_at_height(i))?,
                old_flags.get(node_range_at_height(i))?,
            )
        };

        // Old level `i` maps to this deeper level in the grown tree.
        let new_i = i + to_height - from_height;
        let (new_byte_slice, new_flag_slice) = if i == leaf_level {
            (
                bytes.get_mut(first_byte_at_height(new_i)..)?,
                flags.get_mut(first_node_at_height(new_i)..)?,
            )
        } else {
            (
                bytes.get_mut(byte_range_at_height(new_i))?,
                flags.get_mut(node_range_at_height(new_i))?,
            )
        };

        // Copy into the leading slots of the destination level; the remainder keeps
        // its "new node" defaults.
        new_byte_slice
            .get_mut(0..old_byte_slice.len())?
            .copy_from_slice(old_byte_slice);
        new_flag_slice
            .get_mut(0..old_flag_slice.len())?
            .copy_from_slice(old_flag_slice);
    }

    Some((bytes, flags))
}
/// New vec is smaller than old vec.
///
/// The inverse of `grow_merkle_cache`: copies the leading slots of each source
/// level `i + (from_height - to_height)` into new level `i`. `to_nodes` must be
/// supplied by the caller because the leaf level may contain subtrees of
/// arbitrary size.
pub fn shrink_merkle_cache(
    from_bytes: &[u8],
    from_flags: &[bool],
    from_height: usize,
    to_height: usize,
    to_nodes: usize,
) -> Option<(Vec<u8>, Vec<bool>)> {
    let mut bytes = vec![0; to_nodes * HASHSIZE];
    let mut flags = vec![true; to_nodes];

    for i in 0..=to_height as usize {
        // Destination level `i` is fed from this deeper source level.
        let from_i = i + from_height - to_height;

        // Leaf levels are open-ended (subtree leaves have arbitrary size); other
        // levels have a fixed, known node count.
        let (from_byte_slice, from_flag_slice) = if from_i == from_height {
            (
                from_bytes.get(first_byte_at_height(from_i)..)?,
                from_flags.get(first_node_at_height(from_i)..)?,
            )
        } else {
            (
                from_bytes.get(byte_range_at_height(from_i))?,
                from_flags.get(node_range_at_height(from_i))?,
            )
        };

        let (to_byte_slice, to_flag_slice) = if i == to_height {
            (
                bytes.get_mut(first_byte_at_height(i)..)?,
                flags.get_mut(first_node_at_height(i)..)?,
            )
        } else {
            (
                bytes.get_mut(byte_range_at_height(i))?,
                flags.get_mut(node_range_at_height(i))?,
            )
        };

        // Only the prefix that fits in the smaller tree is retained.
        to_byte_slice.copy_from_slice(from_byte_slice.get(0..to_byte_slice.len())?);
        to_flag_slice.copy_from_slice(from_flag_slice.get(0..to_flag_slice.len())?);
    }

    Some((bytes, flags))
}
/// Node count of a complete binary tree of height `h` (2^(h+1) - 1).
fn nodes_in_tree_of_height(h: usize) -> usize {
    (1 << (h + 1)) - 1
}
/// Byte range covering every node at level `h`.
fn byte_range_at_height(h: usize) -> Range<usize> {
    let nodes = node_range_at_height(h);
    (nodes.start * HASHSIZE)..(nodes.end * HASHSIZE)
}
/// Node-index range covering every node at level `h`.
fn node_range_at_height(h: usize) -> Range<usize> {
    let first = first_node_at_height(h);
    let last = last_node_at_height(h);
    first..(last + 1)
}
/// Byte offset of the first node at level `h`.
fn first_byte_at_height(h: usize) -> usize {
    HASHSIZE * first_node_at_height(h)
}
/// Index of the first node at level `h`: the 2^h - 1 nodes of levels above precede it.
fn first_node_at_height(h: usize) -> usize {
    (1usize << h) - 1
}
/// Index of the last node at level `h` (equals 2^(h+1) - 2).
fn last_node_at_height(h: usize) -> usize {
    2 * ((1 << h) - 1)
}
#[cfg(test)]
mod test {
    use super::*;

    /// Height of a complete tree with `node_count` nodes (node_count = 2^(h+1) - 1).
    fn height_of(node_count: usize) -> usize {
        (node_count + 1).trailing_zeros() as usize - 1
    }

    /// Builds the expected `(bytes, flags)` after growing a tree whose nodes were
    /// all `42`-bytes / un-modified (`false`). Copied nodes occupy the first slots
    /// of each level that existed in the source tree; every other node is a new,
    /// zeroed, modified (`true`) node.
    fn expected_after_growth(from_height: usize, to_height: usize) -> (Vec<u8>, Vec<bool>) {
        let level_diff = to_height - from_height;
        let mut bytes: Vec<u8> = vec![];
        let mut flags: Vec<bool> = vec![];

        for h in 0..=to_height {
            let width = 1 << h;
            let copied = if h >= level_diff {
                1 << (h - level_diff)
            } else {
                0
            };

            for i in 0..width {
                if i < copied {
                    bytes.extend_from_slice(&[42; 32]);
                    flags.push(false);
                } else {
                    bytes.extend_from_slice(&[0; 32]);
                    flags.push(true);
                }
            }
        }

        (bytes, flags)
    }

    /// Grows a `small`-node tree to `big` nodes, checks the result against the
    /// hand-built expectation, then shrinks it back and checks we recover the
    /// original exactly.
    fn grow_then_shrink_roundtrip(small: usize, big: usize) {
        let original_bytes = vec![42; small * HASHSIZE];
        let original_flags = vec![false; small];

        let (grown_bytes, grown_flags) = grow_merkle_cache(
            &original_bytes,
            &original_flags,
            height_of(small),
            height_of(big),
        )
        .unwrap();

        let (expected_bytes, expected_flags) =
            expected_after_growth(height_of(small), height_of(big));
        assert_eq!(expected_bytes, grown_bytes);
        assert_eq!(expected_flags, grown_flags);

        let (shrunk_bytes, shrunk_flags) = shrink_merkle_cache(
            &grown_bytes,
            &grown_flags,
            height_of(big),
            height_of(small),
            small,
        )
        .unwrap();

        assert_eq!(original_bytes, shrunk_bytes);
        assert_eq!(original_flags, shrunk_flags);
    }

    #[test]
    fn can_grow_and_shrink_three_levels() {
        grow_then_shrink_roundtrip(1, 15);
    }

    #[test]
    fn can_grow_and_shrink_one_level() {
        grow_then_shrink_roundtrip(7, 15);
    }
}

View File

@@ -0,0 +1,937 @@
use hashing::hash;
use int_to_bytes::{int_to_bytes32, int_to_bytes8};
use tree_hash::*;
/// Total node count of a complete binary tree with `num_leaves` leaves.
fn num_nodes(num_leaves: usize) -> usize {
    num_leaves * 2 - 1
}
/// Test fixture: a composite struct of four basic (`u64`) fields.
#[derive(Clone, Debug)]
pub struct Inner {
    pub a: u64,
    pub b: u64,
    pub c: u64,
    pub d: u64,
}
/// Hand-rolled `CachedTreeHash` impl: `Inner` is a composite of four `u64` leaves.
impl CachedTreeHash<Inner> for Inner {
    fn item_type() -> ItemType {
        ItemType::Composite
    }

    fn build_tree_hash_cache(&self) -> Result<TreeHashCache, Error> {
        // Each field contributes its own subtree as a leaf.
        TreeHashCache::from_leaves_and_subtrees(
            self,
            vec![
                self.a.build_tree_hash_cache()?,
                self.b.build_tree_hash_cache()?,
                self.c.build_tree_hash_cache()?,
                self.d.build_tree_hash_cache()?,
            ],
        )
    }

    fn num_bytes(&self) -> usize {
        self.a.num_bytes() + self.b.num_bytes() + self.c.num_bytes() + self.d.num_bytes()
    }

    fn offsets(&self) -> Result<Vec<usize>, Error> {
        // One entry per field: the node count of that field's subtree.
        Ok(vec![
            self.a.num_child_nodes() + 1,
            self.b.num_child_nodes() + 1,
            self.c.num_child_nodes() + 1,
            self.d.num_child_nodes() + 1,
        ])
    }

    fn num_child_nodes(&self) -> usize {
        let leaves = 4;
        let children = self.a.num_child_nodes()
            + self.b.num_child_nodes()
            + self.c.num_child_nodes()
            + self.d.num_child_nodes();
        num_nodes(leaves) + children - 1
    }

    fn packed_encoding(&self) -> Vec<u8> {
        panic!("Struct should never be packed")
    }

    fn packing_factor() -> usize {
        1
    }

    fn cached_hash_tree_root(
        &self,
        other: &Self,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error> {
        let offset_handler = BTreeOverlay::new(self, chunk)?;

        // Skip past the internal nodes and update any changed leaf nodes.
        {
            let chunk = offset_handler.first_leaf_node()?;
            let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?;
            let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?;
            let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?;
            let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?;
        }

        // Re-hash any internal node whose children changed, bottom-up.
        for (&parent, children) in offset_handler.iter_internal_nodes().rev() {
            if cache.either_modified(children)? {
                cache.modify_chunk(parent, &cache.hash_children(children)?)?;
            }
        }

        Ok(offset_handler.next_node())
    }
}
/// Test fixture: a composite struct with a nested composite (`Inner`) field.
#[derive(Clone, Debug)]
pub struct Outer {
    pub a: u64,
    pub b: Inner,
    pub c: u64,
}
/// Hand-rolled `CachedTreeHash` impl: `Outer` has three fields, one of which
/// (`b: Inner`) is itself a composite subtree.
impl CachedTreeHash<Outer> for Outer {
    fn item_type() -> ItemType {
        ItemType::Composite
    }

    fn build_tree_hash_cache(&self) -> Result<TreeHashCache, Error> {
        // Each field contributes its own subtree as a leaf.
        TreeHashCache::from_leaves_and_subtrees(
            self,
            vec![
                self.a.build_tree_hash_cache()?,
                self.b.build_tree_hash_cache()?,
                self.c.build_tree_hash_cache()?,
            ],
        )
    }

    fn num_bytes(&self) -> usize {
        self.a.num_bytes() + self.b.num_bytes() + self.c.num_bytes()
    }

    fn num_child_nodes(&self) -> usize {
        let leaves = 3;
        let children =
            self.a.num_child_nodes() + self.b.num_child_nodes() + self.c.num_child_nodes();
        num_nodes(leaves) + children - 1
    }

    fn offsets(&self) -> Result<Vec<usize>, Error> {
        // One entry per field: the node count of that field's subtree.
        Ok(vec![
            self.a.num_child_nodes() + 1,
            self.b.num_child_nodes() + 1,
            self.c.num_child_nodes() + 1,
        ])
    }

    fn packed_encoding(&self) -> Vec<u8> {
        panic!("Struct should never be packed")
    }

    fn packing_factor() -> usize {
        1
    }

    fn cached_hash_tree_root(
        &self,
        other: &Self,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error> {
        let offset_handler = BTreeOverlay::new(self, chunk)?;

        // Skip past the internal nodes and update any changed leaf nodes.
        {
            let chunk = offset_handler.first_leaf_node()?;
            let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?;
            let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?;
            let _chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?;
        }

        // Re-hash any internal node whose children changed, bottom-up.
        for (&parent, children) in offset_handler.iter_internal_nodes().rev() {
            if cache.either_modified(children)? {
                cache.modify_chunk(parent, &cache.hash_children(children)?)?;
            }
        }

        Ok(offset_handler.next_node())
    }
}
/// Concatenates many byte vectors into one.
fn join(many: Vec<Vec<u8>>) -> Vec<u8> {
    // The previous version cloned every inner vector via
    // `extend_from_slice(&mut one.clone())`; flattening the owned vectors
    // moves the bytes without any clone.
    many.into_iter().flatten().collect()
}
// Changes only the nested `Inner` field and checks the differential hash matches
// a manually-constructed reference tree.
#[test]
fn partial_modification_to_inner_struct() {
    let original_inner = Inner {
        a: 1,
        b: 2,
        c: 3,
        d: 4,
    };
    let original_outer = Outer {
        a: 0,
        b: original_inner.clone(),
        c: 5,
    };
    let modified_inner = Inner {
        a: 42,
        ..original_inner.clone()
    };
    // Modify outer
    let modified_outer = Outer {
        b: modified_inner.clone(),
        ..original_outer.clone()
    };
    // Perform a differential hash
    let mut cache_struct = TreeHashCache::new(&original_outer).unwrap();
    modified_outer
        .cached_hash_tree_root(&original_outer, &mut cache_struct, 0)
        .unwrap();
    let modified_cache: Vec<u8> = cache_struct.into();
    // Generate reference data.
    // NOTE(review): `data` is written but never read below — presumably leftover
    // scaffolding; confirm before removing.
    let mut data = vec![];
    data.append(&mut int_to_bytes32(0));
    let inner_bytes: Vec<u8> = TreeHashCache::new(&modified_inner).unwrap().into();
    data.append(&mut int_to_bytes32(5));
    let leaves = vec![
        int_to_bytes32(0),
        inner_bytes[0..32].to_vec(),
        int_to_bytes32(5),
        vec![0; 32], // padding
    ];
    // Merkleize the four leaves, then splice `Inner`'s full subtree over its leaf slot.
    let mut merkle = merkleize(join(leaves));
    merkle.splice(4 * 32..5 * 32, inner_bytes);
    assert_eq!(merkle.len() / HASHSIZE, 13);
    assert_eq!(modified_cache.len() / HASHSIZE, 13);
    assert_eq!(merkle, modified_cache);
}
// Changes only a top-level `u64` field of `Outer` and checks the differential
// hash matches a manually-constructed reference tree.
#[test]
fn partial_modification_to_outer() {
    let inner = Inner {
        a: 1,
        b: 2,
        c: 3,
        d: 4,
    };
    let original_outer = Outer {
        a: 0,
        b: inner.clone(),
        c: 5,
    };
    // Build the initial cache.
    // let original_cache = original_outer.build_cache_bytes();
    // Modify outer
    let modified_outer = Outer {
        c: 42,
        ..original_outer.clone()
    };
    // Perform a differential hash
    let mut cache_struct = TreeHashCache::new(&original_outer).unwrap();
    modified_outer
        .cached_hash_tree_root(&original_outer, &mut cache_struct, 0)
        .unwrap();
    let modified_cache: Vec<u8> = cache_struct.into();
    // Generate reference data.
    // NOTE(review): `data` is written but never read below — presumably leftover
    // scaffolding; confirm before removing.
    let mut data = vec![];
    data.append(&mut int_to_bytes32(0));
    let inner_bytes: Vec<u8> = TreeHashCache::new(&inner).unwrap().into();
    data.append(&mut int_to_bytes32(5));
    let leaves = vec![
        int_to_bytes32(0),
        inner_bytes[0..32].to_vec(),
        int_to_bytes32(42),
        vec![0; 32], // padding
    ];
    // Merkleize the four leaves, then splice `Inner`'s full subtree over its leaf slot.
    let mut merkle = merkleize(join(leaves));
    merkle.splice(4 * 32..5 * 32, inner_bytes);
    assert_eq!(merkle.len() / HASHSIZE, 13);
    assert_eq!(modified_cache.len() / HASHSIZE, 13);
    assert_eq!(merkle, modified_cache);
}
// Checks that a freshly-built cache for `Outer` matches a manually-constructed
// reference tree.
#[test]
fn outer_builds() {
    let inner = Inner {
        a: 1,
        b: 2,
        c: 3,
        d: 4,
    };
    let outer = Outer {
        a: 0,
        b: inner.clone(),
        c: 5,
    };
    // Build the function output.
    let cache: Vec<u8> = TreeHashCache::new(&outer).unwrap().into();
    // Generate reference data.
    // NOTE(review): `data` is written but never read below — presumably leftover
    // scaffolding; confirm before removing.
    let mut data = vec![];
    data.append(&mut int_to_bytes32(0));
    let inner_bytes: Vec<u8> = TreeHashCache::new(&inner).unwrap().into();
    data.append(&mut int_to_bytes32(5));
    let leaves = vec![
        int_to_bytes32(0),
        inner_bytes[0..32].to_vec(),
        int_to_bytes32(5),
        vec![0; 32], // padding
    ];
    // Merkleize the four leaves, then splice `Inner`'s full subtree over its leaf slot.
    let mut merkle = merkleize(join(leaves));
    merkle.splice(4 * 32..5 * 32, inner_bytes);
    assert_eq!(merkle.len() / HASHSIZE, 13);
    assert_eq!(cache.len() / HASHSIZE, 13);
    assert_eq!(merkle, cache);
}
/// Overwrites `root` in place with `hash(root || int_to_bytes32(len))`, the SSZ
/// length mix-in for list roots.
fn mix_in_length(root: &mut [u8], len: usize) {
    let mut preimage = Vec::with_capacity(2 * root.len());
    preimage.extend_from_slice(root);
    preimage.append(&mut int_to_bytes32(len as u64));
    root.copy_from_slice(&hash(&preimage));
}
/// Generic test that covers:
///
/// 1. Produce a new cache from `original`.
/// 2. Do a differential hash between `original` and `modified`.
/// 3. Test that the cache generated matches the one we generate manually.
///
/// In effect it ensures that we can do a differential hash between two `Vec<u64>`.
fn test_u64_vec_modifications(original: Vec<u64>, modified: Vec<u64>) {
    // Generate initial cache.
    let original_cache: Vec<u8> = TreeHashCache::new(&original).unwrap().into();
    // Perform a differential hash
    let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone(), false).unwrap();
    modified
        .cached_hash_tree_root(&original, &mut cache_struct, 0)
        .unwrap();
    let modified_cache: Vec<u8> = cache_struct.into();
    // Generate reference data: pack each u64 as 8 bytes, pad to whole chunks,
    // merkleize, then mix the list length into the root.
    let mut data = vec![];
    for i in &modified {
        data.append(&mut int_to_bytes8(*i));
    }
    let data = sanitise_bytes(data);
    let mut expected = merkleize(data);
    mix_in_length(&mut expected[0..HASHSIZE], modified.len());
    assert_eq!(expected, modified_cache);
}
#[test]
fn partial_modification_u64_vec() {
    let len: u64 = 2_u64.pow(5);
    let before: Vec<u64> = (0..len).collect();

    // Change only the final element.
    let mut after = before.clone();
    *after.last_mut().unwrap() = 42;

    test_u64_vec_modifications(before, after);
}
#[test]
fn shortened_u64_vec_len_within_pow_2_boundary() {
    // Shrinking by one element does not change the padded leaf count.
    let len: u64 = 2_u64.pow(5) - 1;
    let before: Vec<u64> = (0..len).collect();
    let after: Vec<u64> = (0..len - 1).collect();
    test_u64_vec_modifications(before, after);
}
#[test]
fn shortened_u64_vec_len_outside_pow_2_boundary() {
    // Halving from 2^6 to 2^5 elements shrinks the padded leaf count.
    let before: Vec<u64> = (0..2_u64.pow(6)).collect();
    let after: Vec<u64> = (0..2_u64.pow(5)).collect();
    test_u64_vec_modifications(before, after);
}
#[test]
fn extended_u64_vec_len_within_pow_2_boundary() {
    // Growing by one element stays within the same padded leaf count.
    let len: u64 = 2_u64.pow(5) - 2;
    let before: Vec<u64> = (0..len).collect();
    let mut after = before.clone();
    after.push(42);
    test_u64_vec_modifications(before, after);
}
#[test]
fn extended_u64_vec_len_outside_pow_2_boundary() {
    // Doubling from 2^5 to 2^6 elements grows the padded leaf count.
    let before: Vec<u64> = (0..2_u64.pow(5)).collect();
    let after: Vec<u64> = (0..2_u64.pow(6)).collect();
    test_u64_vec_modifications(before, after);
}
#[test]
fn large_vec_of_u64_builds() {
    let my_vec: Vec<u64> = (0..50).collect();

    // Output from the cache builder.
    let cache: Vec<u8> = TreeHashCache::new(&my_vec).unwrap().into();

    // Manually-built reference: pack each value as 8 little-endian bytes, pad to whole
    // chunks, then merkleize.
    let packed: Vec<u8> = my_vec.iter().flat_map(|i| int_to_bytes8(*i)).collect();
    let expected = merkleize(sanitise_bytes(packed));

    assert_eq!(expected, cache);
}
/// Generic test that covers:
///
/// 1. Produce a new cache from `original`.
/// 2. Do a differential hash between `original` and `modified`.
/// 3. Test that the cache generated matches the one we generate manually.
///
/// The `reference` vec is used to build the tree hash cache manually. `Inner` is just 4x `u64`, so
/// you can represent 2x `Inner` with a `reference` vec of len 8.
///
/// In effect it ensures that we can do a differential hash between two `Vec<Inner>`.
fn test_inner_vec_modifications(original: Vec<Inner>, modified: Vec<Inner>, reference: Vec<u64>) {
    // Build a cache for `original`, then update it differentially to match `modified`.
    let mut cache = TreeHashCache::new(&original).unwrap();
    modified
        .cached_hash_tree_root(&original, &mut cache, 0)
        .unwrap();
    let modified_cache: Vec<u8> = cache.into();
    // Build the reference vec.
    //
    // Each chunk of 4 `u64`s in `reference` stands for one `Inner`: its subtree root becomes
    // one leaf of the outer tree, while its full subtree bytes are kept for splicing in below.
    let mut leaves = vec![];
    let mut full_bytes = vec![];
    for n in reference.chunks(4) {
        let mut merkle = merkleize(join(vec![
            int_to_bytes32(n[0]),
            int_to_bytes32(n[1]),
            int_to_bytes32(n[2]),
            int_to_bytes32(n[3]),
        ]));
        leaves.append(&mut merkle[0..HASHSIZE].to_vec());
        full_bytes.append(&mut merkle);
    }
    // Merkleize the leaves, then replace every chunk after the internal nodes with the
    // concatenated `Inner` subtrees, mirroring the cache's flattened layout.
    let num_leaves = leaves.len() / HASHSIZE;
    let mut expected = merkleize(leaves);
    let num_internal_nodes = num_leaves.next_power_of_two() - 1;
    expected.splice(num_internal_nodes * HASHSIZE.., full_bytes);
    // Append a zero chunk for each padding leaf needed to reach a power-of-two width.
    for _ in num_leaves..num_leaves.next_power_of_two() {
        expected.append(&mut vec![0; HASHSIZE]);
    }
    // Lists mix their length into the root hash.
    mix_in_length(&mut expected[0..HASHSIZE], modified.len());
    // Compare the cached tree to the reference tree.
    assert_trees_eq(&expected, &modified_cache);
}
#[test]
fn partial_modification_of_vec_of_inner() {
    // Three `Inner` structs holding the consecutive values 0..12.
    let original: Vec<Inner> = (0..3u64)
        .map(|i| Inner {
            a: i * 4,
            b: i * 4 + 1,
            c: i * 4 + 2,
            d: i * 4 + 3,
        })
        .collect();

    // Change a single `u64` inside the middle struct.
    let mut modified = original.clone();
    modified[1].a = 42;

    // The flat reference list carries the same single change (element 4 == `modified[1].a`).
    let mut reference_vec: Vec<u64> = (0..12).collect();
    reference_vec[4] = 42;

    test_inner_vec_modifications(original, modified, reference_vec);
}
#[test]
fn shortened_vec_of_inner_within_power_of_two_boundary() {
    // Four `Inner` structs holding the consecutive values 0..16.
    let original: Vec<Inner> = (0..4u64)
        .map(|i| Inner {
            a: i * 4,
            b: i * 4 + 1,
            c: i * 4 + 2,
            d: i * 4 + 3,
        })
        .collect();

    // Drop the last element; the padded leaf count is unchanged.
    let mut modified = original.clone();
    modified.pop();

    let reference_vec: Vec<u64> = (0..12).collect();

    test_inner_vec_modifications(original, modified, reference_vec);
}
#[test]
fn shortened_vec_of_inner_outside_power_of_two_boundary() {
    // Five `Inner` structs holding the consecutive values 0..20.
    let original: Vec<Inner> = (0..5u64)
        .map(|i| Inner {
            a: i * 4,
            b: i * 4 + 1,
            c: i * 4 + 2,
            d: i * 4 + 3,
        })
        .collect();

    // Drop the last element; the padded leaf count shrinks from 8 to 4.
    let mut modified = original.clone();
    modified.pop();

    let reference_vec: Vec<u64> = (0..16).collect();

    test_inner_vec_modifications(original, modified, reference_vec);
}
#[test]
fn lengthened_vec_of_inner_within_power_of_two_boundary() {
    // Three `Inner` structs holding the consecutive values 0..12.
    let original: Vec<Inner> = (0..3u64)
        .map(|i| Inner {
            a: i * 4,
            b: i * 4 + 1,
            c: i * 4 + 2,
            d: i * 4 + 3,
        })
        .collect();

    // Append a fourth element; the padded leaf count is unchanged.
    let mut modified = original.clone();
    modified.push(Inner {
        a: 12,
        b: 13,
        c: 14,
        d: 15,
    });

    let reference_vec: Vec<u64> = (0..16).collect();

    test_inner_vec_modifications(original, modified, reference_vec);
}
#[test]
fn lengthened_vec_of_inner_outside_power_of_two_boundary() {
    // Four `Inner` structs holding the consecutive values 0..16.
    let original: Vec<Inner> = (0..4u64)
        .map(|i| Inner {
            a: i * 4,
            b: i * 4 + 1,
            c: i * 4 + 2,
            d: i * 4 + 3,
        })
        .collect();

    // Append a fifth element; the padded leaf count grows from 4 to 8.
    let mut modified = original.clone();
    modified.push(Inner {
        a: 16,
        b: 17,
        c: 18,
        d: 19,
    });

    let reference_vec: Vec<u64> = (0..20).collect();

    test_inner_vec_modifications(original, modified, reference_vec);
}
#[test]
fn vec_of_inner_builds() {
    // Manually build the reference tree: each chunk of 4 `u64`s stands for one `Inner`, whose
    // subtree root becomes a leaf of the outer tree while its full subtree bytes are kept for
    // splicing in below.
    let numbers: Vec<u64> = (0..12).collect();
    let mut leaves = vec![];
    let mut full_bytes = vec![];
    for n in numbers.chunks(4) {
        let mut merkle = merkleize(join(vec![
            int_to_bytes32(n[0]),
            int_to_bytes32(n[1]),
            int_to_bytes32(n[2]),
            int_to_bytes32(n[3]),
        ]));
        leaves.append(&mut merkle[0..HASHSIZE].to_vec());
        full_bytes.append(&mut merkle);
    }
    // Replace everything after the 3 internal nodes with the concatenated subtrees, then append
    // one zero chunk for the single padding leaf (3 leaves padded up to 4).
    let mut expected = merkleize(leaves);
    expected.splice(3 * HASHSIZE.., full_bytes);
    expected.append(&mut vec![0; HASHSIZE]);
    // The list under test: three `Inner` structs holding the same values 0..12.
    let my_vec = vec![
        Inner {
            a: 0,
            b: 1,
            c: 2,
            d: 3,
        },
        Inner {
            a: 4,
            b: 5,
            c: 6,
            d: 7,
        },
        Inner {
            a: 8,
            b: 9,
            c: 10,
            d: 11,
        },
    ];
    // Compare the freshly-built cache against the manual reference.
    let cache: Vec<u8> = TreeHashCache::new(&my_vec).unwrap().into();
    assert_trees_eq(&expected, &cache);
}
/// Provides detailed assertions when comparing merkle trees.
///
/// Panics with the index of the first differing chunk (checked from the deepest chunk upward)
/// rather than dumping two opaque byte vectors.
fn assert_trees_eq(a: &[u8], b: &[u8]) {
    assert_eq!(a.len(), b.len(), "Byte lens different");
    let num_chunks = a.len() / HASHSIZE;
    for i in (0..num_chunks).rev() {
        let start = i * HASHSIZE;
        let end = start + HASHSIZE;
        assert_eq!(
            a[start..end],
            b[start..end],
            "Chunk {}/{} different \n\n a: {:?} \n\n b: {:?}",
            i,
            num_chunks,
            a,
            b,
        );
    }
}
#[test]
fn vec_of_u64_builds() {
    // Reference: five `u64`s packed as 8 little-endian bytes each, zero-padded to whole chunks.
    let mut data = vec![];
    for i in 1..=5u64 {
        data.append(&mut int_to_bytes8(i));
    }
    data.append(&mut vec![0; 32 - 8]); // padding
    let expected = merkleize(data);

    let my_vec = vec![1, 2, 3, 4, 5];
    let cache: Vec<u8> = TreeHashCache::new(&my_vec).unwrap().into();

    assert_eq!(expected, cache);
}
#[test]
fn merkleize_odd() {
    // Five 32-byte chunks: an odd (non-power-of-two) leaf count.
    let data: Vec<u8> = (1..=5u64).flat_map(int_to_bytes32).collect();
    let merkle = merkleize(sanitise_bytes(data));

    // 5 leaves must be padded up to 8, so the tree holds `num_nodes(8)` chunks in total.
    assert_eq!(merkle.len(), num_nodes(8) * BYTES_PER_CHUNK);
}
fn generic_test(index: usize) {
let inner = Inner {
a: 1,
b: 2,
c: 3,
d: 4,
};
let cache: Vec<u8> = TreeHashCache::new(&inner).unwrap().into();
let changed_inner = match index {
0 => Inner {
a: 42,
..inner.clone()
},
1 => Inner {
b: 42,
..inner.clone()
},
2 => Inner {
c: 42,
..inner.clone()
},
3 => Inner {
d: 42,
..inner.clone()
},
_ => panic!("bad index"),
};
let mut cache_struct = TreeHashCache::from_bytes(cache.clone(), false).unwrap();
changed_inner
.cached_hash_tree_root(&inner, &mut cache_struct, 0)
.unwrap();
// assert_eq!(*cache_struct.hash_count, 3);
let new_cache: Vec<u8> = cache_struct.into();
let data1 = int_to_bytes32(1);
let data2 = int_to_bytes32(2);
let data3 = int_to_bytes32(3);
let data4 = int_to_bytes32(4);
let mut data = vec![data1, data2, data3, data4];
data[index] = int_to_bytes32(42);
let expected = merkleize(join(data));
assert_eq!(expected, new_cache);
}
#[test]
fn cached_hash_on_inner() {
    // Exercise a single-field change for each of the four `Inner` fields.
    for index in 0..4 {
        generic_test(index);
    }
}
#[test]
fn inner_builds() {
    // Expected: the four field values as 32-byte little-endian chunks, merkleized.
    let chunks: Vec<Vec<u8>> = (1..=4u64).map(int_to_bytes32).collect();
    let expected = merkleize(join(chunks));

    let inner = Inner {
        a: 1,
        b: 2,
        c: 3,
        d: 4,
    };
    let cache: Vec<u8> = TreeHashCache::new(&inner).unwrap().into();

    assert_eq!(expected, cache);
}
#[test]
fn merkleize_4_leaves() {
    // Four hashed leaves.
    let leaves: Vec<Vec<u8>> = (1..=4u64).map(|i| hash(&int_to_bytes32(i))).collect();
    let cache = merkleize(join(leaves.clone()));

    // Hash of the concatenation of two nodes — used to compute each internal node by hand.
    let concat_hash = |left: &[u8], right: &[u8]| {
        let mut joined = left.to_vec();
        joined.extend_from_slice(right);
        hash(&joined)
    };
    let hash_12 = concat_hash(&leaves[0], &leaves[1]);
    let hash_34 = concat_hash(&leaves[2], &leaves[3]);
    let root = concat_hash(&hash_12, &hash_34);

    // The merkleized output stores the root first, then internal nodes, then the leaves.
    for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() {
        let expected = match i {
            0 => root.clone(),
            1 => hash_12.clone(),
            2 => hash_34.clone(),
            3 => leaves[0].clone(),
            4 => leaves[1].clone(),
            5 => leaves[2].clone(),
            6 => leaves[3].clone(),
            _ => vec![],
        };
        assert_eq!(chunk, &expected[..], "failed at {}", i);
    }
}