Tree hash cache arena (#836)

* Start adding interop genesis state to lcli

* Use more efficient method to generate genesis state

* Remove duplicate int_to_bytes32

* Add lcli command to change state genesis time

* Add option to allow VC to start with unsynced BN

* Set VC to do parallel key loading

* Don't default to dummy eth1 backend

* Add endpoint to dump operation pool

* Add metrics for op pool

* Remove state clone for slot notifier

* Add mem size approximation for tree hash cache

* Avoid cloning tree hash when getting head

* Avoid cloning tree hash when getting head

* Add working arena-based cached tree hash

* Add another benchmark

* Add pre-allocation for caches

* Make cache nullable

* Fix bugs in cache tree hash

* Add validator tree hash optimization

* Optimize hash_concat

* Make hash32_concat return fixed-len array

* Fix failing API tests

* Add new beacon state cache struct

* Add validator-specific cache

* Separate list and values arenas

* Add parallel validator registry hashing

* Remove MultiTreeHashCache

* Remove cached tree hash macro

* Fix failing tree hash test

* Address Michael's comments

* Add CachedTreeHash impl for ef tests

* Fix messy merge conflict

* Rename cache struct, add comments

* Rename cache struct, add comments

* Remove unnecessary mutability

* Wrap iter in result

* Tidy cached tree hash

* Address Michael's comments

* Address more comments

* Use ring::Context
This commit is contained in:
Paul Hauner
2020-02-07 12:42:49 +11:00
committed by GitHub
parent f267bf2afe
commit c3182e3c1c
20 changed files with 1341 additions and 378 deletions

View File

@@ -5,7 +5,7 @@
//! defining it once in this crate makes it easy to replace.
#[cfg(not(target_arch = "wasm32"))]
use ring::digest::{digest, SHA256};
use ring::digest::{digest, Context, SHA256};
#[cfg(target_arch = "wasm32")]
use sha2::{Digest, Sha256};
@@ -27,10 +27,35 @@ pub fn hash(input: &[u8]) -> Vec<u8> {
}
/// Compute the hash of two slices concatenated.
pub fn hash_concat(h1: &[u8], h2: &[u8]) -> Vec<u8> {
let mut vec1 = h1.to_vec();
vec1.extend_from_slice(h2);
hash(&vec1)
///
/// # Panics
///
/// Will panic if either `h1` or `h2` are not 32 bytes in length.
#[cfg(not(target_arch = "wasm32"))]
/// SHA-256 of `h1` followed by `h2`, computed with a streaming `ring::digest::Context`
/// so the two inputs never need to be copied into one contiguous buffer.
pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] {
    let mut digest_ctx = Context::new(&SHA256);
    // Feed both halves of the preimage into the incremental hasher in order.
    for part in &[h1, h2] {
        digest_ctx.update(part);
    }
    // SHA-256 digests are always 32 bytes, so this copy cannot mismatch.
    let mut result = [0u8; 32];
    result.copy_from_slice(digest_ctx.finish().as_ref());
    result
}
/// Compute the hash of two slices concatenated.
///
/// # Panics
///
/// Will panic if either `h1` or `h2` are not 32 bytes in length.
#[cfg(target_arch = "wasm32")]
/// SHA-256 of `h1` followed by `h2` on wasm targets, delegating to the generic
/// `hash` helper over a stack-allocated 64-byte preimage.
///
/// Panics (via `copy_from_slice`) if either `h1` or `h2` is not exactly 32 bytes.
pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] {
    // Build the 64-byte preimage by filling each half in place.
    let mut preimage = [0u8; 64];
    let (first_half, second_half) = preimage.split_at_mut(32);
    first_half.copy_from_slice(h1);
    second_half.copy_from_slice(h2);
    // Copy the digest into a fixed-size array for the caller.
    let mut result = [0u8; 32];
    result.copy_from_slice(&hash(&preimage));
    result
}
/// The max index that can be used with `ZERO_HASHES`.
@@ -44,7 +69,7 @@ lazy_static! {
let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1];
for i in 0..ZERO_HASHES_MAX_INDEX {
hashes[i + 1] = hash_concat(&hashes[i], &hashes[i]);
hashes[i + 1] = hash32_concat(&hashes[i], &hashes[i])[..].to_vec();
}
hashes