From a488c4dccd5534be326db85e0f5265304836d6c6 Mon Sep 17 00:00:00 2001 From: gnattishness Date: Tue, 29 Oct 2019 01:07:12 +0000 Subject: [PATCH 01/21] Rename get_permutated_index to compute_shuffled_index. (#564) To match name of equivalent function in v0.8.3 spec. --- .../swap_or_not_shuffle/benches/benches.rs | 6 +++--- ...ated_index.rs => compute_shuffled_index.rs} | 18 +++++++++--------- eth2/utils/swap_or_not_shuffle/src/lib.rs | 12 ++++++------ .../swap_or_not_shuffle/src/shuffle_list.rs | 5 +++-- tests/ef_tests/src/cases/shuffling.rs | 6 +++--- 5 files changed, 24 insertions(+), 23 deletions(-) rename eth2/utils/swap_or_not_shuffle/src/{get_permutated_index.rs => compute_shuffled_index.rs} (84%) diff --git a/eth2/utils/swap_or_not_shuffle/benches/benches.rs b/eth2/utils/swap_or_not_shuffle/benches/benches.rs index 0502e6fc46..ec9b31ac11 100644 --- a/eth2/utils/swap_or_not_shuffle/benches/benches.rs +++ b/eth2/utils/swap_or_not_shuffle/benches/benches.rs @@ -1,13 +1,13 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -use swap_or_not_shuffle::{get_permutated_index, shuffle_list as fast_shuffle}; +use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list as fast_shuffle}; const SHUFFLE_ROUND_COUNT: u8 = 90; fn shuffle_list(seed: &[u8], list_size: usize) -> Vec { let mut output = Vec::with_capacity(list_size); for i in 0..list_size { - output.push(get_permutated_index(i, list_size, seed, SHUFFLE_ROUND_COUNT).unwrap()); + output.push(compute_shuffled_index(i, list_size, seed, SHUFFLE_ROUND_COUNT).unwrap()); } output } @@ -15,7 +15,7 @@ fn shuffle_list(seed: &[u8], list_size: usize) -> Vec { fn shuffles(c: &mut Criterion) { c.bench_function("single swap", move |b| { let seed = vec![42; 32]; - b.iter(|| black_box(get_permutated_index(0, 10, &seed, SHUFFLE_ROUND_COUNT))) + b.iter(|| black_box(compute_shuffled_index(0, 10, &seed, SHUFFLE_ROUND_COUNT))) }); c.bench_function("whole list of size 8", move |b| { diff --git a/eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs b/eth2/utils/swap_or_not_shuffle/src/compute_shuffled_index.rs similarity index 84% rename from eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs rename to eth2/utils/swap_or_not_shuffle/src/compute_shuffled_index.rs index b9a9e3e919..977d0ffbfa 100644 --- a/eth2/utils/swap_or_not_shuffle/src/get_permutated_index.rs +++ b/eth2/utils/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -9,7 +9,7 @@ use std::cmp::max; /// See the 'generalized domain' algorithm on page 3. /// /// Note: this function is significantly slower than the `shuffle_list` function in this crate. -/// Using `get_permutated_list` to shuffle an entire list, index by index, has been observed to be +/// Using `compute_shuffled_index` to shuffle an entire list, index by index, has been observed to be /// 250x slower than `shuffle_list`. Therefore, this function is only useful when shuffling a small /// portion of a much larger list. /// @@ -18,7 +18,7 @@ use std::cmp::max; /// - `index >= list_size` /// - `list_size > 2**24` /// - `list_size > usize::max_value() / 2` -pub fn get_permutated_index( +pub fn compute_shuffled_index( index: usize, list_size: usize, seed: &[u8], @@ -54,7 +54,7 @@ fn hash_with_round_and_position(seed: &[u8], round: u8, position: usize) -> Opti seed.append(&mut int_to_bytes1(round)); /* * Note: the specification has an implicit assertion in `int_to_bytes4` that `position / 256 < - * 2**24`. For efficiency, we do not check for that here as it is checked in `get_permutated_index`. + * 2**24`. For efficiency, we do not check for that here as it is checked in `compute_shuffled_index`. */ seed.append(&mut int_to_bytes4((position / 256) as u32)); Some(hash(&seed[..])) @@ -90,7 +90,7 @@ mod tests { let seed = Hash256::random(); let shuffle_rounds = 90; - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + assert!(compute_shuffled_index(index, list_size, &seed[..], shuffle_rounds).is_some()); } // Test at max list_size low indices. @@ -100,7 +100,7 @@ mod tests { let seed = Hash256::random(); let shuffle_rounds = 90; - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + assert!(compute_shuffled_index(index, list_size, &seed[..], shuffle_rounds).is_some()); } // Test at max list_size high indices. @@ -110,25 +110,25 @@ mod tests { let seed = Hash256::random(); let shuffle_rounds = 90; - assert!(get_permutated_index(index, list_size, &seed[..], shuffle_rounds).is_some()); + assert!(compute_shuffled_index(index, list_size, &seed[..], shuffle_rounds).is_some()); } } #[test] fn returns_none_for_zero_length_list() { - assert_eq!(None, get_permutated_index(100, 0, &[42, 42], 90)); + assert_eq!(None, compute_shuffled_index(100, 0, &[42, 42], 90)); } #[test] fn returns_none_for_out_of_bounds_index() { - assert_eq!(None, get_permutated_index(100, 100, &[42, 42], 90)); + assert_eq!(None, compute_shuffled_index(100, 100, &[42, 42], 90)); } #[test] fn returns_none_for_too_large_list() { assert_eq!( None, - get_permutated_index(100, usize::max_value() / 2, &[42, 42], 90) + compute_shuffled_index(100, usize::max_value() / 2, &[42, 42], 90) ); } } diff --git a/eth2/utils/swap_or_not_shuffle/src/lib.rs b/eth2/utils/swap_or_not_shuffle/src/lib.rs index 57049fbdf6..0ea6188849 100644 --- a/eth2/utils/swap_or_not_shuffle/src/lib.rs +++ b/eth2/utils/swap_or_not_shuffle/src/lib.rs @@ -1,21 +1,21 @@ //! Provides list-shuffling functions matching the Ethereum 2.0 specification. //! //! See -//! [get_permutated_index](https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/core/0_beacon-chain.md#get_permuted_index) +//! [compute_shuffled_index](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_beacon-chain.md#compute_shuffled_index) //! for specifications. //! //! There are two functions exported by this crate: //! -//! - `get_permutated_index`: given a single index, computes the index resulting from a shuffle. +//! - `compute_shuffled_index`: given a single index, computes the index resulting from a shuffle. //! Runs in less time than it takes to run `shuffle_list`. //! - `shuffle_list`: shuffles an entire list in-place. Runs in less time than it takes to run -//! `get_permutated_index` on each index. +//! `compute_shuffled_index` on each index. //! -//! In general, use `get_permutated_list` to calculate the shuffling of a small subset of a much +//! In general, use `compute_shuffled_index` to calculate the shuffling of a small subset of a much //! larger list (~250x larger is a good guide, but solid figures yet to be calculated). -mod get_permutated_index; +mod compute_shuffled_index; mod shuffle_list; -pub use get_permutated_index::get_permutated_index; +pub use compute_shuffled_index::compute_shuffled_index; pub use shuffle_list::shuffle_list; diff --git a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs index 5c803b57ff..416303b820 100644 --- a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs +++ b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs @@ -9,9 +9,9 @@ const TOTAL_SIZE: usize = SEED_SIZE + ROUND_SIZE + POSITION_WINDOW_SIZE; /// Shuffles an entire list in-place. /// -/// Note: this is equivalent to the `get_permutated_index` function, except it shuffles an entire +/// Note: this is equivalent to the `compute_shuffled_index` function, except it shuffles an entire /// list not just a single index. With large lists this function has been observed to be 250x -/// faster than running `get_permutated_index` across an entire list. +/// faster than running `compute_shuffled_index` across an entire list. /// /// Credits to [@protolambda](https://github.com/protolambda) for defining this algorithm. /// @@ -19,6 +19,7 @@ const TOTAL_SIZE: usize = SEED_SIZE + ROUND_SIZE + POSITION_WINDOW_SIZE; /// It holds that: shuffle_list(shuffle_list(l, r, s, true), r, s, false) == l /// and: shuffle_list(shuffle_list(l, r, s, false), r, s, true) == l /// +/// TODO forwards is around the wrong way - denote? /// Returns `None` under any of the following conditions: /// - `list_size == 0` /// - `list_size > 2**24` diff --git a/tests/ef_tests/src/cases/shuffling.rs b/tests/ef_tests/src/cases/shuffling.rs index 2fe632e84d..2ed5c0bd46 100644 --- a/tests/ef_tests/src/cases/shuffling.rs +++ b/tests/ef_tests/src/cases/shuffling.rs @@ -3,7 +3,7 @@ use crate::case_result::compare_result; use crate::decode::yaml_decode_file; use serde_derive::Deserialize; use std::marker::PhantomData; -use swap_or_not_shuffle::{get_permutated_index, shuffle_list}; +use swap_or_not_shuffle::{compute_shuffled_index, shuffle_list}; #[derive(Debug, Clone, Deserialize)] pub struct Shuffling { @@ -29,10 +29,10 @@ impl Case for Shuffling { let seed = hex::decode(&self.seed[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - // Test get_permuted_index + // Test compute_shuffled_index let shuffling = (0..self.count) .map(|i| { - get_permutated_index(i, self.count, &seed, spec.shuffle_round_count).unwrap() + compute_shuffled_index(i, self.count, &seed, spec.shuffle_round_count).unwrap() }) .collect(); compare_result::<_, Error>(&Ok(shuffling), &Some(self.mapping.clone()))?; From 2c6b40be7894f93727c225c1850c3abf29fec623 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 29 Oct 2019 12:51:32 +1100 Subject: [PATCH 02/21] Allow slot clock to work on genesis (#573) * Allow slot clock to work on genesis * Loose over-strict requirements for slot clock tests --- eth2/utils/slot_clock/src/system_time_slot_clock.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index d2ebd42ea3..23159e79d7 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -29,10 +29,10 @@ impl SlotClock for SystemTimeSlotClock { let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; let genesis = self.genesis_duration; - if now > genesis { + if now >= genesis { let since_genesis = now .checked_sub(genesis) - .expect("Control flow ensures now is greater than genesis"); + .expect("Control flow ensures now is greater than or equal to genesis"); let slot = Slot::from((since_genesis.as_millis() / self.slot_duration.as_millis()) as u64); Some(slot + self.genesis_slot) @@ -50,7 +50,7 @@ impl SlotClock for SystemTimeSlotClock { genesis + slot * self.slot_duration }; - if now > genesis { + if now >= genesis { Some( slot_start(self.now()? + 1) .checked_sub(now) @@ -100,12 +100,12 @@ mod tests { let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(500), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(0))); - assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); + assert!(clock.duration_to_next_slot().unwrap() <= Duration::from_millis(500)); let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(1_500), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(1))); - assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); + assert!(clock.duration_to_next_slot().unwrap() <= Duration::from_millis(500)); } #[test] From dfe858b8cb4ae36027edfa83474261de6790cdf6 Mon Sep 17 00:00:00 2001 From: b-m-f Date: Tue, 29 Oct 2019 16:58:39 +0000 Subject: [PATCH 03/21] Only run non-ef tests when executing make test --- Makefile | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c08d04a9be..c7d23e85c1 100644 --- a/Makefile +++ b/Makefile @@ -11,10 +11,20 @@ release: # Runs the full workspace tests, without downloading any additional test # vectors. test: - cargo test --all --all-features --release + cargo test --all --all-features --release --exclude ef_tests + + +# only run the ef-test vectors +--run-ef-tests: + cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests" + +test-ef: make-ef-tests --run-ef-tests # Runs the entire test suite, downloading test vectors if required. -test-full: make-ef-tests test +test-full: + test + test-ef + # Runs the makefile in the `ef_tests` repo. # From 5c97ed3562a7219bc221d8e1543c79463dc216de Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 30 Oct 2019 12:22:18 +1100 Subject: [PATCH 04/21] Updates external dependencies (#577) * Updates external dependencies * Correct fmt formatting --- Cargo.toml | 1 + account_manager/Cargo.toml | 10 +++--- beacon_node/Cargo.toml | 24 ++++++------- beacon_node/beacon_chain/Cargo.toml | 32 ++++++++--------- beacon_node/client/Cargo.toml | 32 ++++++++--------- beacon_node/eth2-libp2p/Cargo.toml | 32 ++++++++--------- beacon_node/eth2-libp2p/src/behaviour.rs | 9 +++-- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/discovery.rs | 4 +-- beacon_node/eth2-libp2p/src/service.rs | 33 ++++++------------ beacon_node/network/Cargo.toml | 16 ++++----- beacon_node/rest_api/Cargo.toml | 38 ++++++++++----------- beacon_node/rpc/Cargo.toml | 16 ++++----- beacon_node/store/Cargo.toml | 12 +++---- beacon_node/websocket_server/Cargo.toml | 18 +++++----- eth2/lmd_ghost/Cargo.toml | 14 ++++---- eth2/operation_pool/Cargo.toml | 10 +++--- eth2/state_processing/Cargo.toml | 24 ++++++------- eth2/types/Cargo.toml | 31 +++++++++-------- eth2/types/src/test_utils/keypairs_file.rs | 2 +- eth2/types/src/test_utils/mod.rs | 6 ++-- eth2/utils/bls/Cargo.toml | 14 ++++---- eth2/utils/eth2_config/Cargo.toml | 8 ++--- eth2/utils/eth2_hashing/Cargo.toml | 4 +-- eth2/utils/eth2_interop_keypairs/Cargo.toml | 16 ++++----- eth2/utils/int_to_bytes/Cargo.toml | 4 +-- eth2/utils/lighthouse_bootstrap/Cargo.toml | 6 ++-- eth2/utils/lighthouse_metrics/Cargo.toml | 4 +-- eth2/utils/logging/Cargo.toml | 4 +-- eth2/utils/merkle_proof/Cargo.toml | 10 +++--- eth2/utils/serde_hex/Cargo.toml | 2 +- eth2/utils/slot_clock/Cargo.toml | 2 +- eth2/utils/ssz/Cargo.toml | 2 +- eth2/utils/ssz_types/Cargo.toml | 12 +++---- eth2/utils/swap_or_not_shuffle/Cargo.toml | 8 ++--- eth2/utils/tree_hash/Cargo.toml | 10 +++--- lcli/Cargo.toml | 14 ++++---- protos/Cargo.toml | 8 ++--- protos/build.rs | 2 +- tests/ef_tests/Cargo.toml | 20 +++++------ validator_client/Cargo.toml | 36 +++++++++---------- 41 files changed, 272 insertions(+), 280 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9b31060a2d..9616155310 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,3 +48,4 @@ tree_hash_derive = { path = "eth2/utils/tree_hash_derive" } eth2_ssz = { path = "eth2/utils/ssz" } eth2_ssz_derive = { path = "eth2/utils/ssz_derive" } eth2_ssz_types = { path = "eth2/utils/ssz_types" } +eth2_hashing = { path = "eth2/utils/eth2_hashing" } diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 32d2286c8e..fc3df1e8da 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" [dependencies] bls = { path = "../eth2/utils/bls" } -clap = "2.32.0" -slog = "^2.2.3" -slog-term = "^2.4.0" -slog-async = "^2.3.0" +clap = "2.33.0" +slog = "2.5.2" +slog-term = "2.4.2" +slog-async = "2.3.0" validator_client = { path = "../validator_client" } types = { path = "../eth2/types" } -dirs = "2.0.1" +dirs = "2.0.2" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 0e42990182..8238b5f8da 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -12,16 +12,16 @@ types = { path = "../eth2/types" } store = { path = "./store" } client = { path = "client" } version = { path = "version" } -clap = "2.32.0" -rand = "0.7" -slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } -slog-term = "^2.4.0" -slog-async = "^2.3.0" -ctrlc = { version = "3.1.1", features = ["termination"] } -tokio = "0.1.15" -tokio-timer = "0.2.10" -futures = "0.1.25" -exit-future = "0.1.3" -env_logger = "0.6.1" -dirs = "2.0.1" +clap = "2.33.0" +rand = "0.7.2" +slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } +slog-term = "2.4.2" +slog-async = "2.3.0" +ctrlc = { version = "3.1.3", features = ["termination"] } +tokio = "0.1.22" +tokio-timer = "0.2.11" +exit-future = "0.1.4" +env_logger = "0.7.1" +dirs = "2.0.2" logging = { path = "../eth2/utils/logging" } +futures = "0.1.29" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 02a45d1374..b69564047c 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -12,28 +12,28 @@ write_ssz_files = [] # Writes debugging .ssz files to /tmp during block process eth2_config = { path = "../../eth2/utils/eth2_config" } merkle_proof = { path = "../../eth2/utils/merkle_proof" } store = { path = "../store" } -parking_lot = "0.7" -lazy_static = "1.3.0" +parking_lot = "0.9.0" +lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } -log = "0.4" +log = "0.4.8" operation_pool = { path = "../../eth2/operation_pool" } -rayon = "1.0" -serde = "1.0" -serde_derive = "1.0" -serde_yaml = "0.8" -serde_json = "^1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -sloggers = { version = "^0.3" } +rayon = "1.2.0" +serde = "1.0.102" +serde_derive = "1.0.102" +serde_yaml = "0.8.11" +serde_json = "1.0.41" +slog = { version = "2.5.2", features = ["max_level_trace"] } +sloggers = "0.3.4" slot_clock = { path = "../../eth2/utils/slot_clock" } -eth2_hashing = { path = "../../eth2/utils/eth2_hashing" } -eth2_ssz = "0.1" -eth2_ssz_derive = "0.1" +eth2_hashing = "0.1.0" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" state_processing = { path = "../../eth2/state_processing" } -tree_hash = "0.1" +tree_hash = "0.1.0" types = { path = "../../eth2/types" } lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] -rand = "0.5.5" -lazy_static = "1.3.0" +rand = "0.7.2" +lazy_static = "1.4.0" diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index e557217938..ec0c14159a 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -12,22 +12,22 @@ eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } websocket_server = { path = "../websocket_server" } -prometheus = "^0.6" +prometheus = "0.7.0" types = { path = "../../eth2/types" } -tree_hash = "0.1" +tree_hash = "0.1.0" eth2_config = { path = "../../eth2/utils/eth2_config" } slot_clock = { path = "../../eth2/utils/slot_clock" } -serde = "1.0.93" -serde_derive = "1.0" -error-chain = "0.12.0" -serde_yaml = "0.8" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -slog-async = "^2.3.0" -slog-json = "^2.3" -tokio = "0.1.15" -clap = "2.32.0" -dirs = "1.0.3" -exit-future = "0.1.3" -futures = "0.1.25" -reqwest = "0.9" -url = "1.2" +serde = "1.0.102" +serde_derive = "1.0.102" +error-chain = "0.12.1" +serde_yaml = "0.8.11" +slog = { version = "2.5.2", features = ["max_level_trace"] } +slog-async = "2.3.0" +slog-json = "2.3.0" +tokio = "0.1.22" +clap = "2.33.0" +dirs = "2.0.2" +exit-future = "0.1.4" +futures = "0.1.29" +reqwest = "0.9.22" +url = "2.1.0" diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index ccc6efb6d9..8982e17669 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -5,28 +5,28 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -clap = "2.32.0" +clap = "2.33.0" hex = "0.3" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "cdd5251d29e21a01aa2ffed8cb577a37a0f9e2eb" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "cdd5251d29e21a01aa2ffed8cb577a37a0f9e2eb", features = ["serde"] } types = { path = "../../eth2/types" } -serde = "1.0" -serde_derive = "1.0" -eth2_ssz = "0.1" -eth2_ssz_derive = "0.1" -slog = { version = "^2.4.1" , features = ["max_level_trace"] } +serde = "1.0.102" +serde_derive = "1.0.102" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" +slog = { version = "2.5.2", features = ["max_level_trace"] } version = { path = "../version" } -tokio = "0.1.16" -futures = "0.1.25" -error-chain = "0.12.0" -tokio-timer = "0.2.10" -dirs = "2.0.1" +tokio = "0.1.22" +futures = "0.1.29" +error-chain = "0.12.1" +tokio-timer = "0.2.11" +dirs = "2.0.2" tokio-io = "0.1.12" -smallvec = "0.6.10" +smallvec = "0.6.11" fnv = "1.0.6" -unsigned-varint = "0.2.2" +unsigned-varint = "0.2.3" bytes = "0.4.12" tokio-io-timeout = "0.3.1" -lazy_static = "1.3.0" +lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index a47d32ec2a..aa11d586ff 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -153,8 +153,10 @@ impl NetworkBehaviourEventProcess { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( @@ -167,11 +169,12 @@ impl NetworkBehaviourEventProcess info.protocol_version, "Agent Version" => info.agent_version, "Listening Addresses" => format!("{:?}", info.listen_addrs), + "Observed Address" => format!("{:?}", observed_addr), "Protocols" => format!("{:?}", info.protocols) ); } + IdentifyEvent::Sent { .. } => {} IdentifyEvent::Error { .. } => {} - IdentifyEvent::SendBack { .. } => {} } } } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index fa20d2cdd0..cacad9c206 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -82,7 +82,7 @@ impl Default for Config { gs_config: GossipsubConfigBuilder::new() .max_transmit_size(1_048_576) .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet - .propagate_messages(false) // require validation before propagation + .manual_propagation(true) // require validation before propagation .build(), boot_nodes: vec![], libp2p_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 69ca39ad79..380914af5b 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -286,7 +286,7 @@ fn load_enr( // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers. - let mut local_enr = EnrBuilder::new() + let mut local_enr = EnrBuilder::new("v4") .ip(config.discovery_address) .tcp(config.libp2p_port) .udp(config.discovery_port) @@ -302,7 +302,7 @@ fn load_enr( match Enr::from_str(&enr_string) { Ok(enr) => { if enr.node_id() == local_enr.node_id() { - if enr.ip() == config.discovery_address.into() + if enr.ip().map(Into::into) == Some(config.discovery_address) && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index f9c06a532d..2ffafb855e 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -8,12 +8,8 @@ use crate::{Topic, TopicHash}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ - identity::Keypair, - multiaddr::Multiaddr, - muxing::StreamMuxerBox, - nodes::Substream, + identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, nodes::Substream, transport::boxed::Boxed, - upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; use libp2p::{core, secio, PeerId, Swarm, Transport}; use slog::{crit, debug, info, trace, warn}; @@ -206,7 +202,7 @@ impl Stream for Service { fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> { // TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised // in the future. - let transport = libp2p::tcp::TcpConfig::new(); + let transport = libp2p::tcp::TcpConfig::new().nodelay(true); let transport = libp2p::dns::DnsConfig::new(transport); #[cfg(feature = "libp2p-websocket")] let transport = { @@ -214,22 +210,15 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) transport.or_transport(websocket::WsConfig::new(trans_clone)) }; transport - .with_upgrade(secio::SecioConfig::new(local_private_key)) - .and_then(move |out, endpoint| { - let peer_id = out.remote_key.into_peer_id(); - let peer_id2 = peer_id.clone(); - let upgrade = core::upgrade::SelectUpgrade::new( - libp2p::yamux::Config::default(), - libp2p::mplex::MplexConfig::new(), - ) - // TODO: use a single `.map` instead of two maps - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); - - core::upgrade::apply(out.stream, upgrade, endpoint) - .map(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) - }) - .with_timeout(Duration::from_secs(20)) + .upgrade(core::upgrade::Version::V1) + .authenticate(secio::SecioConfig::new(local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + libp2p::yamux::Config::default(), + libp2p::mplex::MplexConfig::new(), + )) + .map(|(peer, muxer), _| (peer, core::muxing::StreamMuxerBox::new(muxer))) + .timeout(Duration::from_secs(20)) + .timeout(Duration::from_secs(20)) .map_err(|err| Error::new(ErrorKind::Other, err)) .boxed() } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index ffeba96ec5..b58f2fd7cb 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,19 +5,19 @@ authors = ["Age Manning "] edition = "2018" [dev-dependencies] -sloggers = "0.3.2" +sloggers = "0.3.4" [dependencies] beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } types = { path = "../../eth2/types" } -slog = { version = "^2.2.3" , features = ["max_level_trace"] } +slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.3" -eth2_ssz = "0.1" -tree_hash = "0.1" -futures = "0.1.25" -error-chain = "0.12.0" -tokio = "0.1.16" +eth2_ssz = "0.1.2" +tree_hash = "0.1.0" +futures = "0.1.29" +error-chain = "0.12.1" +tokio = "0.1.22" parking_lot = "0.9.0" -smallvec = "0.6.10" +smallvec = "0.6.11" diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 7ea21eeba3..ac019b97c0 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -12,28 +12,28 @@ network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } store = { path = "../store" } version = { path = "../version" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "^1.0" -serde_yaml = "0.8" -slog = "^2.2.3" -slog-term = "^2.4.0" -slog-async = "^2.3.0" -eth2_ssz = { path = "../../eth2/utils/ssz" } -eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } +serde = { version = "1.0.102", features = ["derive"] } +serde_json = "1.0.41" +serde_yaml = "0.8.11" +slog = "2.5.2" +slog-term = "2.4.2" +slog-async = "2.3.0" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } -clap = "2.32.0" -http = "^0.1.17" -prometheus = { version = "^0.6", features = ["process"] } -hyper = "0.12.34" -exit-future = "0.1.3" -tokio = "0.1.17" -url = "2.0" -lazy_static = "1.3.0" +clap = "2.33.0" +http = "0.1.19" +prometheus = { version = "0.7.0", features = ["process"] } +hyper = "0.12.35" +exit-future = "0.1.4" +tokio = "0.1.22" +url = "2.1.0" +lazy_static = "1.4.0" eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } -hex = "0.3.2" -parking_lot = "0.9" -futures = "0.1.25" +hex = "0.3" +parking_lot = "0.9.0" +futures = "0.1.29" diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml index 8fc13a1bd6..93a5239e48 100644 --- a/beacon_node/rpc/Cargo.toml +++ b/beacon_node/rpc/Cargo.toml @@ -11,13 +11,13 @@ network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } -eth2_ssz = "0.1" +eth2_ssz = "0.1.2" protos = { path = "../../protos" } -grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } -clap = "2.32.0" -futures = "0.1.23" -serde = "1.0" -serde_derive = "1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -tokio = "0.1.17" +grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] } +clap = "2.33.0" +futures = "0.1.29" +serde = "1.0.102" +serde_derive = "1.0.102" +slog = { version = "2.5.2", features = ["max_level_trace"] } +tokio = "0.1.22" exit-future = "0.1.4" diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index cd9711253e..d613c12000 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -5,15 +5,15 @@ authors = ["Paul Hauner "] edition = "2018" [dev-dependencies] -tempfile = "3" +tempfile = "3.1.0" [dependencies] db-key = "0.0.5" leveldb = "0.8.4" -parking_lot = "0.7" -eth2_ssz = "0.1" -eth2_ssz_derive = "0.1" -tree_hash = "0.1" +parking_lot = "0.9.0" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" +tree_hash = "0.1.0" types = { path = "../../eth2/types" } -lazy_static = "1.3.0" +lazy_static = "1.4.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 48f046e074..2922d5fa5d 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -8,13 +8,13 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } -clap = "2.32.0" -exit-future = "0.1.3" -futures = "0.1.25" -serde = "1.0" -serde_derive = "1.0" -serde_json = "^1.0" -slog = "^2.2.3" -tokio = "0.1.16" +clap = "2.33.0" +exit-future = "0.1.4" +futures = "0.1.29" +serde = "1.0.102" +serde_derive = "1.0.102" +serde_json = "1.0.41" +slog = "2.5.2" +tokio = "0.1.22" types = { path = "../../eth2/types" } -ws = "0.9" +ws = "0.9.1" diff --git a/eth2/lmd_ghost/Cargo.toml b/eth2/lmd_ghost/Cargo.toml index 636076c466..e26b85626c 100644 --- a/eth2/lmd_ghost/Cargo.toml +++ b/eth2/lmd_ghost/Cargo.toml @@ -5,17 +5,17 @@ authors = ["Age Manning ", "Paul Hauner edition = "2018" [dependencies] -parking_lot = "0.7" +parking_lot = "0.9.0" store = { path = "../../beacon_node/store" } types = { path = "../types" } [dev-dependencies] -criterion = "0.2" -hex = "0.3.2" -yaml-rust = "0.4.2" +criterion = "0.3.0" +hex = "0.3" +yaml-rust = "0.4.3" bls = { path = "../utils/bls" } slot_clock = { path = "../utils/slot_clock" } beacon_chain = { path = "../../beacon_node/beacon_chain" } -env_logger = "0.6.0" -lazy_static = "1.3.0" -rand = "0.7" +env_logger = "0.7.1" +lazy_static = "1.4.0" +rand = "0.7.2" diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index 41edf8086c..9aa2b598da 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -6,12 +6,12 @@ edition = "2018" [dependencies] int_to_bytes = { path = "../utils/int_to_bytes" } -itertools = "0.8" -parking_lot = "0.7" +itertools = "0.8.1" +parking_lot = "0.9.0" types = { path = "../types" } state_processing = { path = "../state_processing" } -eth2_ssz = "0.1" -eth2_ssz_derive = { path = "../utils/ssz_derive" } +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" [dev-dependencies] -rand = "0.5.5" +rand = "0.7.2" diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 1e29431991..96b8f20145 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -9,13 +9,13 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.2" -env_logger = "0.6.0" -serde = "1.0" -serde_derive = "1.0" -lazy_static = "1.4" -serde_yaml = "0.8" -eth2_ssz = { path = "../utils/ssz" } +criterion = "0.3.0" +env_logger = "0.7.1" +serde = "1.0.102" +serde_derive = "1.0.102" +lazy_static = "1.4.0" +serde_yaml = "0.8.11" +eth2_ssz = "0.1.2" beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } lmd_ghost = { path = "../lmd_ghost" } @@ -23,15 +23,15 @@ lmd_ghost = { path = "../lmd_ghost" } [dependencies] bls = { path = "../utils/bls" } -integer-sqrt = "0.1" -itertools = "0.8" +integer-sqrt = "0.1.2" +itertools = "0.8.1" eth2_ssz_types = { path = "../utils/ssz_types" } merkle_proof = { path = "../utils/merkle_proof" } -log = "0.4" -tree_hash = "0.1" +log = "0.4.8" +tree_hash = "0.1.0" tree_hash_derive = "0.2" types = { path = "../types" } -rayon = "1.0" +rayon = "1.2.0" [features] fake_crypto = ["bls/fake_crypto"] diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 95d7a03174..9123ca6b32 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -8,27 +8,28 @@ edition = "2018" bls = { path = "../utils/bls" } compare_fields = { path = "../utils/compare_fields" } compare_fields_derive = { path = "../utils/compare_fields_derive" } -dirs = "1.0" -derivative = "1.0" +dirs = "2.0.2" +derivative = "1.0.3" eth2_interop_keypairs = { path = "../utils/eth2_interop_keypairs" } -ethereum-types = "0.6" -eth2_hashing = { path = "../utils/eth2_hashing" } +ethereum-types = "0.8.0" +eth2_hashing = "0.1.0" hex = "0.3" int_to_bytes = { path = "../utils/int_to_bytes" } -log = "0.4" -rayon = "1.0" -rand = "0.5.5" -serde = "1.0" -serde_derive = "1.0" -slog = "^2.2.3" -eth2_ssz = "0.1" -eth2_ssz_derive = "0.1" +log = "0.4.8" +rayon = "1.2.0" +rand = "0.7.2" +serde = "1.0.102" +serde_derive = "1.0.102" +slog = "2.5.2" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" eth2_ssz_types = { path = "../utils/ssz_types" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } test_random_derive = { path = "../utils/test_random_derive" } -tree_hash = "0.1" +tree_hash = "0.1.0" tree_hash_derive = "0.2" +rand_xorshift = "0.2.0" [dev-dependencies] -env_logger = "0.6.0" -serde_json = "^1.0" +env_logger = "0.7.1" +serde_json = "1.0.41" diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index a1ea4d928b..13b1b17f2d 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -91,7 +91,7 @@ mod tests { } fn random_tmp_file() -> String { - let mut rng = thread_rng(); + let rng = thread_rng(); rng.sample_iter(&Alphanumeric).take(7).collect() } diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index b3ecb9089c..0e5a6d41d2 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -10,8 +10,6 @@ pub use generate_deterministic_keypairs::generate_deterministic_keypair; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use generate_deterministic_keypairs::load_keypairs_from_yaml; pub use keypairs_file::KeypairsFile; -pub use rand::{ - RngCore, - {prng::XorShiftRng, SeedableRng}, -}; +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; pub use test_random::TestRandom; diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 6638f13a26..dbee0cead6 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,16 +5,16 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.11.0" } -eth2_hashing = { path = "../eth2_hashing" } +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.11.1" } +eth2_hashing = "0.1.0" hex = "0.3" -rand = "^0.5" -serde = "1.0" -serde_derive = "1.0" +rand = "0.7.2" +serde = "1.0.102" +serde_derive = "1.0.102" serde_hex = { path = "../serde_hex" } -eth2_ssz = "0.1" +eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../ssz_types" } -tree_hash = "0.1" +tree_hash = "0.1.0" [features] fake_crypto = [] diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml index a125887480..7459cfed69 100644 --- a/eth2/utils/eth2_config/Cargo.toml +++ b/eth2/utils/eth2_config/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -clap = "2.32.0" -serde = "1.0" -serde_derive = "1.0" -toml = "^0.5" +clap = "2.33.0" +serde = "1.0.102" +serde_derive = "1.0.102" +toml = "0.5.4" types = { path = "../../types" } diff --git a/eth2/utils/eth2_hashing/Cargo.toml b/eth2/utils/eth2_hashing/Cargo.toml index 665e89d597..3e7d8ed3f1 100644 --- a/eth2/utils/eth2_hashing/Cargo.toml +++ b/eth2/utils/eth2_hashing/Cargo.toml @@ -7,10 +7,10 @@ license = "Apache-2.0" description = "Hashing primitives used in Ethereum 2.0" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = "0.14.6" +ring = "0.16.9" [dev-dependencies] rustc-hex = "2.0.1" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.2.47" +wasm-bindgen-test = "0.3.2" diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index fbebfa25da..b8969c6618 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -7,14 +7,14 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = "1.4" -num-bigint = "0.2" -eth2_hashing = "0.1" +lazy_static = "1.4.0" +num-bigint = "0.2.3" +eth2_hashing = "0.1.0" hex = "0.3" -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.11.0" } -serde_yaml = "0.8" -serde = "1.0" -serde_derive = "1.0" +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.11.1" } +serde_yaml = "0.8.11" +serde = "1.0.102" +serde_derive = "1.0.102" [dev-dependencies] -base64 = "0.10" +base64 = "0.11.0" diff --git a/eth2/utils/int_to_bytes/Cargo.toml b/eth2/utils/int_to_bytes/Cargo.toml index f7424e0328..48c52548b1 100644 --- a/eth2/utils/int_to_bytes/Cargo.toml +++ b/eth2/utils/int_to_bytes/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bytes = "0.4" +bytes = "0.4.12" [dev-dependencies] -yaml-rust = "0.4.2" +yaml-rust = "0.4.3" hex = "0.3" diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml index cfc4c6bafd..0e1bbc7448 100644 --- a/eth2/utils/lighthouse_bootstrap/Cargo.toml +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -9,8 +9,8 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2_config" } eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } -reqwest = "0.9" +reqwest = "0.9.22" url = "1.2" types = { path = "../../types" } -serde = "1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } +serde = "1.0.102" +slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } diff --git a/eth2/utils/lighthouse_metrics/Cargo.toml b/eth2/utils/lighthouse_metrics/Cargo.toml index 0a24a96fb5..3b01c63e41 100644 --- a/eth2/utils/lighthouse_metrics/Cargo.toml +++ b/eth2/utils/lighthouse_metrics/Cargo.toml @@ -7,5 +7,5 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -lazy_static = "1.3.0" -prometheus = "^0.6" +lazy_static = "1.4.0" +prometheus = "0.7.0" diff --git a/eth2/utils/logging/Cargo.toml b/eth2/utils/logging/Cargo.toml index 62a8b41e0c..9d9405429e 100644 --- a/eth2/utils/logging/Cargo.toml +++ b/eth2/utils/logging/Cargo.toml @@ -5,5 +5,5 @@ authors = ["blacktemplar "] edition = "2018" [dependencies] -slog = { version = "^2.2.3" } -slog-term = "^2.4.0" +slog = "2.5.2" +slog-term = "2.4.2" diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 5ffb6af532..a342b5bea7 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -ethereum-types = "0.6" -eth2_hashing = { path = "../eth2_hashing" } -lazy_static = "1.3.0" +ethereum-types = "0.8.0" +eth2_hashing = "0.1.0" +lazy_static = "1.4.0" [dev-dependencies] -quickcheck = "0.8" -quickcheck_macros = "0.8" +quickcheck = "0.9.0" +quickcheck_macros = "0.8.0" diff --git a/eth2/utils/serde_hex/Cargo.toml b/eth2/utils/serde_hex/Cargo.toml index b28194dd6f..06102f24ec 100644 --- a/eth2/utils/serde_hex/Cargo.toml +++ b/eth2/utils/serde_hex/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -serde = "1.0" +serde = "1.0.102" hex = "0.3" diff --git a/eth2/utils/slot_clock/Cargo.toml b/eth2/utils/slot_clock/Cargo.toml index c4b9df5edd..e27395e42d 100644 --- a/eth2/utils/slot_clock/Cargo.toml +++ b/eth2/utils/slot_clock/Cargo.toml @@ -6,5 +6,5 @@ edition = "2018" [dependencies] types = { path = "../../types" } -lazy_static = "1.3.0" +lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index ff5df162d8..53d75b697c 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -13,4 +13,4 @@ name = "ssz" eth2_ssz_derive = "0.1.0" [dependencies] -ethereum-types = "0.6" +ethereum-types = "0.8.0" diff --git a/eth2/utils/ssz_types/Cargo.toml b/eth2/utils/ssz_types/Cargo.toml index 732cea20d6..d019373e9f 100644 --- a/eth2/utils/ssz_types/Cargo.toml +++ b/eth2/utils/ssz_types/Cargo.toml @@ -8,13 +8,13 @@ edition = "2018" name = "ssz_types" [dependencies] -tree_hash = "0.1" -serde = "1.0" -serde_derive = "1.0" +tree_hash = "0.1.0" +serde = "1.0.102" +serde_derive = "1.0.102" serde_hex = { path = "../serde_hex" } -eth2_ssz = "0.1" -typenum = "1.10" +eth2_ssz = "0.1.2" +typenum = "1.11.2" [dev-dependencies] -serde_yaml = "0.8" +serde_yaml = "0.8.11" tree_hash_derive = "0.2" diff --git a/eth2/utils/swap_or_not_shuffle/Cargo.toml b/eth2/utils/swap_or_not_shuffle/Cargo.toml index 7c0e8151d8..2f7d331036 100644 --- a/eth2/utils/swap_or_not_shuffle/Cargo.toml +++ b/eth2/utils/swap_or_not_shuffle/Cargo.toml @@ -9,11 +9,11 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.2" -yaml-rust = "0.4.2" +criterion = "0.3.0" +yaml-rust = "0.4.3" hex = "0.3" -ethereum-types = "0.6" +ethereum-types = "0.8.0" [dependencies] -eth2_hashing = { path = "../eth2_hashing" } +eth2_hashing = "0.1.0" int_to_bytes = { path = "../int_to_bytes" } diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index 6cb18648db..e416a3f8e7 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -11,12 +11,12 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.2" -rand = "0.7" +criterion = "0.3.0" +rand = "0.7.2" tree_hash_derive = "0.2" types = { path = "../../types" } [dependencies] -ethereum-types = "0.6" -eth2_hashing = "0.1" -lazy_static = "1.4" +ethereum-types = "0.8.0" +eth2_hashing = "0.1.0" +lazy_static = "1.4.0" diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 55bfc16544..d1dbdb221e 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -8,13 +8,13 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33" +clap = "2.33.0" hex = "0.3" -log = "0.4" -serde = "1.0" -serde_yaml = "0.8" -simple_logger = "1.0" +log = "0.4.8" +serde = "1.0.102" +serde_yaml = "0.8.11" +simple_logger = "1.3.0" types = { path = "../eth2/types" } state_processing = { path = "../eth2/state_processing" } -eth2_ssz = { path = "../eth2/utils/ssz" } -regex = "1.3" +eth2_ssz = "0.1.2" +regex = "1.3.1" diff --git a/protos/Cargo.toml b/protos/Cargo.toml index ab97e5fb3a..479273cfc7 100644 --- a/protos/Cargo.toml +++ b/protos/Cargo.toml @@ -6,9 +6,9 @@ edition = "2018" description = "Google protobuf message and service definitions used in Lighthouse APIs." [dependencies] -futures = "0.1" -grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } -protobuf = "~2.8.1" +futures = "0.1.29" +grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] } +protobuf = "2.8.1" [build-dependencies] -protoc-grpcio = "0.3.1" +protoc-grpcio = "1.1.0" diff --git a/protos/build.rs b/protos/build.rs index 108d9e2ddb..5b45bb7f83 100644 --- a/protos/build.rs +++ b/protos/build.rs @@ -5,6 +5,6 @@ use std::path::Path; fn main() { let proto_root = Path::new("src"); println!("cargo:rerun-if-changed={}", proto_root.display()); - protoc_grpcio::compile_grpc_protos(&["services.proto"], &[proto_root], &proto_root) + protoc_grpcio::compile_grpc_protos(&["services.proto"], &[proto_root], &proto_root, None) .expect("Failed to compile gRPC definitions!"); } diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index 24fe75fb3e..b0d281b8d6 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -12,18 +12,18 @@ fake_crypto = ["bls/fake_crypto"] [dependencies] bls = { path = "../../eth2/utils/bls" } compare_fields = { path = "../../eth2/utils/compare_fields" } -ethereum-types = "0.6" +ethereum-types = "0.8.0" hex = "0.3" -rayon = "1.0" -serde = "1.0" -serde_derive = "1.0" -serde_repr = "0.1" -serde_yaml = "0.8" -eth2_ssz = "0.1" -eth2_ssz_derive = "0.1" -tree_hash = "0.1" +rayon = "1.2.0" +serde = "1.0.102" +serde_derive = "1.0.102" +serde_repr = "0.1.5" +serde_yaml = "0.8.11" +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" +tree_hash = "0.1.0" tree_hash_derive = "0.2" state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } -walkdir = "2" +walkdir = "2.2.9" diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index dcadf3b478..09cb52b76d 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -14,28 +14,28 @@ path = "src/lib.rs" [dependencies] bls = { path = "../eth2/utils/bls" } -eth2_ssz = "0.1" +eth2_ssz = "0.1.2" eth2_config = { path = "../eth2/utils/eth2_config" } -tree_hash = "0.1" -clap = "2.32.0" +tree_hash = "0.1.0" +clap = "2.33.0" lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" } -grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } +grpcio = { version = "0.4.6", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } -serde = "1.0" -serde_derive = "1.0" -serde_json = "^1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } -slog-async = "^2.3.0" -slog-json = "^2.3" -slog-term = "^2.4.0" -tokio = "0.1.18" -tokio-timer = "0.2.10" -error-chain = "0.12.0" -bincode = "^1.1.2" -futures = "0.1.25" -dirs = "2.0.1" +serde = "1.0.102" +serde_derive = "1.0.102" +serde_json = "1.0.41" +slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } +slog-async = "2.3.0" +slog-json = "2.3.0" +slog-term = "2.4.2" +tokio = "0.1.22" +tokio-timer = "0.2.11" +error-chain = "0.12.1" +bincode = "1.2.0" +futures = "0.1.29" +dirs = "2.0.2" logging = { path = "../eth2/utils/logging" } -libc = "0.2" +libc = "0.2.65" From 091ac07af9ddd1e98b1717c6e084ad454bc6d4f9 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Wed, 30 Oct 2019 09:20:40 +0100 Subject: [PATCH 05/21] Warn during long beacon state skips (#566) * Add warn whenever trying to skip more than 3 blocks * Define block skipping threshold warning as const * Fix cargo fmt --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index df0a3f80d8..7385edb002 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -44,6 +44,8 @@ pub const GRAFFITI: &str = "sigp/lighthouse-0.0.0-prerelease"; /// Only useful for testing. const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); +const BLOCK_SKIPPING_LOGGING_THRESHOLD: u64 = 3; + #[derive(Debug, PartialEq)] pub enum BlockProcessingOutcome { /// Block was valid and imported into the block graph. @@ -374,6 +376,14 @@ impl BeaconChain { if slot == head_state.slot { Ok(head_state) } else if slot > head_state.slot { + if slot > head_state.slot + BLOCK_SKIPPING_LOGGING_THRESHOLD { + warn!( + self.log, + "Skipping more than {} blocks", BLOCK_SKIPPING_LOGGING_THRESHOLD; + "head_slot" => head_state.slot, + "request_slot" => slot + ) + } let head_state_slot = head_state.slot; let mut state = head_state; while state.slot < slot { From c450b4a18dcf8b9e8bcb1c8127dd9538c4130785 Mon Sep 17 00:00:00 2001 From: b-m-f Date: Wed, 30 Oct 2019 09:42:32 +0000 Subject: [PATCH 06/21] Rename make task to run EF tests --- Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index c7d23e85c1..1f111b3c11 100644 --- a/Makefile +++ b/Makefile @@ -15,15 +15,13 @@ test: # only run the ef-test vectors ---run-ef-tests: +run-ef-tests: cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests" -test-ef: make-ef-tests --run-ef-tests +test-ef: make-ef-tests run-ef-tests # Runs the entire test suite, downloading test vectors if required. -test-full: - test - test-ef +test-full: test test-ef # Runs the makefile in the `ef_tests` repo. From 83c571d6eb542185d1123ada166b12d6cce492cf Mon Sep 17 00:00:00 2001 From: Mehdi Zerouali Date: Thu, 31 Oct 2019 13:13:21 +1100 Subject: [PATCH 07/21] Delete fuzzing targets (#586) --- eth2/utils/eth2_hashing/fuzz/.gitignore | 4 - eth2/utils/eth2_hashing/fuzz/Cargo.toml | 22 ---- .../fuzz/fuzz_targets/fuzz_target_hash.rs | 9 -- eth2/utils/ssz/fuzz/.gitignore | 4 - eth2/utils/ssz/fuzz/Cargo.toml | 105 ------------------ .../fuzz_target_address_decode.rs | 20 ---- .../fuzz_target_address_encode.rs | 20 ---- .../fuzz_targets/fuzz_target_bool_decode.rs | 24 ---- .../fuzz_targets/fuzz_target_bool_encode.rs | 20 ---- .../fuzz_target_hash256_decode.rs | 20 ---- .../fuzz_target_hash256_encode.rs | 20 ---- .../fuzz_targets/fuzz_target_u16_decode.rs | 19 ---- .../fuzz_targets/fuzz_target_u16_encode.rs | 20 ---- .../fuzz_targets/fuzz_target_u32_decode.rs | 19 ---- .../fuzz_targets/fuzz_target_u32_encode.rs | 20 ---- .../fuzz_targets/fuzz_target_u64_decode.rs | 28 ----- .../fuzz_targets/fuzz_target_u64_encode.rs | 38 ------- .../fuzz_targets/fuzz_target_u8_decode.rs | 18 --- .../fuzz_targets/fuzz_target_u8_encode.rs | 20 ---- .../fuzz_targets/fuzz_target_usize_decode.rs | 29 ----- .../fuzz_targets/fuzz_target_usize_encode.rs | 38 ------- .../fuzz_target_vec_address_decode.rs | 12 -- .../fuzz_target_vec_bool_decode.rs | 10 -- .../fuzz_targets/fuzz_target_vec_decode.rs | 11 -- .../fuzz_targets/fuzz_target_vec_encode.rs | 14 --- .../fuzz_target_vec_u64_decode.rs | 10 -- 26 files changed, 574 deletions(-) delete mode 100644 eth2/utils/eth2_hashing/fuzz/.gitignore delete mode 100644 eth2/utils/eth2_hashing/fuzz/Cargo.toml delete mode 100644 eth2/utils/eth2_hashing/fuzz/fuzz_targets/fuzz_target_hash.rs delete mode 100644 eth2/utils/ssz/fuzz/.gitignore delete mode 100644 eth2/utils/ssz/fuzz/Cargo.toml delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs diff --git a/eth2/utils/eth2_hashing/fuzz/.gitignore b/eth2/utils/eth2_hashing/fuzz/.gitignore deleted file mode 100644 index 572e03bdf3..0000000000 --- a/eth2/utils/eth2_hashing/fuzz/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -target -corpus -artifacts diff --git a/eth2/utils/eth2_hashing/fuzz/Cargo.toml b/eth2/utils/eth2_hashing/fuzz/Cargo.toml deleted file mode 100644 index 57e0172eb6..0000000000 --- a/eth2/utils/eth2_hashing/fuzz/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ - -[package] -name = "hashing-fuzz" -version = "0.0.1" -authors = ["Automatically generated"] -publish = false - -[package.metadata] -cargo-fuzz = true - -[dependencies.hashing] -path = ".." -[dependencies.libfuzzer-sys] -git = "https://github.com/rust-fuzz/libfuzzer-sys.git" - -# Prevent this from interfering with workspaces -[workspace] -members = ["."] - -[[bin]] -name = "fuzz_target_hash" -path = "fuzz_targets/fuzz_target_hash.rs" diff --git a/eth2/utils/eth2_hashing/fuzz/fuzz_targets/fuzz_target_hash.rs b/eth2/utils/eth2_hashing/fuzz/fuzz_targets/fuzz_target_hash.rs deleted file mode 100644 index dd78d1ac86..0000000000 --- a/eth2/utils/eth2_hashing/fuzz/fuzz_targets/fuzz_target_hash.rs +++ /dev/null @@ -1,9 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate hashing; - -use hashing::hash; - -fuzz_target!(|data: &[u8]| { - let _result = hash(data); -}); diff --git a/eth2/utils/ssz/fuzz/.gitignore b/eth2/utils/ssz/fuzz/.gitignore deleted file mode 100644 index 572e03bdf3..0000000000 --- a/eth2/utils/ssz/fuzz/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -target -corpus -artifacts diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml deleted file mode 100644 index 3c922bac98..0000000000 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ /dev/null @@ -1,105 +0,0 @@ - -[package] -name = "ssz-fuzz" -version = "0.0.1" -authors = ["Automatically generated"] -publish = false - -[package.metadata] -cargo-fuzz = true - -[dependencies] -ethereum-types = "0.6" - -[dependencies.ssz] -path = ".." -[dependencies.libfuzzer-sys] -git = "https://github.com/rust-fuzz/libfuzzer-sys.git" - -# Prevent this from interfering with workspaces -[workspace] -members = ["."] - -[[bin]] -name = "fuzz_target_bool_decode" -path = "fuzz_targets/fuzz_target_bool_decode.rs" - -[[bin]] -name = "fuzz_target_bool_encode" -path = "fuzz_targets/fuzz_target_bool_encode.rs" - -[[bin]] -name = "fuzz_target_u8_decode" -path = "fuzz_targets/fuzz_target_u8_decode.rs" - -[[bin]] -name = "fuzz_target_u8_encode" -path = "fuzz_targets/fuzz_target_u8_encode.rs" - -[[bin]] -name = "fuzz_target_u16_decode" -path = "fuzz_targets/fuzz_target_u16_decode.rs" - -[[bin]] -name = "fuzz_target_u16_encode" -path = "fuzz_targets/fuzz_target_u16_encode.rs" - -[[bin]] -name = "fuzz_target_u32_decode" -path = "fuzz_targets/fuzz_target_u32_decode.rs" - -[[bin]] -name = "fuzz_target_u32_encode" -path = "fuzz_targets/fuzz_target_u32_encode.rs" - -[[bin]] -name = "fuzz_target_u64_decode" -path = "fuzz_targets/fuzz_target_u64_decode.rs" - -[[bin]] -name = "fuzz_target_u64_encode" -path = "fuzz_targets/fuzz_target_u64_encode.rs" - -[[bin]] -name = "fuzz_target_usize_decode" -path = "fuzz_targets/fuzz_target_usize_decode.rs" - -[[bin]] -name = "fuzz_target_usize_encode" -path = "fuzz_targets/fuzz_target_usize_encode.rs" - -[[bin]] -name = "fuzz_target_hash256_decode" -path = "fuzz_targets/fuzz_target_hash256_decode.rs" - -[[bin]] -name = "fuzz_target_hash256_encode" -path = "fuzz_targets/fuzz_target_hash256_encode.rs" - -[[bin]] -name = "fuzz_target_address_decode" -path = "fuzz_targets/fuzz_target_address_decode.rs" - -[[bin]] -name = "fuzz_target_address_encode" -path = "fuzz_targets/fuzz_target_address_encode.rs" - -[[bin]] -name = "fuzz_target_vec_address_decode" -path = "fuzz_targets/fuzz_target_vec_address_decode.rs" - -[[bin]] -name = "fuzz_target_vec_bool_decode" -path = "fuzz_targets/fuzz_target_vec_bool_decode.rs" - -[[bin]] -name = "fuzz_target_vec_decode" -path = "fuzz_targets/fuzz_target_vec_decode.rs" - -[[bin]] -name = "fuzz_target_vec_encode" -path = "fuzz_targets/fuzz_target_vec_encode.rs" - -[[bin]] -name = "fuzz_target_vec_u64_decode" -path = "fuzz_targets/fuzz_target_vec_u64_decode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs deleted file mode 100644 index 03ec386adc..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ethereum_types::Address; -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 20 { - // Should have valid result - let address = result.unwrap(); - assert_eq!(address, Address::from_slice(&data[..20])); - } else { - // Length of less than 32 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs deleted file mode 100644 index 0e51e00acb..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ethereum_types::Address; -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - if data.len() >= 20 { - let hash = Address::from_slice(&data[..20]); - ssz.append(&hash); - let ssz = ssz.drain(); - - assert_eq!(data[..20], ssz[..20]); - assert_eq!(ssz.len(), 20); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs deleted file mode 100644 index fe555385cd..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 1 { - if data[0] == 1 { - let val_bool = result.unwrap(); - assert!(val_bool); - } else if data[0] == 0 { - let val_bool = result.unwrap(); - assert!(!val_bool); - } else { - assert_eq!(result, Err(DecodeError::Invalid)); - } - } else { - // Length of 0 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs deleted file mode 100644 index 5165515382..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut val_bool = 0; - if data.len() >= 1 { - val_bool = data[0] % u8::pow(2, 6); - } - - ssz.append(&val_bool); - let ssz = ssz.drain(); - - assert_eq!(val_bool, ssz[0]); - assert_eq!(ssz.len(), 1); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs deleted file mode 100644 index fd34844d89..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ethereum_types::H256; -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 32 { - // Should have valid result - let hash = result.unwrap(); - assert_eq!(hash, H256::from_slice(&data[..32])); - } else { - // Length of less than 32 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs deleted file mode 100644 index 537d9cdf96..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ethereum_types::H256; -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - if data.len() >= 32 { - let hash = H256::from_slice(&data[..32]); - ssz.append(&hash); - let ssz = ssz.drain(); - - assert_eq!(data[..32], ssz[..32]); - assert_eq!(ssz.len(), 32); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs deleted file mode 100644 index e5f24ea88b..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 2 { - // Valid result - let number_u16 = result.unwrap(); - let val = u16::from_le_bytes([data[0], data[1]]); - assert_eq!(number_u16, val); - } else { - // Length of 0 or 1 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs deleted file mode 100644 index 2dea8bb73e..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut number_u16 = 0; - if data.len() >= 2 { - number_u16 = u16::from_be_bytes([data[0], data[1]]); - } - - ssz.append(&number_u16); - let ssz = ssz.drain(); - - assert_eq!(ssz.len(), 2); - assert_eq!(number_u16, u16::from_le_bytes([ssz[0], ssz[1]])); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs deleted file mode 100644 index f00c338fc4..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 4 { - // Valid result - let number_u32 = result.unwrap(); - let val = u32::from_le_bytes([data[0], data[1], data[2], data[3]]); - assert_eq!(number_u32, val); - } else { - // Length not 4 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs deleted file mode 100644 index db3b750a78..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut number_u32 = 0; - if data.len() >= 4 { - number_u32 = u32::from_be_bytes([data[0], data[1], data[2], data[3]]); - } - - ssz.append(&number_u32); - let ssz = ssz.drain(); - - assert_eq!(ssz.len(), 4); - assert_eq!(number_u32, u32::from_le_bytes([ssz[0], ssz[1], ssz[2], ssz[3]])); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs deleted file mode 100644 index f5c2794da3..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 8 { - // Valid result - let number_u64 = result.unwrap(); - let val = u64::from_le_bytes([ - data[0], - data[1], - data[2], - data[3], - data[4], - data[5], - data[6], - data[7], - ]); - assert_eq!(number_u64, val); - } else { - // Length not 8 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs deleted file mode 100644 index 6301fa86b8..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs +++ /dev/null @@ -1,38 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut number_u64 = 0; - if data.len() >= 8 { - number_u64 = u64::from_le_bytes([ - data[0], - data[1], - data[2], - data[3], - data[4], - data[5], - data[6], - data[7], - ]); - } - - ssz.append(&number_u64); - let ssz = ssz.drain(); - - assert_eq!(ssz.len(), 8); - assert_eq!(number_u64, u64::from_le_bytes([ - ssz[0], - ssz[1], - ssz[2], - ssz[3], - ssz[4], - ssz[5], - ssz[6], - ssz[7], - ])); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs deleted file mode 100644 index 4fcf9e2205..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs +++ /dev/null @@ -1,18 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let result: Result = decode(data); - if data.len() == 1 { - // Should have valid result - let number_u8 = result.unwrap(); - assert_eq!(number_u8, data[0]); - } else { - // Length not 1 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs deleted file mode 100644 index fa14379485..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut number_u8 = 0; - if data.len() >= 1 { - number_u8 = data[0]; - } - - ssz.append(&number_u8); - let ssz = ssz.drain(); - - assert_eq!(number_u8, ssz[0]); - assert_eq!(ssz.len(), 1); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs deleted file mode 100644 index 89ac62dce2..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{DecodeError, decode}; - -// Fuzz decode() -fuzz_target!(|data: &[u8]| { - // Note: we assume architecture is 64 bit -> usize == 64 bits - let result: Result = decode(data); - if data.len() == 8 { - // Valid result - let number_usize = result.unwrap(); - let val = u64::from_le_bytes([ - data[0], - data[1], - data[2], - data[3], - data[4], - data[5], - data[6], - data[7], - ]); - assert_eq!(number_usize, val as usize); - } else { - // Length less then 8 should return error - assert!(result.is_err()); - } -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs deleted file mode 100644 index a2c804311f..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs +++ /dev/null @@ -1,38 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode (via ssz_append) -fuzz_target!(|data: &[u8]| { - let mut ssz = SszStream::new(); - let mut number_usize = 0; - if data.len() >= 8 { - number_usize = u64::from_le_bytes([ - data[0], - data[1], - data[2], - data[3], - data[4], - data[5], - data[6], - data[7], - ]) as usize; - } - - ssz.append(&number_usize); - let ssz = ssz.drain(); - - assert_eq!(ssz.len(), 8); - assert_eq!(number_usize, u64::from_le_bytes([ - ssz[0], - ssz[1], - ssz[2], - ssz[3], - ssz[4], - ssz[5], - ssz[6], - ssz[7], - ]) as usize); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs deleted file mode 100644 index 6b78862a21..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs +++ /dev/null @@ -1,12 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ethereum_types::{Address}; -use ssz::{decode, DecodeError}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let _result: Result, DecodeError> = decode(data); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs deleted file mode 100644 index ceff2652ff..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{decode, DecodeError}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let _result: Result, DecodeError> = decode(data); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs deleted file mode 100644 index 0605a011b7..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ssz::{decode, DecodeError, Decodable}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let _result: Result, DecodeError> = decode(data); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs deleted file mode 100644 index 4b56aa60bf..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs +++ /dev/null @@ -1,14 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ethereum_types; -extern crate ssz; - -use ssz::SszStream; - -// Fuzz ssz_encode() -fuzz_target!(|data: &[u8]| { - - let mut ssz = SszStream::new(); - let data_vec = data.to_vec(); - ssz.append(&data_vec); -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs deleted file mode 100644 index 56f808f36e..0000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs +++ /dev/null @@ -1,10 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -use ssz::{decode, DecodeError}; - -// Fuzz ssz_decode() -fuzz_target!(|data: &[u8]| { - let _result: Result, DecodeError> = decode(data); -}); From dea2b5dffccd33499edc6ff139b2dea7465459e0 Mon Sep 17 00:00:00 2001 From: Sam Wilson <57262657+SamWilsn@users.noreply.github.com> Date: Mon, 4 Nov 2019 20:28:35 -0500 Subject: [PATCH 08/21] Depend on sha2 in eth2_hashing for wasm32 (#589) --- eth2/utils/eth2_hashing/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/eth2/utils/eth2_hashing/Cargo.toml b/eth2/utils/eth2_hashing/Cargo.toml index 3e7d8ed3f1..af48d0d4e7 100644 --- a/eth2/utils/eth2_hashing/Cargo.toml +++ b/eth2/utils/eth2_hashing/Cargo.toml @@ -9,6 +9,9 @@ description = "Hashing primitives used in Ethereum 2.0" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = "0.16.9" +[target.'cfg(target_arch = "wasm32")'.dependencies] +sha2 = "0.8.0" + [dev-dependencies] rustc-hex = "2.0.1" From a1e14cc3690df63053ef7887d1154eb25273dbb9 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Tue, 5 Nov 2019 02:29:07 +0100 Subject: [PATCH 09/21] Implement push_leaf for MerkleTree (#574) * Prototype for far_right push * Add push method and tests * Modify beacon_chain_builder for interop to use push instead of create * Add Push method to MerkleTree * Cargo fmt * Remove redundant tests * Fix typo * Rename push to push_leaf * Fix clippy warnings * Add DepthTooSmall enum variant * Avoid cloning in MerkleTree::push_leaf * Add quickcheck test for push_leaf * Cargo fmt updated * Return err instead of using unwrap() * Use enumerate instead of hard indexing * Use if let and return string on error * Fix typo in deposit_leave * Fix cargo fmt --- .../beacon_chain/src/beacon_chain_builder.rs | 21 ++- eth2/utils/merkle_proof/src/lib.rs | 130 +++++++++++++++++- 2 files changed, 138 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 949fb856b8..2ee6950612 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -210,22 +210,19 @@ fn interop_genesis_state( .collect::>(); let mut proofs = vec![]; - for i in 1..=deposit_root_leaves.len() { - // Note: this implementation is not so efficient. - // - // If `MerkleTree` had a push method, we could just build one tree and sample it instead of - // rebuilding the tree for each deposit. - let tree = MerkleTree::create( - &deposit_root_leaves[0..i], - spec.deposit_contract_tree_depth as usize, - ); + let depth = spec.deposit_contract_tree_depth as usize; + let mut tree = MerkleTree::create(&[], depth); + for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { + if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { + return Err(String::from("Failed to push leaf")); + } - let (_, mut proof) = tree.generate_proof(i - 1, spec.deposit_contract_tree_depth as usize); - proof.push(Hash256::from_slice(&int_to_bytes32(i))); + let (_, mut proof) = tree.generate_proof(i, depth); + proof.push(Hash256::from_slice(&int_to_bytes32(i + 1))); assert_eq!( proof.len(), - spec.deposit_contract_tree_depth as usize + 1, + depth + 1, "Deposit proof should be correct len" ); diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index 13f67c5840..785072eb4e 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -29,7 +29,7 @@ lazy_static! { /// /// Efficiently represents a Merkle tree of fixed depth where only the first N /// indices are populated by non-zero leaves (perfect for the deposit contract tree). -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum MerkleTree { /// Leaf node with the hash of its content. Leaf(H256), @@ -41,6 +41,18 @@ pub enum MerkleTree { Zero(usize), } +#[derive(Debug, PartialEq)] +pub enum MerkleTreeError { + // Trying to push in a leaf + LeafReached, + // No more space in the MerkleTree + MerkleTreeFull, + // MerkleTree is invalid + Invalid, + // Incorrect Depth provided + DepthTooSmall, +} + impl MerkleTree { /// Create a new Merkle tree from a list of leaves and a fixed depth. pub fn create(leaves: &[H256], depth: usize) -> Self { @@ -73,6 +85,62 @@ impl MerkleTree { } } + /// Push an element in the MerkleTree. + /// MerkleTree and depth must be correct, as the algorithm expects valid data. + pub fn push_leaf(&mut self, elem: H256, depth: usize) -> Result<(), MerkleTreeError> { + use std::mem; + use MerkleTree::*; + + if depth == 0 { + return Err(MerkleTreeError::DepthTooSmall); + } + + match self { + Leaf(_) => return Err(MerkleTreeError::LeafReached), + Zero(_) => { + mem::replace(self, MerkleTree::create(&[elem], depth)); + } + Node(ref mut hash, ref mut left, ref mut right) => { + let left: &mut MerkleTree = &mut *left; + let right: &mut MerkleTree = &mut *right; + match (&*left, &*right) { + // Tree is full + (Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull), + // There is a right node so insert in right node + (Node(_, _, _), Node(_, _, _)) => { + if let Err(e) = right.push_leaf(elem, depth - 1) { + return Err(e); + } + } + // Both branches are zero, insert in left one + (Zero(_), Zero(_)) => { + mem::replace(left, MerkleTree::create(&[elem], depth - 1)); + } + // Leaf on left branch and zero on right branch, insert on right side + (Leaf(_), Zero(_)) => { + mem::replace(right, MerkleTree::create(&[elem], depth - 1)); + } + // Try inserting on the left node -> if it fails because it is full, insert in right side. + (Node(_, _, _), Zero(_)) => { + match left.push_leaf(elem, depth - 1) { + Ok(_) => (), + // Left node is full, insert in right node + Err(MerkleTreeError::MerkleTreeFull) => { + mem::replace(right, MerkleTree::create(&[elem], depth - 1)); + } + Err(e) => return Err(e), + }; + } + // All other possibilities are invalid MerkleTrees + (_, _) => return Err(MerkleTreeError::Invalid), + }; + *hash = hash_concat(left.hash(), right.hash()); + } + } + + Ok(()) + } + /// Retrieve the root hash of this Merkle tree. pub fn hash(&self) -> H256 { match *self { @@ -213,6 +281,25 @@ mod tests { TestResult::from_bool(proofs_ok) } + #[quickcheck] + fn quickcheck_push_leaf_and_verify(int_leaves: Vec, depth: usize) -> TestResult { + if depth == 0 || depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { + return TestResult::discard(); + } + + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + + let mut merkle_tree = MerkleTree::create(&[], depth); + + let proofs_ok = leaves.into_iter().enumerate().all(|(i, leaf)| { + assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); + let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth); + stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) + }); + + TestResult::from_bool(proofs_ok) + } + #[test] fn sparse_zero_correct() { let depth = 2; @@ -328,4 +415,45 @@ mod tests { assert!(verify_merkle_proof(leaf, &[], 0, 0, leaf)); assert!(!verify_merkle_proof(leaf, &[], 0, 7, junk)); } + + #[test] + fn push_complete_example() { + let depth = 2; + let mut tree = MerkleTree::create(&[], depth); + + let leaf_b00 = H256::from([0xAA; 32]); + + let res = tree.push_leaf(leaf_b00, 0); + assert_eq!(res, Err(MerkleTreeError::DepthTooSmall)); + let expected_tree = MerkleTree::create(&[], depth); + assert_eq!(tree.hash(), expected_tree.hash()); + + tree.push_leaf(leaf_b00, depth) + .expect("Pushing in empty tree failed"); + let expected_tree = MerkleTree::create(&[leaf_b00], depth); + assert_eq!(tree.hash(), expected_tree.hash()); + + let leaf_b01 = H256::from([0xBB; 32]); + tree.push_leaf(leaf_b01, depth) + .expect("Pushing in left then right node failed"); + let expected_tree = MerkleTree::create(&[leaf_b00, leaf_b01], depth); + assert_eq!(tree.hash(), expected_tree.hash()); + + let leaf_b10 = H256::from([0xCC; 32]); + tree.push_leaf(leaf_b10, depth) + .expect("Pushing in right then left node failed"); + let expected_tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10], depth); + assert_eq!(tree.hash(), expected_tree.hash()); + + let leaf_b11 = H256::from([0xDD; 32]); + tree.push_leaf(leaf_b11, depth) + .expect("Pushing in outtermost leaf failed"); + let expected_tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], depth); + assert_eq!(tree.hash(), expected_tree.hash()); + + let leaf_b12 = H256::from([0xEE; 32]); + let res = tree.push_leaf(leaf_b12, depth); + assert_eq!(res, Err(MerkleTreeError::MerkleTreeFull)); + assert_eq!(tree.hash(), expected_tree.hash()); + } } From 4ef66a544abc108a487cc18eeb55d1d16195576f Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Tue, 5 Nov 2019 04:43:08 +0100 Subject: [PATCH 10/21] Move EthSpec (#591) * Allow slot clock to work on genesis * Loose over-strict requirements for slot clock tests * move and rename beacon_state_types to eth_spec.rs --- eth2/types/src/beacon_state.rs | 3 +-- .../src/{beacon_state/beacon_state_types.rs => eth_spec.rs} | 0 eth2/types/src/lib.rs | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) rename eth2/types/src/{beacon_state/beacon_state_types.rs => eth_spec.rs} (100%) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 6b2b44d0ee..f64deb38a2 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -15,10 +15,9 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; pub use self::committee_cache::CommitteeCache; -pub use beacon_state_types::*; +pub use eth_spec::*; #[macro_use] -mod beacon_state_types; mod committee_cache; mod exit_cache; mod pubkey_cache; diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/eth_spec.rs similarity index 100% rename from eth2/types/src/beacon_state/beacon_state_types.rs rename to eth2/types/src/eth_spec.rs diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index d1eaa393f8..8f9c07b0db 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -23,6 +23,7 @@ pub mod crosslink_committee; pub mod deposit; pub mod deposit_data; pub mod eth1_data; +pub mod eth_spec; pub mod fork; pub mod free_attestation; pub mod historical_batch; From c1a2238f1a6258af765b03bbefd13da908fc15d1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 5 Nov 2019 15:46:52 +1100 Subject: [PATCH 11/21] Implement tree hash caching (#584) * Implement basic tree hash caching * Use spaces to indent top-level Cargo.toml * Optimize BLS tree hash by hashing bytes directly * Implement tree hash caching for validator registry * Persist BeaconState tree hash cache to disk * Address Paul's review comments --- Cargo.toml | 75 ++++---- beacon_node/store/src/impls/beacon_state.rs | 9 +- eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state.rs | 105 +++++++---- eth2/types/src/crosslink_committee.rs | 5 +- eth2/types/src/lib.rs | 1 + eth2/types/src/tree_hash_impls.rs | 129 ++++++++++++++ eth2/utils/bls/build.rs | 19 -- eth2/utils/bls/src/aggregate_signature.rs | 2 +- .../utils/bls/src/fake_aggregate_signature.rs | 2 +- eth2/utils/bls/src/fake_public_key.rs | 2 +- eth2/utils/bls/src/fake_signature.rs | 2 +- eth2/utils/bls/src/macros.rs | 21 ++- eth2/utils/bls/src/public_key.rs | 2 +- eth2/utils/bls/src/public_key_bytes.rs | 3 +- eth2/utils/bls/src/secret_key.rs | 2 +- eth2/utils/bls/src/signature.rs | 2 +- eth2/utils/bls/src/signature_bytes.rs | 8 +- eth2/utils/cached_tree_hash/Cargo.toml | 17 ++ eth2/utils/cached_tree_hash/src/cache.rs | 137 +++++++++++++++ eth2/utils/cached_tree_hash/src/impls.rs | 99 +++++++++++ eth2/utils/cached_tree_hash/src/lib.rs | 31 ++++ .../utils/cached_tree_hash/src/multi_cache.rs | 62 +++++++ eth2/utils/cached_tree_hash/src/test.rs | 147 ++++++++++++++++ eth2/utils/eth2_hashing/Cargo.toml | 9 +- eth2/utils/eth2_hashing/src/lib.rs | 38 ++++ eth2/utils/merkle_proof/src/lib.rs | 53 ++---- eth2/utils/tree_hash/Cargo.toml | 2 +- eth2/utils/tree_hash/benches/benches.rs | 56 ++++-- eth2/utils/tree_hash/src/impls.rs | 30 ---- eth2/utils/tree_hash/src/lib.rs | 5 +- eth2/utils/tree_hash/src/merkleize_padded.rs | 30 +--- eth2/utils/tree_hash_derive/src/lib.rs | 165 +++++++++++++++++- tests/ef_tests/Cargo.toml | 1 + tests/ef_tests/src/cases/ssz_generic.rs | 2 +- tests/ef_tests/src/cases/ssz_static.rs | 44 ++++- tests/ef_tests/src/handler.rs | 26 +++ tests/ef_tests/tests/tests.rs | 16 +- 38 files changed, 1112 insertions(+), 248 deletions(-) create mode 100644 eth2/types/src/tree_hash_impls.rs delete mode 100644 eth2/utils/bls/build.rs create mode 100644 eth2/utils/cached_tree_hash/Cargo.toml create mode 100644 eth2/utils/cached_tree_hash/src/cache.rs create mode 100644 eth2/utils/cached_tree_hash/src/impls.rs create mode 100644 eth2/utils/cached_tree_hash/src/lib.rs create mode 100644 eth2/utils/cached_tree_hash/src/multi_cache.rs create mode 100644 eth2/utils/cached_tree_hash/src/test.rs diff --git a/Cargo.toml b/Cargo.toml index 9616155310..2edbced095 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,44 +1,45 @@ [workspace] members = [ - "eth2/lmd_ghost", - "eth2/operation_pool", - "eth2/state_processing", - "eth2/types", - "eth2/utils/bls", - "eth2/utils/compare_fields", - "eth2/utils/compare_fields_derive", - "eth2/utils/eth2_config", - "eth2/utils/eth2_interop_keypairs", - "eth2/utils/logging", - "eth2/utils/eth2_hashing", - "eth2/utils/lighthouse_metrics", - "eth2/utils/lighthouse_bootstrap", - "eth2/utils/merkle_proof", - "eth2/utils/int_to_bytes", - "eth2/utils/serde_hex", - "eth2/utils/slot_clock", - "eth2/utils/ssz", - "eth2/utils/ssz_derive", - "eth2/utils/ssz_types", - "eth2/utils/swap_or_not_shuffle", - "eth2/utils/tree_hash", - "eth2/utils/tree_hash_derive", + "eth2/lmd_ghost", + "eth2/operation_pool", + "eth2/state_processing", + "eth2/types", + "eth2/utils/bls", + "eth2/utils/compare_fields", + "eth2/utils/compare_fields_derive", + "eth2/utils/eth2_config", + "eth2/utils/eth2_interop_keypairs", + "eth2/utils/logging", + "eth2/utils/eth2_hashing", + "eth2/utils/lighthouse_metrics", + "eth2/utils/lighthouse_bootstrap", + "eth2/utils/merkle_proof", + "eth2/utils/int_to_bytes", + "eth2/utils/serde_hex", + "eth2/utils/slot_clock", + "eth2/utils/ssz", + "eth2/utils/ssz_derive", + "eth2/utils/ssz_types", + "eth2/utils/swap_or_not_shuffle", + "eth2/utils/cached_tree_hash", + "eth2/utils/tree_hash", + "eth2/utils/tree_hash_derive", "eth2/utils/test_random_derive", - "beacon_node", - "beacon_node/store", - "beacon_node/client", - "beacon_node/rest_api", - "beacon_node/network", - "beacon_node/eth2-libp2p", + "beacon_node", + "beacon_node/store", + "beacon_node/client", + "beacon_node/rest_api", + "beacon_node/network", + "beacon_node/eth2-libp2p", "beacon_node/rpc", - "beacon_node/version", - "beacon_node/beacon_chain", - "beacon_node/websocket_server", - "tests/ef_tests", - "lcli", - "protos", - "validator_client", - "account_manager", + "beacon_node/version", + "beacon_node/beacon_chain", + "beacon_node/websocket_server", + "tests/ef_tests", + "lcli", + "protos", + "validator_client", + "account_manager", ] [patch] diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 69e83cd636..2113d35bd1 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -2,13 +2,15 @@ use crate::*; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; use std::convert::TryInto; -use types::beacon_state::{CommitteeCache, CACHED_EPOCHS}; +use types::beacon_state::{BeaconTreeHashCache, CommitteeCache, CACHED_EPOCHS}; /// A container for storing `BeaconState` components. +// TODO: would be more space efficient with the caches stored separately and referenced by hash #[derive(Encode, Decode)] struct StorageContainer { state_bytes: Vec, committee_caches_bytes: Vec>, + tree_hash_cache_bytes: Vec, } impl StorageContainer { @@ -20,9 +22,12 @@ impl StorageContainer { committee_caches_bytes.push(cache.as_ssz_bytes()); } + let tree_hash_cache_bytes = state.tree_hash_cache.as_ssz_bytes(); + Self { state_bytes: state.as_ssz_bytes(), committee_caches_bytes, + tree_hash_cache_bytes, } } } @@ -43,6 +48,8 @@ impl TryInto> for StorageContainer { state.committee_caches[i] = CommitteeCache::from_ssz_bytes(bytes)?; } + state.tree_hash_cache = BeaconTreeHashCache::from_ssz_bytes(&self.tree_hash_cache_bytes)?; + Ok(state) } } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 9123ca6b32..e3138b26cc 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -29,6 +29,7 @@ test_random_derive = { path = "../utils/test_random_derive" } tree_hash = "0.1.0" tree_hash_derive = "0.2" rand_xorshift = "0.2.0" +cached_tree_hash = { path = "../utils/cached_tree_hash" } [dev-dependencies] env_logger = "0.7.1" diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index f64deb38a2..2aa805808a 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -2,6 +2,7 @@ use self::committee_cache::get_active_validator_indices; use self::exit_cache::ExitCache; use crate::test_utils::TestRandom; use crate::*; +use cached_tree_hash::{CachedTreeHash, MultiTreeHashCache, TreeHashCache}; use compare_fields_derive::CompareFields; use eth2_hashing::hash; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; @@ -12,7 +13,7 @@ use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use test_random_derive::TestRandom; use tree_hash::TreeHash; -use tree_hash_derive::TreeHash; +use tree_hash_derive::{CachedTreeHash, TreeHash}; pub use self::committee_cache::CommitteeCache; pub use eth_spec::*; @@ -57,6 +58,7 @@ pub enum Error { RelativeEpochError(RelativeEpochError), CommitteeCacheUninitialized(RelativeEpoch), SszTypesError(ssz_types::Error), + CachedTreeHashError(cached_tree_hash::Error), } /// Control whether an epoch-indexed field can be indexed at the next epoch or not. @@ -75,6 +77,26 @@ impl AllowNextEpoch { } } +#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] +pub struct BeaconTreeHashCache { + initialized: bool, + block_roots: TreeHashCache, + state_roots: TreeHashCache, + historical_roots: TreeHashCache, + validators: MultiTreeHashCache, + balances: TreeHashCache, + randao_mixes: TreeHashCache, + active_index_roots: TreeHashCache, + compact_committees_roots: TreeHashCache, + slashings: TreeHashCache, +} + +impl BeaconTreeHashCache { + pub fn is_initialized(&self) -> bool { + self.initialized + } +} + /// The state of the `BeaconChain` at some slot. /// /// Spec v0.8.0 @@ -88,9 +110,11 @@ impl AllowNextEpoch { Encode, Decode, TreeHash, + CachedTreeHash, CompareFields, )] #[serde(bound = "T: EthSpec")] +#[cached_tree_hash(type = "BeaconTreeHashCache")] pub struct BeaconState where T: EthSpec, @@ -103,9 +127,12 @@ where // History pub latest_block_header: BeaconBlockHeader, #[compare_fields(as_slice)] + #[cached_tree_hash(block_roots)] pub block_roots: FixedVector, #[compare_fields(as_slice)] + #[cached_tree_hash(state_roots)] pub state_roots: FixedVector, + #[cached_tree_hash(historical_roots)] pub historical_roots: VariableList, // Ethereum 1.0 chain data @@ -115,19 +142,25 @@ where // Registry #[compare_fields(as_slice)] + #[cached_tree_hash(validators)] pub validators: VariableList, #[compare_fields(as_slice)] + #[cached_tree_hash(balances)] pub balances: VariableList, // Shuffling pub start_shard: u64, + #[cached_tree_hash(randao_mixes)] pub randao_mixes: FixedVector, #[compare_fields(as_slice)] + #[cached_tree_hash(active_index_roots)] pub active_index_roots: FixedVector, #[compare_fields(as_slice)] + #[cached_tree_hash(compact_committees_roots)] pub compact_committees_roots: FixedVector, // Slashings + #[cached_tree_hash(slashings)] pub slashings: FixedVector, // Attestations @@ -164,6 +197,12 @@ where #[tree_hash(skip_hashing)] #[test_random(default)] pub exit_cache: ExitCache, + #[serde(skip_serializing, skip_deserializing)] + #[ssz(skip_serializing)] + #[ssz(skip_deserializing)] + #[tree_hash(skip_hashing)] + #[test_random(default)] + pub tree_hash_cache: BeaconTreeHashCache, } impl BeaconState { @@ -225,6 +264,7 @@ impl BeaconState { ], pubkey_cache: PubkeyCache::default(), exit_cache: ExitCache::default(), + tree_hash_cache: BeaconTreeHashCache::default(), } } @@ -825,7 +865,7 @@ impl BeaconState { self.build_committee_cache(RelativeEpoch::Current, spec)?; self.build_committee_cache(RelativeEpoch::Next, spec)?; self.update_pubkey_cache()?; - self.update_tree_hash_cache()?; + self.build_tree_hash_cache()?; self.exit_cache.build_from_registry(&self.validators, spec); Ok(()) @@ -936,41 +976,40 @@ impl BeaconState { self.pubkey_cache = PubkeyCache::default() } - /// Update the tree hash cache, building it for the first time if it is empty. - /// - /// Returns the `tree_hash_root` resulting from the update. This root can be considered the - /// canonical root of `self`. - /// - /// ## Note - /// - /// Cache not currently implemented, just performs a full tree hash. - pub fn update_tree_hash_cache(&mut self) -> Result { - // TODO(#440): re-enable cached tree hash - Ok(Hash256::from_slice(&self.tree_hash_root())) + /// Initialize but don't fill the tree hash cache, if it isn't already initialized. + pub fn initialize_tree_hash_cache(&mut self) { + if !self.tree_hash_cache.initialized { + self.tree_hash_cache = Self::new_tree_hash_cache(); + } } - /// Returns the tree hash root determined by the last execution of `self.update_tree_hash_cache(..)`. + /// Build and update the tree hash cache if it isn't already initialized. + pub fn build_tree_hash_cache(&mut self) -> Result<(), Error> { + self.update_tree_hash_cache().map(|_| ()) + } + + /// Build the tree hash cache, with blatant disregard for any existing cache. + pub fn force_build_tree_hash_cache(&mut self) -> Result<(), Error> { + self.tree_hash_cache.initialized = false; + self.build_tree_hash_cache() + } + + /// Compute the tree hash root of the state using the tree hash cache. /// - /// Note: does _not_ update the cache and may return an outdated root. - /// - /// Returns an error if the cache is not initialized or if an error is encountered during the - /// cache update. - /// - /// ## Note - /// - /// Cache not currently implemented, just performs a full tree hash. - pub fn cached_tree_hash_root(&self) -> Result { - // TODO(#440): re-enable cached tree hash - Ok(Hash256::from_slice(&self.tree_hash_root())) + /// Initialize the tree hash cache if it isn't already initialized. + pub fn update_tree_hash_cache(&mut self) -> Result { + self.initialize_tree_hash_cache(); + + let mut cache = std::mem::replace(&mut self.tree_hash_cache, <_>::default()); + let result = self.recalculate_tree_hash_root(&mut cache); + std::mem::replace(&mut self.tree_hash_cache, cache); + + Ok(result?) } /// Completely drops the tree hash cache, replacing it with a new, empty cache. - /// - /// ## Note - /// - /// Cache not currently implemented, is a no-op. pub fn drop_tree_hash_cache(&mut self) { - // TODO(#440): re-enable cached tree hash + self.tree_hash_cache = BeaconTreeHashCache::default(); } } @@ -985,3 +1024,9 @@ impl From for Error { Error::SszTypesError(e) } } + +impl From for Error { + fn from(e: cached_tree_hash::Error) -> Error { + Error::CachedTreeHashError(e) + } +} diff --git a/eth2/types/src/crosslink_committee.rs b/eth2/types/src/crosslink_committee.rs index 00c4bebc0b..0f7a401ca2 100644 --- a/eth2/types/src/crosslink_committee.rs +++ b/eth2/types/src/crosslink_committee.rs @@ -1,7 +1,6 @@ use crate::*; -use tree_hash_derive::TreeHash; -#[derive(Default, Clone, Debug, PartialEq, TreeHash)] +#[derive(Default, Clone, Debug, PartialEq)] pub struct CrosslinkCommittee<'a> { pub slot: Slot, pub shard: Shard, @@ -18,7 +17,7 @@ impl<'a> CrosslinkCommittee<'a> { } } -#[derive(Default, Clone, Debug, PartialEq, TreeHash)] +#[derive(Default, Clone, Debug, PartialEq)] pub struct OwnedCrosslinkCommittee { pub slot: Slot, pub shard: Shard, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 8f9c07b0db..fa23f9c1c1 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -38,6 +38,7 @@ pub mod slot_epoch_macros; pub mod relative_epoch; pub mod slot_epoch; pub mod slot_height; +mod tree_hash_impls; pub mod validator; use ethereum_types::{H160, H256, U256}; diff --git a/eth2/types/src/tree_hash_impls.rs b/eth2/types/src/tree_hash_impls.rs new file mode 100644 index 0000000000..2d652c475c --- /dev/null +++ b/eth2/types/src/tree_hash_impls.rs @@ -0,0 +1,129 @@ +//! This module contains custom implementations of `CachedTreeHash` for ETH2-specific types. +//! +//! It makes some assumptions about the layouts and update patterns of other structs in this +//! crate, and should be updated carefully whenever those structs are changed. +use crate::{Hash256, Validator}; +use cached_tree_hash::{int_log, CachedTreeHash, Error, TreeHashCache}; +use tree_hash::TreeHash; + +/// Number of struct fields on `Validator`. +const NUM_VALIDATOR_FIELDS: usize = 8; + +impl CachedTreeHash for Validator { + fn new_tree_hash_cache() -> TreeHashCache { + TreeHashCache::new(int_log(NUM_VALIDATOR_FIELDS)) + } + + /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. + /// + /// Specifically, we assume that the `pubkey` and `withdrawal_credentials` fields are constant. + fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result { + // If the cache is empty, hash every field to fill it. + if cache.leaves().is_empty() { + return cache.recalculate_merkle_root(field_tree_hash_iter(self)); + } + + // Otherwise just check the fields which might have changed. + let dirty_indices = cache + .leaves() + .iter_mut() + .enumerate() + .flat_map(|(i, leaf)| { + // Fields pubkey and withdrawal_credentials are constant + if i == 0 || i == 1 { + None + } else { + let new_tree_hash = field_tree_hash_by_index(self, i); + if leaf.as_bytes() != &new_tree_hash[..] { + leaf.assign_from_slice(&new_tree_hash); + Some(i) + } else { + None + } + } + }) + .collect(); + + cache.update_merkle_root(dirty_indices) + } +} + +/// Get the tree hash root of a validator field by its position/index in the struct. +fn field_tree_hash_by_index(v: &Validator, field_idx: usize) -> Vec { + match field_idx { + 0 => v.pubkey.tree_hash_root(), + 1 => v.withdrawal_credentials.tree_hash_root(), + 2 => v.effective_balance.tree_hash_root(), + 3 => v.slashed.tree_hash_root(), + 4 => v.activation_eligibility_epoch.tree_hash_root(), + 5 => v.activation_epoch.tree_hash_root(), + 6 => v.exit_epoch.tree_hash_root(), + 7 => v.withdrawable_epoch.tree_hash_root(), + _ => panic!( + "Validator type only has {} fields, {} out of bounds", + NUM_VALIDATOR_FIELDS, field_idx + ), + } +} + +/// Iterator over the tree hash roots of `Validator` fields. +fn field_tree_hash_iter<'a>( + v: &'a Validator, +) -> impl Iterator + ExactSizeIterator + 'a { + (0..NUM_VALIDATOR_FIELDS) + .map(move |i| field_tree_hash_by_index(v, i)) + .map(|tree_hash_root| { + let mut res = [0; 32]; + res.copy_from_slice(&tree_hash_root[0..32]); + res + }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::test_utils::TestRandom; + use crate::Epoch; + use rand::SeedableRng; + use rand_xorshift::XorShiftRng; + + fn test_validator_tree_hash(v: &Validator) { + let mut cache = Validator::new_tree_hash_cache(); + // With a fresh cache + assert_eq!( + &v.tree_hash_root()[..], + v.recalculate_tree_hash_root(&mut cache).unwrap().as_bytes(), + "{:?}", + v + ); + // With a completely up-to-date cache + assert_eq!( + &v.tree_hash_root()[..], + v.recalculate_tree_hash_root(&mut cache).unwrap().as_bytes(), + "{:?}", + v + ); + } + + #[test] + fn default_validator() { + test_validator_tree_hash(&Validator::default()); + } + + #[test] + fn zeroed_validator() { + let mut v = Validator::default(); + v.activation_eligibility_epoch = Epoch::from(0u64); + v.activation_epoch = Epoch::from(0u64); + test_validator_tree_hash(&v); + } + + #[test] + fn random_validators() { + let mut rng = XorShiftRng::from_seed([0xf1; 16]); + let num_validators = 1000; + (0..num_validators) + .map(|_| Validator::random_for_test(&mut rng)) + .for_each(|v| test_validator_tree_hash(&v)); + } +} diff --git a/eth2/utils/bls/build.rs b/eth2/utils/bls/build.rs deleted file mode 100644 index 7f08a1ed5c..0000000000 --- a/eth2/utils/bls/build.rs +++ /dev/null @@ -1,19 +0,0 @@ -// This build script is symlinked from each project that requires BLS's "fake crypto", -// so that the `fake_crypto` feature of every sub-crate can be turned on by running -// with FAKE_CRYPTO=1 from the top-level workspace. -// At some point in the future it might be possible to do: -// $ cargo test --all --release --features fake_crypto -// but at the present time this doesn't work. -// Related: https://github.com/rust-lang/cargo/issues/5364 -fn main() { - if let Ok(fake_crypto) = std::env::var("FAKE_CRYPTO") { - if fake_crypto == "1" { - println!("cargo:rustc-cfg=feature=\"fake_crypto\""); - println!("cargo:rerun-if-env-changed=FAKE_CRYPTO"); - println!( - "cargo:warning=[{}]: Compiled with fake BLS cryptography. DO NOT USE, TESTING ONLY", - std::env::var("CARGO_PKG_NAME").unwrap() - ); - } - } -} diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index e80c1b1000..5a081c9435 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -155,7 +155,7 @@ impl_ssz!( "AggregateSignature" ); -impl_tree_hash!(AggregateSignature, U96); +impl_tree_hash!(AggregateSignature, BLS_AGG_SIG_BYTE_SIZE); impl Serialize for AggregateSignature { /// Serde serialization is compliant the Ethereum YAML test format. diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 7911bb57a9..52495a76e8 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -93,7 +93,7 @@ impl_ssz!( "FakeAggregateSignature" ); -impl_tree_hash!(FakeAggregateSignature, U96); +impl_tree_hash!(FakeAggregateSignature, BLS_AGG_SIG_BYTE_SIZE); impl Serialize for FakeAggregateSignature { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index 82b1c707f2..f9440d86de 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -102,7 +102,7 @@ impl default::Default for FakePublicKey { impl_ssz!(FakePublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "FakePublicKey"); -impl_tree_hash!(FakePublicKey, U48); +impl_tree_hash!(FakePublicKey, BLS_PUBLIC_KEY_BYTE_SIZE); impl Serialize for FakePublicKey { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/fake_signature.rs b/eth2/utils/bls/src/fake_signature.rs index 6e34a518ce..3ece5e87b9 100644 --- a/eth2/utils/bls/src/fake_signature.rs +++ b/eth2/utils/bls/src/fake_signature.rs @@ -91,7 +91,7 @@ impl FakeSignature { impl_ssz!(FakeSignature, BLS_SIG_BYTE_SIZE, "FakeSignature"); -impl_tree_hash!(FakeSignature, U96); +impl_tree_hash!(FakeSignature, BLS_SIG_BYTE_SIZE); impl Serialize for FakeSignature { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/macros.rs b/eth2/utils/bls/src/macros.rs index e8bd3dd048..4acf185f0d 100644 --- a/eth2/utils/bls/src/macros.rs +++ b/eth2/utils/bls/src/macros.rs @@ -42,7 +42,7 @@ macro_rules! impl_ssz { } macro_rules! impl_tree_hash { - ($type: ty, $byte_size: ident) => { + ($type: ty, $byte_size: expr) => { impl tree_hash::TreeHash for $type { fn tree_hash_type() -> tree_hash::TreeHashType { tree_hash::TreeHashType::Vector @@ -57,16 +57,19 @@ macro_rules! impl_tree_hash { } fn tree_hash_root(&self) -> Vec { - let vector: ssz_types::FixedVector = - ssz_types::FixedVector::from(self.as_ssz_bytes()); - vector.tree_hash_root() + // We could use the tree hash implementation for `FixedVec`, + // but benchmarks have show that to be at least 15% slower because of the + // unnecessary copying and allocation (one Vec per byte) + let values_per_chunk = tree_hash::BYTES_PER_CHUNK; + let minimum_chunk_count = ($byte_size + values_per_chunk - 1) / values_per_chunk; + tree_hash::merkle_root(&self.as_ssz_bytes(), minimum_chunk_count) } } }; } macro_rules! bytes_struct { - ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $ssz_type_size: ident, + ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $type_str: expr, $byte_size_str: expr) => { #[doc = "Stores `"] #[doc = $byte_size_str] @@ -82,9 +85,9 @@ macro_rules! bytes_struct { #[derive(Clone)] pub struct $name([u8; $byte_size]); }; - ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $ssz_type_size: ident) => { - bytes_struct!($name, $type, $byte_size, $small_name, $ssz_type_size, stringify!($type), - stringify!($byte_size)); + ($name: ident, $type: ty, $byte_size: expr, $small_name: expr) => { + bytes_struct!($name, $type, $byte_size, $small_name, stringify!($type), + stringify!($byte_size)); impl $name { pub fn from_bytes(bytes: &[u8]) -> Result { @@ -144,7 +147,7 @@ macro_rules! bytes_struct { impl_ssz!($name, $byte_size, "$type"); - impl_tree_hash!($name, $ssz_type_size); + impl_tree_hash!($name, $byte_size); impl serde::ser::Serialize for $name { /// Serde serialization is compliant the Ethereum YAML test format. diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 4b5abb58e3..87204fae19 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -94,7 +94,7 @@ impl default::Default for PublicKey { impl_ssz!(PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "PublicKey"); -impl_tree_hash!(PublicKey, U48); +impl_tree_hash!(PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE); impl Serialize for PublicKey { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs index afdbcb2701..528ef82544 100644 --- a/eth2/utils/bls/src/public_key_bytes.rs +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -6,8 +6,7 @@ bytes_struct!( PublicKeyBytes, PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, - "public key", - U48 + "public key" ); #[cfg(test)] diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index d9ada73337..6e39cace3f 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -49,7 +49,7 @@ impl SecretKey { impl_ssz!(SecretKey, BLS_SECRET_KEY_BYTE_SIZE, "SecretKey"); -impl_tree_hash!(SecretKey, U48); +impl_tree_hash!(SecretKey, BLS_SECRET_KEY_BYTE_SIZE); impl Serialize for SecretKey { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 7a2bc60519..64f306b302 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -108,7 +108,7 @@ impl Signature { impl_ssz!(Signature, BLS_SIG_BYTE_SIZE, "Signature"); -impl_tree_hash!(Signature, U96); +impl_tree_hash!(Signature, BLS_SIG_BYTE_SIZE); impl Serialize for Signature { /// Serde serialization is compliant the Ethereum YAML test format. diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs index b89c0f0d11..bfec269b02 100644 --- a/eth2/utils/bls/src/signature_bytes.rs +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -2,13 +2,7 @@ use ssz::{Decode, DecodeError, Encode}; use super::{Signature, BLS_SIG_BYTE_SIZE}; -bytes_struct!( - SignatureBytes, - Signature, - BLS_SIG_BYTE_SIZE, - "signature", - U96 -); +bytes_struct!(SignatureBytes, Signature, BLS_SIG_BYTE_SIZE, "signature"); #[cfg(test)] mod tests { diff --git a/eth2/utils/cached_tree_hash/Cargo.toml b/eth2/utils/cached_tree_hash/Cargo.toml new file mode 100644 index 0000000000..5ed95c78dc --- /dev/null +++ b/eth2/utils/cached_tree_hash/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cached_tree_hash" +version = "0.1.0" +authors = ["Michael Sproul "] +edition = "2018" + +[dependencies] +ethereum-types = "0.8" +eth2_ssz_types = { path = "../ssz_types" } +eth2_hashing = "0.1" +eth2_ssz_derive = "0.1.0" +eth2_ssz = "0.1.2" +tree_hash = "0.1" + +[dev-dependencies] +quickcheck = "0.9" +quickcheck_macros = "0.8" diff --git a/eth2/utils/cached_tree_hash/src/cache.rs b/eth2/utils/cached_tree_hash/src/cache.rs new file mode 100644 index 0000000000..4a5d650fb2 --- /dev/null +++ b/eth2/utils/cached_tree_hash/src/cache.rs @@ -0,0 +1,137 @@ +use crate::{Error, Hash256}; +use eth2_hashing::{hash_concat, ZERO_HASHES}; +use ssz_derive::{Decode, Encode}; +use tree_hash::BYTES_PER_CHUNK; + +/// Sparse Merkle tree suitable for tree hashing vectors and lists. +#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] +pub struct TreeHashCache { + /// Depth is such that the tree has a capacity for 2^depth leaves + depth: usize, + /// Sparse layers. + /// + /// The leaves are contained in `self.layers[self.depth]`, and each other layer `i` + /// contains the parents of the nodes in layer `i + 1`. + layers: Vec>, +} + +impl TreeHashCache { + /// Create a new cache with the given `depth`, but no actual content. + pub fn new(depth: usize) -> Self { + TreeHashCache { + depth, + layers: vec![vec![]; depth + 1], + } + } + + /// Compute the updated Merkle root for the given `leaves`. + pub fn recalculate_merkle_root( + &mut self, + leaves: impl Iterator + ExactSizeIterator, + ) -> Result { + let dirty_indices = self.update_leaves(leaves)?; + self.update_merkle_root(dirty_indices) + } + + /// Phase 1 of the algorithm: compute the indices of all dirty leaves. + pub fn update_leaves( + &mut self, + mut leaves: impl Iterator + ExactSizeIterator, + ) -> Result, Error> { + let new_leaf_count = leaves.len(); + + if new_leaf_count < self.leaves().len() { + return Err(Error::CannotShrink); + } else if new_leaf_count > 2usize.pow(self.depth as u32) { + return Err(Error::TooManyLeaves); + } + + // Update the existing leaves + let mut dirty = self + .leaves() + .iter_mut() + .enumerate() + .zip(&mut leaves) + .flat_map(|((i, leaf), new_leaf)| { + if leaf.as_bytes() != new_leaf { + leaf.assign_from_slice(&new_leaf); + Some(i) + } else { + None + } + }) + .collect::>(); + + // Push the rest of the new leaves (if any) + dirty.extend(self.leaves().len()..new_leaf_count); + self.leaves() + .extend(leaves.map(|l| Hash256::from_slice(&l))); + + Ok(dirty) + } + + /// Phase 2: propagate changes upwards from the leaves of the tree, and compute the root. + /// + /// Returns an error if `dirty_indices` is inconsistent with the cache. + pub fn update_merkle_root(&mut self, mut dirty_indices: Vec) -> Result { + if dirty_indices.is_empty() { + return Ok(self.root()); + } + + let mut depth = self.depth; + + while depth > 0 { + let new_dirty_indices = lift_dirty(&dirty_indices); + + for &idx in &new_dirty_indices { + let left_idx = 2 * idx; + let right_idx = left_idx + 1; + + let left = self.layers[depth][left_idx]; + let right = self.layers[depth] + .get(right_idx) + .copied() + .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth - depth])); + + let new_hash = hash_concat(left.as_bytes(), right.as_bytes()); + + match self.layers[depth - 1].get_mut(idx) { + Some(hash) => { + hash.assign_from_slice(&new_hash); + } + None => { + // Parent layer should already contain nodes for all non-dirty indices + if idx != self.layers[depth - 1].len() { + return Err(Error::CacheInconsistent); + } + self.layers[depth - 1].push(Hash256::from_slice(&new_hash)); + } + } + } + + dirty_indices = new_dirty_indices; + depth -= 1; + } + + Ok(self.root()) + } + + /// Get the root of this cache, without doing any updates/computation. + pub fn root(&self) -> Hash256 { + self.layers[0] + .get(0) + .copied() + .unwrap_or_else(|| Hash256::from_slice(&ZERO_HASHES[self.depth])) + } + + pub fn leaves(&mut self) -> &mut Vec { + &mut self.layers[self.depth] + } +} + +/// Compute the dirty indices for one layer up. +fn lift_dirty(dirty_indices: &[usize]) -> Vec { + let mut new_dirty = dirty_indices.iter().map(|i| *i / 2).collect::>(); + new_dirty.dedup(); + new_dirty +} diff --git a/eth2/utils/cached_tree_hash/src/impls.rs b/eth2/utils/cached_tree_hash/src/impls.rs new file mode 100644 index 0000000000..c5bc181205 --- /dev/null +++ b/eth2/utils/cached_tree_hash/src/impls.rs @@ -0,0 +1,99 @@ +use crate::{CachedTreeHash, Error, Hash256, TreeHashCache}; +use ssz_types::{typenum::Unsigned, FixedVector, VariableList}; +use std::mem::size_of; +use tree_hash::{mix_in_length, BYTES_PER_CHUNK}; + +/// Compute ceil(log(n)) +/// +/// Smallest number of bits d so that n <= 2^d +pub fn int_log(n: usize) -> usize { + match n.checked_next_power_of_two() { + Some(x) => x.trailing_zeros() as usize, + None => 8 * std::mem::size_of::(), + } +} + +pub fn hash256_iter<'a>( + values: &'a [Hash256], +) -> impl Iterator + ExactSizeIterator + 'a { + values.iter().copied().map(Hash256::to_fixed_bytes) +} + +pub fn u64_iter<'a>( + values: &'a [u64], +) -> impl Iterator + ExactSizeIterator + 'a { + let type_size = size_of::(); + let vals_per_chunk = BYTES_PER_CHUNK / type_size; + values.chunks(vals_per_chunk).map(move |xs| { + xs.iter().map(|x| x.to_le_bytes()).enumerate().fold( + [0; BYTES_PER_CHUNK], + |mut chunk, (i, x_bytes)| { + chunk[i * type_size..(i + 1) * type_size].copy_from_slice(&x_bytes); + chunk + }, + ) + }) +} + +impl CachedTreeHash for FixedVector { + fn new_tree_hash_cache() -> TreeHashCache { + TreeHashCache::new(int_log(N::to_usize())) + } + + fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result { + cache.recalculate_merkle_root(hash256_iter(&self)) + } +} + +impl CachedTreeHash for FixedVector { + fn new_tree_hash_cache() -> TreeHashCache { + let vals_per_chunk = BYTES_PER_CHUNK / size_of::(); + TreeHashCache::new(int_log(N::to_usize() / vals_per_chunk)) + } + + fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result { + cache.recalculate_merkle_root(u64_iter(&self)) + } +} + +impl CachedTreeHash for VariableList { + fn new_tree_hash_cache() -> TreeHashCache { + TreeHashCache::new(int_log(N::to_usize())) + } + + fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result { + Ok(Hash256::from_slice(&mix_in_length( + cache + .recalculate_merkle_root(hash256_iter(&self))? + .as_bytes(), + self.len(), + ))) + } +} + +impl CachedTreeHash for VariableList { + fn new_tree_hash_cache() -> TreeHashCache { + let vals_per_chunk = BYTES_PER_CHUNK / size_of::(); + TreeHashCache::new(int_log(N::to_usize() / vals_per_chunk)) + } + + fn recalculate_tree_hash_root(&self, cache: &mut TreeHashCache) -> Result { + Ok(Hash256::from_slice(&mix_in_length( + cache.recalculate_merkle_root(u64_iter(&self))?.as_bytes(), + self.len(), + ))) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_int_log() { + for i in 0..63 { + assert_eq!(int_log(2usize.pow(i)), i as usize); + } + assert_eq!(int_log(10), 4); + } +} diff --git a/eth2/utils/cached_tree_hash/src/lib.rs b/eth2/utils/cached_tree_hash/src/lib.rs new file mode 100644 index 0000000000..cc47ab21f2 --- /dev/null +++ b/eth2/utils/cached_tree_hash/src/lib.rs @@ -0,0 +1,31 @@ +mod cache; +mod impls; +mod multi_cache; +#[cfg(test)] +mod test; + +pub use crate::cache::TreeHashCache; +pub use crate::impls::int_log; +pub use crate::multi_cache::MultiTreeHashCache; +use ethereum_types::H256 as Hash256; +use tree_hash::TreeHash; + +#[derive(Debug, PartialEq)] +pub enum Error { + /// Attempting to provide more than 2^depth leaves to a Merkle tree is disallowed. + TooManyLeaves, + /// Shrinking a Merkle tree cache by providing it with less leaves than it currently has is + /// disallowed (for simplicity). + CannotShrink, + /// Cache is inconsistent with the list of dirty indices provided. + CacheInconsistent, +} + +/// Trait for types which can make use of a cache to accelerate calculation of their tree hash root. +pub trait CachedTreeHash: TreeHash { + /// Create a new cache appropriate for use with values of this type. + fn new_tree_hash_cache() -> Cache; + + /// Update the cache and use it to compute the tree hash root for `self`. + fn recalculate_tree_hash_root(&self, cache: &mut Cache) -> Result; +} diff --git a/eth2/utils/cached_tree_hash/src/multi_cache.rs b/eth2/utils/cached_tree_hash/src/multi_cache.rs new file mode 100644 index 0000000000..df2f6a0113 --- /dev/null +++ b/eth2/utils/cached_tree_hash/src/multi_cache.rs @@ -0,0 +1,62 @@ +use crate::{int_log, CachedTreeHash, Error, Hash256, TreeHashCache}; +use ssz_derive::{Decode, Encode}; +use ssz_types::{typenum::Unsigned, VariableList}; +use tree_hash::mix_in_length; + +/// Multi-level tree hash cache. +/// +/// Suitable for lists/vectors/containers holding values which themselves have caches. +/// +/// Note: this cache could be made composable by replacing the hardcoded `Vec` with +/// `Vec`, allowing arbitrary nesting, but for now we stick to 2-level nesting because that's all +/// we need. +#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] +pub struct MultiTreeHashCache { + list_cache: TreeHashCache, + value_caches: Vec, +} + +impl CachedTreeHash for VariableList +where + T: CachedTreeHash, + N: Unsigned, +{ + fn new_tree_hash_cache() -> MultiTreeHashCache { + MultiTreeHashCache { + list_cache: TreeHashCache::new(int_log(N::to_usize())), + value_caches: vec![], + } + } + + fn recalculate_tree_hash_root(&self, cache: &mut MultiTreeHashCache) -> Result { + if self.len() < cache.value_caches.len() { + return Err(Error::CannotShrink); + } + + // Resize the value caches to the size of the list. + cache + .value_caches + .resize(self.len(), T::new_tree_hash_cache()); + + // Update all individual value caches. + self.iter() + .zip(cache.value_caches.iter_mut()) + .try_for_each(|(value, cache)| value.recalculate_tree_hash_root(cache).map(|_| ()))?; + + // Pipe the value roots into the list cache, then mix in the length. + // Note: it's possible to avoid this 2nd iteration (or an allocation) by using + // `itertools::process_results`, but it requires removing the `ExactSizeIterator` + // bound from `recalculate_merkle_root`, and only saves about 5% in benchmarks. + let list_root = cache.list_cache.recalculate_merkle_root( + cache + .value_caches + .iter() + .map(|value_cache| value_cache.root().to_fixed_bytes()), + )?; + + Ok(Hash256::from_slice(&mix_in_length( + list_root.as_bytes(), + self.len(), + ))) + } +} diff --git a/eth2/utils/cached_tree_hash/src/test.rs b/eth2/utils/cached_tree_hash/src/test.rs new file mode 100644 index 0000000000..68173fd6a6 --- /dev/null +++ b/eth2/utils/cached_tree_hash/src/test.rs @@ -0,0 +1,147 @@ +use crate::impls::hash256_iter; +use crate::{CachedTreeHash, Error, Hash256, TreeHashCache}; +use eth2_hashing::ZERO_HASHES; +use quickcheck_macros::quickcheck; +use ssz_types::{ + typenum::{Unsigned, U16, U255, U256, U257}, + FixedVector, VariableList, +}; +use tree_hash::TreeHash; + +fn int_hashes(start: u64, end: u64) -> Vec { + (start..end).map(Hash256::from_low_u64_le).collect() +} + +type List16 = VariableList; +type Vector16 = FixedVector; +type Vector16u64 = FixedVector; + +#[test] +fn max_leaves() { + let depth = 4; + let max_len = 2u64.pow(depth as u32); + let mut cache = TreeHashCache::new(depth); + assert!(cache + .recalculate_merkle_root(hash256_iter(&int_hashes(0, max_len - 1))) + .is_ok()); + assert!(cache + .recalculate_merkle_root(hash256_iter(&int_hashes(0, max_len))) + .is_ok()); + assert_eq!( + cache.recalculate_merkle_root(hash256_iter(&int_hashes(0, max_len + 1))), + Err(Error::TooManyLeaves) + ); + assert_eq!( + cache.recalculate_merkle_root(hash256_iter(&int_hashes(0, max_len * 2))), + Err(Error::TooManyLeaves) + ); +} + +#[test] +fn cannot_shrink() { + let init_len = 12; + let list1 = List16::new(int_hashes(0, init_len)).unwrap(); + let list2 = List16::new(int_hashes(0, init_len - 1)).unwrap(); + + let mut cache = List16::new_tree_hash_cache(); + assert!(list1.recalculate_tree_hash_root(&mut cache).is_ok()); + assert_eq!( + list2.recalculate_tree_hash_root(&mut cache), + Err(Error::CannotShrink) + ); +} + +#[test] +fn empty_leaves() { + let depth = 20; + let mut cache = TreeHashCache::new(depth); + assert_eq!( + cache + .recalculate_merkle_root(vec![].into_iter()) + .unwrap() + .as_bytes(), + &ZERO_HASHES[depth][..] + ); +} + +#[test] +fn fixed_vector_hash256() { + let len = 16; + let vec = Vector16::new(int_hashes(0, len)).unwrap(); + + let mut cache = Vector16::new_tree_hash_cache(); + + assert_eq!( + Hash256::from_slice(&vec.tree_hash_root()), + vec.recalculate_tree_hash_root(&mut cache).unwrap() + ); +} + +#[test] +fn fixed_vector_u64() { + let len = 16; + let vec = Vector16u64::new((0..len).collect()).unwrap(); + + let mut cache = Vector16u64::new_tree_hash_cache(); + + assert_eq!( + Hash256::from_slice(&vec.tree_hash_root()), + vec.recalculate_tree_hash_root(&mut cache).unwrap() + ); +} + +#[test] +fn variable_list_hash256() { + let len = 13; + let list = List16::new(int_hashes(0, len)).unwrap(); + + let mut cache = List16::new_tree_hash_cache(); + + assert_eq!( + Hash256::from_slice(&list.tree_hash_root()), + list.recalculate_tree_hash_root(&mut cache).unwrap() + ); +} + +#[quickcheck] +fn quickcheck_variable_list_h256_256(leaves_and_skips: Vec<(u64, bool)>) -> bool { + variable_list_h256_test::(leaves_and_skips) +} + +#[quickcheck] +fn quickcheck_variable_list_h256_255(leaves_and_skips: Vec<(u64, bool)>) -> bool { + variable_list_h256_test::(leaves_and_skips) +} + +#[quickcheck] +fn quickcheck_variable_list_h256_257(leaves_and_skips: Vec<(u64, bool)>) -> bool { + variable_list_h256_test::(leaves_and_skips) +} + +fn variable_list_h256_test(leaves_and_skips: Vec<(u64, bool)>) -> bool { + let leaves: Vec<_> = leaves_and_skips + .iter() + .map(|(l, _)| Hash256::from_low_u64_be(*l)) + .take(Len::to_usize()) + .collect(); + + let mut list: VariableList; + let mut cache = VariableList::::new_tree_hash_cache(); + + for (end, (_, update_cache)) in leaves_and_skips.into_iter().enumerate() { + list = VariableList::new(leaves[..end].to_vec()).unwrap(); + + if update_cache { + if list + .recalculate_tree_hash_root(&mut cache) + .unwrap() + .as_bytes() + != &list.tree_hash_root()[..] + { + return false; + } + } + } + + true +} diff --git a/eth2/utils/eth2_hashing/Cargo.toml b/eth2/utils/eth2_hashing/Cargo.toml index af48d0d4e7..3047a7a4df 100644 --- a/eth2/utils/eth2_hashing/Cargo.toml +++ b/eth2/utils/eth2_hashing/Cargo.toml @@ -1,11 +1,14 @@ [package] name = "eth2_hashing" -version = "0.1.0" +version = "0.1.1" authors = ["Paul Hauner "] edition = "2018" license = "Apache-2.0" description = "Hashing primitives used in Ethereum 2.0" +[dependencies] +lazy_static = { version = "1.4.0", optional = true } + [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = "0.16.9" @@ -17,3 +20,7 @@ rustc-hex = "2.0.1" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] wasm-bindgen-test = "0.3.2" + +[features] +default = ["zero_hash_cache"] +zero_hash_cache = ["lazy_static"] diff --git a/eth2/utils/eth2_hashing/src/lib.rs b/eth2/utils/eth2_hashing/src/lib.rs index 94d072d8df..555c5bbe30 100644 --- a/eth2/utils/eth2_hashing/src/lib.rs +++ b/eth2/utils/eth2_hashing/src/lib.rs @@ -10,6 +10,9 @@ use ring::digest::{digest, SHA256}; #[cfg(target_arch = "wasm32")] use sha2::{Digest, Sha256}; +#[cfg(feature = "zero_hash_cache")] +use lazy_static::lazy_static; + /// Returns the digest of `input`. /// /// Uses `ring::digest::SHA256`. @@ -23,6 +26,31 @@ pub fn hash(input: &[u8]) -> Vec { h } +/// Compute the hash of two slices concatenated. +pub fn hash_concat(h1: &[u8], h2: &[u8]) -> Vec { + let mut vec1 = h1.to_vec(); + vec1.extend_from_slice(h2); + hash(&vec1) +} + +/// The max index that can be used with `ZERO_HASHES`. +#[cfg(feature = "zero_hash_cache")] +pub const ZERO_HASHES_MAX_INDEX: usize = 48; + +#[cfg(feature = "zero_hash_cache")] +lazy_static! { + /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. + pub static ref ZERO_HASHES: Vec> = { + let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1]; + + for i in 0..ZERO_HASHES_MAX_INDEX { + hashes[i + 1] = hash_concat(&hashes[i], &hashes[i]); + } + + hashes + }; +} + #[cfg(test)] mod tests { use super::*; @@ -41,4 +69,14 @@ mod tests { let expected: Vec = expected_hex.from_hex().unwrap(); assert_eq!(expected, output); } + + #[cfg(feature = "zero_hash_cache")] + mod zero_hash { + use super::*; + + #[test] + fn zero_hash_zero() { + assert_eq!(ZERO_HASHES[0], vec![0; 32]); + } + } } diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index 785072eb4e..356c668352 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,24 +1,11 @@ -#[macro_use] -extern crate lazy_static; - -use eth2_hashing::hash; +use eth2_hashing::{hash, hash_concat, ZERO_HASHES}; use ethereum_types::H256; +use lazy_static::lazy_static; const MAX_TREE_DEPTH: usize = 32; const EMPTY_SLICE: &[H256] = &[]; lazy_static! { - /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. - static ref ZERO_HASHES: Vec = { - let mut hashes = vec![H256::from([0; 32]); MAX_TREE_DEPTH + 1]; - - for i in 0..MAX_TREE_DEPTH { - hashes[i + 1] = hash_concat(hashes[i], hashes[i]); - } - - hashes - }; - /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. static ref ZERO_NODES: Vec = { (0..=MAX_TREE_DEPTH).map(MerkleTree::Zero).collect() @@ -78,7 +65,10 @@ impl MerkleTree { let left_subtree = MerkleTree::create(left_leaves, depth - 1); let right_subtree = MerkleTree::create(right_leaves, depth - 1); - let hash = hash_concat(left_subtree.hash(), right_subtree.hash()); + let hash = H256::from_slice(&hash_concat( + left_subtree.hash().as_bytes(), + right_subtree.hash().as_bytes(), + )); Node(hash, Box::new(left_subtree), Box::new(right_subtree)) } @@ -146,7 +136,7 @@ impl MerkleTree { match *self { MerkleTree::Leaf(h) => h, MerkleTree::Node(h, _, _) => h, - MerkleTree::Zero(depth) => ZERO_HASHES[depth], + MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]), } } @@ -228,8 +218,7 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi for (i, leaf) in branch.iter().enumerate().take(depth) { let ith_bit = (index >> i) & 0x01; if ith_bit == 1 { - let input = concat(leaf.as_bytes().to_vec(), merkle_root); - merkle_root = hash(&input); + merkle_root = hash_concat(leaf.as_bytes(), &merkle_root); } else { let mut input = merkle_root; input.extend_from_slice(leaf.as_bytes()); @@ -240,20 +229,6 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi H256::from_slice(&merkle_root) } -/// Concatenate two vectors. -fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { - vec1.append(&mut vec2); - vec1 -} - -/// Compute the hash of two other hashes concatenated. -fn hash_concat(h1: H256, h2: H256) -> H256 { - H256::from_slice(&hash(&concat( - h1.as_bytes().to_vec(), - h2.as_bytes().to_vec(), - ))) -} - #[cfg(test)] mod tests { use super::*; @@ -318,10 +293,10 @@ mod tests { let leaf_b10 = H256::from([0xCC; 32]); let leaf_b11 = H256::from([0xDD; 32]); - let node_b0x = hash_concat(leaf_b00, leaf_b01); - let node_b1x = hash_concat(leaf_b10, leaf_b11); + let node_b0x = H256::from_slice(&hash_concat(leaf_b00.as_bytes(), leaf_b01.as_bytes())); + let node_b1x = H256::from_slice(&hash_concat(leaf_b10.as_bytes(), leaf_b11.as_bytes())); - let root = hash_concat(node_b0x, node_b1x); + let root = H256::from_slice(&hash_concat(node_b0x.as_bytes(), node_b1x.as_bytes())); let tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); assert_eq!(tree.hash(), root); @@ -335,10 +310,10 @@ mod tests { let leaf_b10 = H256::from([0xCC; 32]); let leaf_b11 = H256::from([0xDD; 32]); - let node_b0x = hash_concat(leaf_b00, leaf_b01); - let node_b1x = hash_concat(leaf_b10, leaf_b11); + let node_b0x = H256::from_slice(&hash_concat(leaf_b00.as_bytes(), leaf_b01.as_bytes())); + let node_b1x = H256::from_slice(&hash_concat(leaf_b10.as_bytes(), leaf_b11.as_bytes())); - let root = hash_concat(node_b0x, node_b1x); + let root = H256::from_slice(&hash_concat(node_b0x.as_bytes(), node_b1x.as_bytes())); // Run some proofs assert!(verify_merkle_proof( diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index e416a3f8e7..7d48b17072 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -15,8 +15,8 @@ criterion = "0.3.0" rand = "0.7.2" tree_hash_derive = "0.2" types = { path = "../../types" } +lazy_static = "1.4.0" [dependencies] ethereum-types = "0.8.0" eth2_hashing = "0.1.0" -lazy_static = "1.4.0" diff --git a/eth2/utils/tree_hash/benches/benches.rs b/eth2/utils/tree_hash/benches/benches.rs index bad6f3a39c..d734a7342a 100644 --- a/eth2/utils/tree_hash/benches/benches.rs +++ b/eth2/utils/tree_hash/benches/benches.rs @@ -1,8 +1,6 @@ -#[macro_use] -extern crate lazy_static; - use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use lazy_static::lazy_static; use types::test_utils::{generate_deterministic_keypairs, TestingBeaconStateBuilder}; use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -27,25 +25,61 @@ fn build_state(validator_count: usize) -> BeaconState { state } +// Note: `state.canonical_root()` uses whatever `tree_hash` that the `types` crate +// uses, which is not necessarily this crate. If you want to ensure that types is +// using this local version of `tree_hash`, ensure you add a workspace-level +// [dependency +// patch](https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section). fn bench_suite(c: &mut Criterion, spec_desc: &str, validator_count: usize) { - let state = build_state::(validator_count); + let state1 = build_state::(validator_count); + let state2 = state1.clone(); + let mut state3 = state1.clone(); + state3.build_tree_hash_cache().unwrap(); c.bench( - &format!("{}/{}_validators", spec_desc, validator_count), + &format!("{}/{}_validators/no_cache", spec_desc, validator_count), Benchmark::new("genesis_state", move |b| { b.iter_batched_ref( - || state.clone(), - // Note: `state.canonical_root()` uses whatever `tree_hash` that the `types` crate - // uses, which is not necessarily this crate. If you want to ensure that types is - // using this local version of `tree_hash`, ensure you add a workspace-level - // [dependency - // patch](https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section). + || state1.clone(), |state| black_box(state.canonical_root()), criterion::BatchSize::SmallInput, ) }) .sample_size(10), ); + + c.bench( + &format!("{}/{}_validators/empty_cache", spec_desc, validator_count), + Benchmark::new("genesis_state", move |b| { + b.iter_batched_ref( + || state2.clone(), + |state| { + assert!(!state.tree_hash_cache.is_initialized()); + black_box(state.update_tree_hash_cache().unwrap()) + }, + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); + + c.bench( + &format!( + "{}/{}_validators/up_to_date_cache", + spec_desc, validator_count + ), + Benchmark::new("genesis_state", move |b| { + b.iter_batched_ref( + || state3.clone(), + |state| { + assert!(state.tree_hash_cache.is_initialized()); + black_box(state.update_tree_hash_cache().unwrap()) + }, + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); } fn all_benches(c: &mut Criterion) { diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 9f09f50ce7..25630cf970 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -131,36 +131,6 @@ impl TreeHash for H256 { } } -// TODO: this implementation always panics, it only exists to allow us to compile whilst -// refactoring tree hash. Should be removed. -macro_rules! impl_for_list { - ($type: ty) => { - impl TreeHash for $type - where - T: TreeHash, - { - fn tree_hash_type() -> TreeHashType { - unimplemented!("TreeHash is not implemented for Vec or slice") - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unimplemented!("TreeHash is not implemented for Vec or slice") - } - - fn tree_hash_packing_factor() -> usize { - unimplemented!("TreeHash is not implemented for Vec or slice") - } - - fn tree_hash_root(&self) -> Vec { - unimplemented!("TreeHash is not implemented for Vec or slice") - } - } - }; -} - -impl_for_list!(Vec); -impl_for_list!(&[T]); - /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: u64) -> Vec { let mut vec = int.to_le_bytes().to_vec(); diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 72a77f03ee..0b3be72c46 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -1,6 +1,3 @@ -#[macro_use] -extern crate lazy_static; - pub mod impls; mod merkleize_padded; mod merkleize_standard; @@ -27,7 +24,7 @@ pub fn mix_in_length(root: &[u8], length: usize) -> Vec { let mut length_bytes = length.to_le_bytes().to_vec(); length_bytes.resize(BYTES_PER_CHUNK, 0); - merkleize_padded::hash_concat(root, &length_bytes) + eth2_hashing::hash_concat(root, &length_bytes) } #[derive(Debug, PartialEq, Clone)] diff --git a/eth2/utils/tree_hash/src/merkleize_padded.rs b/eth2/utils/tree_hash/src/merkleize_padded.rs index bfec55e1c4..832c0bbd80 100644 --- a/eth2/utils/tree_hash/src/merkleize_padded.rs +++ b/eth2/utils/tree_hash/src/merkleize_padded.rs @@ -1,25 +1,10 @@ use super::BYTES_PER_CHUNK; -use eth2_hashing::hash; +use eth2_hashing::{hash, hash_concat, ZERO_HASHES, ZERO_HASHES_MAX_INDEX}; /// The size of the cache that stores padding nodes for a given height. /// /// Currently, we panic if we encounter a tree with a height larger than `MAX_TREE_DEPTH`. -/// -/// It is set to 48 as we expect it to be sufficiently high that we won't exceed it. -pub const MAX_TREE_DEPTH: usize = 48; - -lazy_static! { - /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. - static ref ZERO_HASHES: Vec> = { - let mut hashes = vec![vec![0; 32]; MAX_TREE_DEPTH + 1]; - - for i in 0..MAX_TREE_DEPTH { - hashes[i + 1] = hash_concat(&hashes[i], &hashes[i]); - } - - hashes - }; -} +pub const MAX_TREE_DEPTH: usize = ZERO_HASHES_MAX_INDEX; /// Merkleize `bytes` and return the root, optionally padding the tree out to `min_leaves` number of /// leaves. @@ -236,17 +221,6 @@ fn get_zero_hash(height: usize) -> &'static [u8] { } } -/// Concatenate two vectors. -fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { - vec1.append(&mut vec2); - vec1 -} - -/// Compute the hash of two other hashes concatenated. -pub fn hash_concat(h1: &[u8], h2: &[u8]) -> Vec { - hash(&concat(h1.to_vec(), h2.to_vec())) -} - /// Returns the next even number following `n`. If `n` is even, `n` is returned. fn next_even_number(n: usize) -> usize { n + n % 2 diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index e396b4fdaa..bd869dee56 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -3,14 +3,25 @@ extern crate proc_macro; use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, DeriveInput}; +use std::collections::HashMap; +use syn::{parse_macro_input, Attribute, DeriveInput, Meta}; -/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields +/// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields /// that should not be hashed. /// /// # Panics /// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. -fn get_hashable_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> { +fn get_hashable_fields<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> { + get_hashable_fields_and_their_caches(struct_data) + .into_iter() + .map(|(ident, _, _)| ident) + .collect() +} + +/// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field. +fn get_hashable_fields_and_their_caches<'a>( + struct_data: &'a syn::DataStruct, +) -> Vec<(&'a syn::Ident, syn::Type, Option)> { struct_data .fields .iter() @@ -18,15 +29,77 @@ fn get_hashable_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec< if should_skip_hashing(&f) { None } else { - Some(match &f.ident { - Some(ref ident) => ident, - _ => panic!("tree_hash_derive only supports named struct fields."), - }) + let ident = f + .ident + .as_ref() + .expect("tree_hash_derive only supports named struct fields"); + let opt_cache_field = get_cache_field_for(&f); + Some((ident, f.ty.clone(), opt_cache_field)) } }) .collect() } +/// Parse the cached_tree_hash attribute for a field. +/// +/// Extract the cache field name from `#[cached_tree_hash(cache_field_name)]` +/// +/// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, +/// or `None` otherwise. +fn get_cache_field_for<'a>(field: &'a syn::Field) -> Option { + use syn::{MetaList, NestedMeta}; + + let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); + if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { + nested.iter().find_map(|x| match x { + NestedMeta::Meta(Meta::Word(cache_field_ident)) => Some(cache_field_ident.clone()), + _ => None, + }) + } else { + None + } +} + +/// Process the `cached_tree_hash` attributes from a list of attributes into structured `Meta`s. +fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { + attrs + .iter() + .filter(|attr| attr.path.is_ident("cached_tree_hash")) + .flat_map(|attr| attr.parse_meta()) + .collect() +} + +/// Parse the top-level cached_tree_hash struct attribute. +/// +/// Return the type from `#[cached_tree_hash(type = "T")]`. +/// +/// **Panics** if the attribute is missing or the type is malformed. +fn parse_cached_tree_hash_struct_attrs(attrs: &[Attribute]) -> syn::Type { + use syn::{Lit, MetaList, MetaNameValue, NestedMeta}; + + let parsed_attrs = cached_tree_hash_attr_metas(attrs); + if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { + let eqns = nested + .iter() + .flat_map(|x| match x { + NestedMeta::Meta(Meta::NameValue(MetaNameValue { + ident, + lit: Lit::Str(lit_str), + .. + })) => Some((ident.to_string(), lit_str.clone())), + _ => None, + }) + .collect::>(); + + eqns["type"] + .clone() + .parse() + .expect("valid type required for cache") + } else { + panic!("missing attribute `#[cached_tree_hash(type = ...)` on struct"); + } +} + /// Returns true if some field has an attribute declaring it should not be hashed. /// /// The field attribute is: `#[tree_hash(skip_hashing)]` @@ -51,7 +124,7 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { _ => panic!("tree_hash_derive only supports structs."), }; - let idents = get_hashable_named_field_idents(&struct_data); + let idents = get_hashable_fields(&struct_data); let output = quote! { impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { @@ -112,6 +185,82 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { output.into() } +/// Derive the `CachedTreeHash` trait for a type. +/// +/// Requires two attributes: +/// * `#[cached_tree_hash(type = "T")]` on the struct, declaring +/// that the type `T` should be used as the tree hash cache. +/// * `#[cached_tree_hash(f)]` on each struct field that makes use +/// of the cache, which declares that the sub-cache for that field +/// can be found in the field `cache.f` of the struct's cache. +#[proc_macro_derive(CachedTreeHash, attributes(cached_tree_hash))] +pub fn cached_tree_hash_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let cache_type = parse_cached_tree_hash_struct_attrs(&item.attrs); + + let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("tree_hash_derive only supports structs."), + }; + + let fields = get_hashable_fields_and_their_caches(&struct_data); + let caching_field_ty = fields + .iter() + .filter(|(_, _, cache_field)| cache_field.is_some()) + .map(|(_, ty, _)| ty); + let caching_field_cache_field = fields + .iter() + .flat_map(|(_, _, cache_field)| cache_field.as_ref()); + + let tree_hash_root_expr = fields + .iter() + .map(|(field, _, caching_field)| match caching_field { + None => quote! { + self.#field.tree_hash_root() + }, + Some(caching_field) => quote! { + self.#field + .recalculate_tree_hash_root(&mut cache.#caching_field)? + .as_bytes() + .to_vec() + }, + }); + + let output = quote! { + impl #impl_generics cached_tree_hash::CachedTreeHash<#cache_type> for #name #ty_generics #where_clause { + fn new_tree_hash_cache() -> #cache_type { + // Call new cache for each sub type + #cache_type { + initialized: true, + #( + #caching_field_cache_field: <#caching_field_ty>::new_tree_hash_cache() + ),* + } + } + + fn recalculate_tree_hash_root( + &self, + cache: &mut #cache_type) + -> Result + { + let mut leaves = vec![]; + + #( + leaves.append(&mut #tree_hash_root_expr); + )* + + Ok(Hash256::from_slice(&tree_hash::merkle_root(&leaves, 0))) + } + } + }; + output.into() +} + fn get_signed_root_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { struct_data .fields diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index b0d281b8d6..e893ea8e2b 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -23,6 +23,7 @@ eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" tree_hash = "0.1.0" tree_hash_derive = "0.2" +cached_tree_hash = { path = "../../eth2/utils/cached_tree_hash" } state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index 442dd6e096..7aa198beae 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -218,7 +218,7 @@ fn ssz_generic_test(path: &Path) -> Result<(), Error> { check_serialization(&value, &serialized)?; if let Some(ref meta) = meta { - check_tree_hash(&meta.root, value.tree_hash_root())?; + check_tree_hash(&meta.root, &value.tree_hash_root())?; } } // Invalid diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 62f285d580..e4c216f765 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -2,8 +2,10 @@ use super::*; use crate::case_result::compare_result; use crate::cases::common::SszStaticType; use crate::decode::yaml_decode_file; +use cached_tree_hash::CachedTreeHash; use serde_derive::Deserialize; use std::fs; +use std::marker::PhantomData; use tree_hash::SignedRoot; use types::Hash256; @@ -27,6 +29,14 @@ pub struct SszStaticSR { value: T, } +#[derive(Debug, Clone)] +pub struct SszStaticTHC { + roots: SszStaticRoots, + serialized: Vec, + value: T, + _phantom: PhantomData, +} + fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { let roots = yaml_decode_file(&path.join("roots.yaml"))?; let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); @@ -55,6 +65,17 @@ impl LoadCase for SszStaticSR { } } +impl, C: Debug + Sync> LoadCase for SszStaticTHC { + fn load_from_dir(path: &Path) -> Result { + load_from_dir(path).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + _phantom: PhantomData, + }) + } +} + pub fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { // Check serialization let serialized_result = value.as_ssz_bytes(); @@ -68,18 +89,18 @@ pub fn check_serialization(value: &T, serialized: &[u8]) -> Re Ok(()) } -pub fn check_tree_hash(expected_str: &str, actual_root: Vec) -> Result<(), Error> { +pub fn check_tree_hash(expected_str: &str, actual_root: &[u8]) -> Result<(), Error> { let expected_root = hex::decode(&expected_str[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; let expected_root = Hash256::from_slice(&expected_root); - let tree_hash_root = Hash256::from_slice(&actual_root); + let tree_hash_root = Hash256::from_slice(actual_root); compare_result::(&Ok(tree_hash_root), &Some(expected_root)) } impl Case for SszStatic { fn result(&self, _case_index: usize) -> Result<(), Error> { check_serialization(&self.value, &self.serialized)?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root())?; + check_tree_hash(&self.roots.root, &self.value.tree_hash_root())?; Ok(()) } } @@ -87,15 +108,28 @@ impl Case for SszStatic { impl Case for SszStaticSR { fn result(&self, _case_index: usize) -> Result<(), Error> { check_serialization(&self.value, &self.serialized)?; - check_tree_hash(&self.roots.root, self.value.tree_hash_root())?; + check_tree_hash(&self.roots.root, &self.value.tree_hash_root())?; check_tree_hash( &self .roots .signing_root .as_ref() .expect("signed root exists"), - self.value.signed_root(), + &self.value.signed_root(), )?; Ok(()) } } + +impl, C: Debug + Sync> Case for SszStaticTHC { + fn result(&self, _case_index: usize) -> Result<(), Error> { + check_serialization(&self.value, &self.serialized)?; + check_tree_hash(&self.roots.root, &self.value.tree_hash_root())?; + + let mut cache = T::new_tree_hash_cache(); + let cached_tree_hash_root = self.value.recalculate_tree_hash_root(&mut cache).unwrap(); + check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?; + + Ok(()) + } +} diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index e5d175e115..df2b6603b2 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -1,6 +1,8 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name; use crate::type_name::TypeName; +use cached_tree_hash::CachedTreeHash; +use std::fmt::Debug; use std::fs; use std::marker::PhantomData; use std::path::PathBuf; @@ -93,6 +95,9 @@ pub struct SszStaticHandler(PhantomData<(T, E)>); /// Handler for SSZ types that do implement `SignedRoot`. pub struct SszStaticSRHandler(PhantomData<(T, E)>); +/// Handler for SSZ types that implement `CachedTreeHash`. +pub struct SszStaticTHCHandler(PhantomData<(T, C, E)>); + impl Handler for SszStaticHandler where T: cases::SszStaticType + TypeName, @@ -133,6 +138,27 @@ where } } +impl Handler for SszStaticTHCHandler +where + T: cases::SszStaticType + CachedTreeHash + TypeName, + C: Debug + Sync, + E: TypeName, +{ + type Case = cases::SszStaticTHC; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name() -> String { + T::name().into() + } +} + pub struct ShufflingHandler(PhantomData); impl Handler for ShufflingHandler { diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 43cb79a8dd..a9b7c22ac3 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -99,7 +99,7 @@ macro_rules! ssz_static_test { ($test_name:ident, $typ:ident$(<$generics:tt>)?, SR) => { ssz_static_test!($test_name, SszStaticSRHandler, $typ$(<$generics>)?); }; - // Non-signed root + // Non-signed root, non-tree hash caching ($test_name:ident, $typ:ident$(<$generics:tt>)?) => { ssz_static_test!($test_name, SszStaticHandler, $typ$(<$generics>)?); }; @@ -122,11 +122,11 @@ macro_rules! ssz_static_test { ); }; // Base case - ($test_name:ident, $handler:ident, { $(($typ:ty, $spec:ident)),+ }) => { + ($test_name:ident, $handler:ident, { $(($($typ:ty),+)),+ }) => { #[test] fn $test_name() { $( - $handler::<$typ, $spec>::run(); + $handler::<$($typ),+>::run(); )+ } }; @@ -134,7 +134,7 @@ macro_rules! ssz_static_test { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{Handler, SszStaticHandler, SszStaticSRHandler}; + use ef_tests::{Handler, SszStaticHandler, SszStaticSRHandler, SszStaticTHCHandler}; use types::*; ssz_static_test!(attestation, Attestation<_>, SR); @@ -147,7 +147,13 @@ mod ssz_static { ssz_static_test!(beacon_block, BeaconBlock<_>, SR); ssz_static_test!(beacon_block_body, BeaconBlockBody<_>); ssz_static_test!(beacon_block_header, BeaconBlockHeader, SR); - ssz_static_test!(beacon_state, BeaconState<_>); + ssz_static_test!( + beacon_state, + SszStaticTHCHandler, { + (BeaconState, BeaconTreeHashCache, MinimalEthSpec), + (BeaconState, BeaconTreeHashCache, MainnetEthSpec) + } + ); ssz_static_test!(checkpoint, Checkpoint); ssz_static_test!(compact_committee, CompactCommittee<_>); ssz_static_test!(crosslink, Crosslink); From f5d0ee0ed7a481d918a6c5bb491ce1fafcb26d5a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 5 Nov 2019 18:06:41 +1100 Subject: [PATCH 12/21] Fix merkle_proof for eth2_hashing refactor (#593) --- eth2/utils/merkle_proof/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index 356c668352..bfc9cc26ec 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -124,7 +124,10 @@ impl MerkleTree { // All other possibilities are invalid MerkleTrees (_, _) => return Err(MerkleTreeError::Invalid), }; - *hash = hash_concat(left.hash(), right.hash()); + hash.assign_from_slice(&hash_concat( + left.hash().as_bytes(), + right.hash().as_bytes(), + )); } } From 613fdbeda669365a7413bc504ad02bbe3c786b1c Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 11 Nov 2019 04:58:22 +0530 Subject: [PATCH 13/21] Derive `PartialEq` for Keypair (#595) * Derive `PartialEq` for Keypair * Silence clippy warning --- eth2/utils/bls/src/keypair.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/eth2/utils/bls/src/keypair.rs b/eth2/utils/bls/src/keypair.rs index 75960a47d4..4a3b1e437b 100644 --- a/eth2/utils/bls/src/keypair.rs +++ b/eth2/utils/bls/src/keypair.rs @@ -3,7 +3,7 @@ use serde_derive::{Deserialize, Serialize}; use std::fmt; use std::hash::{Hash, Hasher}; -#[derive(Debug, Clone, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Keypair { pub sk: SecretKey, pub pk: PublicKey, @@ -22,12 +22,7 @@ impl Keypair { } } -impl PartialEq for Keypair { - fn eq(&self, other: &Keypair) -> bool { - self == other - } -} - +#[allow(clippy::derive_hash_xor_eq)] impl Hash for Keypair { /// Note: this is distinct from consensus serialization, it will produce a different hash. /// From c7b3a7abd8aa4020459967c52aa4f4b99f949ca2 Mon Sep 17 00:00:00 2001 From: pscott <30843220+pscott@users.noreply.github.com> Date: Tue, 12 Nov 2019 06:09:33 +0100 Subject: [PATCH 14/21] BlockProcessing testing (#559) * Add valid_deposit test and build_with_deposit method * Insert_deposit takes a num_deposit param * Deposit with spec.max_effective_balance * Copy int_to_bytes32 implem from beacon_chain_builder * Add debug information to insert_deposit * Remove length-proof assertion * Insert_deposit displays error * Batch deposits now pass tests * Optimize insert_deposit * Rename insert_deposits and set num_deposits to 2 in valid_deposit test * update test_utils to pass tests * fix typo in test_utils * update cast in test_utils * Add DepositCountInvalid tests * Add tests for bad deposit signature * Add tests and test utils in test_builder * Return error instead of ok on bad signature * Update DepositTestTask enum * Add comment about manually setting deposit_count and deposit_index * add badblsbytes test * add bad_index var for clarity ; remove underflow test * cargo fmt * Add insert 3 exits tests * Add validator_unknwon test * Add far_future_epoch test and already exited * Add MaxVoluntaryExits + 1 test * Add exit_already_initiated test * Add exit_not_active test * Add too_young_to_leave test * Cargo fmt * Confirm already_anitiated test * Fix typo in enum variant * Adjust some tests to return ok(()) and revert changes for early return in per_block_processing.rs * cargo fmt * Adjust AlreadyIniated test to expect Ok(()) and revert changes in per_block_processing.rs * Remove extraneous newline * Add insert_valid_attester_slashing * Initial cargo fmt * Add NotSlashable test * Cargo fmt * Remove AttestationDataIdentical * Make test_task pass through reference ; fix max_attester_slashing_plus_one test * Initial cargo fmt * Add InvalidIndexedAttestation1 and 2 * Add comments * Add ProposalsIdenticalTest * Add ProposalsIdentical test * Cargo fmt * Add ProposerUnknown test * Add ProposalEpochMismatch test * Add BadProposal1Signature and Badproposal2Signature tests * Add ProposerNotSlashable test * Derive PartialEq and use if instead of match * Merge attestation tests * Remove useless AlreadyInitiated variant in beacon_state * Remove MaxOperations plus one tests for each operation * Clean comments * add IncludedTooLate and BadTargetEpoch tests * Update AttestationDataBuilder call in operation_pool testing * Cargo fmt * Remove BadIndex enum variant, unused in the code * Cargo fmt * Cargo fmt updated * simply increment deposit_count instead of hardsetting deposit_index in insert_deposits * Fix bad_merkle_proof when calling insert_deposits --- eth2/operation_pool/src/lib.rs | 19 +- .../src/common/get_indexed_attestation.rs | 28 +- .../block_processing_builder.rs | 252 +++- .../src/per_block_processing/errors.rs | 8 +- .../src/per_block_processing/tests.rs | 1253 ++++++++++++++++- .../verify_attestation.rs | 1 + eth2/state_processing/src/test_utils.rs | 35 +- eth2/types/Cargo.toml | 1 + .../builders/testing_attestation_builder.rs | 57 +- .../testing_attestation_data_builder.rs | 70 +- .../testing_attester_slashing_builder.rs | 33 +- .../builders/testing_beacon_block_builder.rs | 206 ++- .../builders/testing_beacon_state_builder.rs | 3 +- .../builders/testing_deposit_builder.rs | 32 +- .../testing_pending_attestation_builder.rs | 12 +- .../testing_proposer_slashing_builder.rs | 44 +- 16 files changed, 1936 insertions(+), 118 deletions(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index bb64c3ca26..618c9d870d 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -619,13 +619,28 @@ mod tests { spec: &ChainSpec, extra_signer: Option, ) -> Attestation { - let mut builder = TestingAttestationBuilder::new(state, committee, slot, shard, spec); + let mut builder = TestingAttestationBuilder::new( + &AttestationTestTask::Valid, + state, + committee, + slot, + shard, + spec, + ); let signers = &committee[signing_range]; let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::>(); - builder.sign(signers, &committee_keys, &state.fork, spec, false); + builder.sign( + &AttestationTestTask::Valid, + signers, + &committee_keys, + &state.fork, + spec, + false, + ); extra_signer.map(|c_idx| { let validator_index = committee[c_idx]; builder.sign( + &AttestationTestTask::Valid, &[validator_index], &[&keypairs[validator_index].sk], &state.fork, diff --git a/eth2/state_processing/src/common/get_indexed_attestation.rs b/eth2/state_processing/src/common/get_indexed_attestation.rs index 5e9362331b..6cae2e47ff 100644 --- a/eth2/state_processing/src/common/get_indexed_attestation.rs +++ b/eth2/state_processing/src/common/get_indexed_attestation.rs @@ -98,11 +98,31 @@ mod test { .map(|validator_index| &keypairs[*validator_index].sk) .collect::>(); - let mut attestation_builder = - TestingAttestationBuilder::new(&state, &cc.committee, cc.slot, shard, &spec); + let mut attestation_builder = TestingAttestationBuilder::new( + &AttestationTestTask::Valid, + &state, + &cc.committee, + cc.slot, + shard, + &spec, + ); attestation_builder - .sign(&bit_0_indices, &bit_0_keys, &state.fork, &spec, false) - .sign(&bit_1_indices, &bit_1_keys, &state.fork, &spec, true); + .sign( + &AttestationTestTask::Valid, + &bit_0_indices, + &bit_0_keys, + &state.fork, + &spec, + false, + ) + .sign( + &AttestationTestTask::Valid, + &bit_1_indices, + &bit_1_keys, + &state.fork, + &spec, + true, + ); let attestation = attestation_builder.build(); let indexed_attestation = get_indexed_attestation(&state, &attestation).unwrap(); diff --git a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs index 329583759b..2e7e54b6ba 100644 --- a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs +++ b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs @@ -1,5 +1,9 @@ +use std::convert::TryInto; use tree_hash::SignedRoot; -use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; +use types::test_utils::{ + AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ExitTestTask, + ProposerSlashingTestTask, TestingBeaconBlockBuilder, TestingBeaconStateBuilder, +}; use types::*; pub struct BlockProcessingBuilder { @@ -30,6 +34,252 @@ impl BlockProcessingBuilder { self.state_builder.build_caches(&spec).unwrap(); } + pub fn build_with_n_deposits( + mut self, + num_deposits: u64, + test_task: DepositTestTask, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (mut state, keypairs) = self.state_builder.build(); + + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + + self.block_builder.insert_deposits( + spec.max_effective_balance, + test_task, + 1, + num_deposits, + &mut state, + spec, + ); + + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } + + pub fn build_with_n_exits( + mut self, + num_exits: usize, + test_task: ExitTestTask, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (mut state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + match test_task { + ExitTestTask::AlreadyInitiated => { + for _ in 0..2 { + self.block_builder.insert_exit( + &test_task, + &mut state, + (0 as usize).try_into().unwrap(), + &keypairs[0].sk, + spec, + ) + } + } + _ => { + for (i, keypair) in keypairs.iter().take(num_exits).enumerate() { + self.block_builder.insert_exit( + &test_task, + &mut state, + (i as usize).try_into().unwrap(), + &keypair.sk, + spec, + ); + } + } + } + + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } + + pub fn build_with_n_attestations( + mut self, + test_task: &AttestationTestTask, + num_attestations: u64, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + + let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); + self.block_builder + .insert_attestations( + test_task, + &state, + &all_secret_keys, + num_attestations as usize, + spec, + ) + .unwrap(); + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } + + pub fn build_with_attester_slashing( + mut self, + test_task: &AttesterSlashingTestTask, + num_attester_slashings: u64, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + + let mut validator_indices = vec![]; + let mut secret_keys = vec![]; + for i in 0..num_attester_slashings { + validator_indices.push(i); + secret_keys.push(&keypairs[i as usize].sk); + } + + for _ in 0..num_attester_slashings { + self.block_builder.insert_attester_slashing( + test_task, + &validator_indices, + &secret_keys, + &state.fork, + spec, + ); + } + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } + + pub fn build_with_proposer_slashing( + mut self, + test_task: &ProposerSlashingTestTask, + num_proposer_slashings: u64, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + + for i in 0..num_proposer_slashings { + let validator_indices = i; + let secret_keys = &keypairs[i as usize].sk; + self.block_builder.insert_proposer_slashing( + test_task, + validator_indices, + &secret_keys, + &state.fork, + spec, + ); + } + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } + pub fn build( mut self, randao_sk: Option, diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index b5f440ab50..1d3094a89f 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -190,8 +190,6 @@ pub enum ProposerSlashingInvalid { #[derive(Debug, PartialEq)] pub enum AttesterSlashingInvalid { - /// The attestation data is identical, an attestation cannot conflict with itself. - AttestationDataIdentical, /// The attestations were not in conflict. NotSlashable, /// The first `IndexedAttestation` was invalid. @@ -257,8 +255,6 @@ pub enum AttestationInvalid { CustodyBitfieldNotSubset, /// There was no known committee in this `epoch` for the given shard and slot. NoCommitteeForShard { shard: u64, slot: Slot }, - /// The validator index was unknown. - UnknownValidator(u64), /// The attestation signature verification failed. BadSignature, /// The shard block root was not set to zero. This is a phase 0 requirement. @@ -311,8 +307,6 @@ pub enum IndexedAttestationInvalid { #[derive(Debug, PartialEq)] pub enum DepositInvalid { - /// The deposit index does not match the state index. - BadIndex { state: u64, deposit: u64 }, /// The signature (proof-of-possession) does not match the given pubkey. BadSignature, /// The signature or pubkey does not represent a valid BLS point. @@ -331,7 +325,7 @@ pub enum ExitInvalid { /// The specified validator has a non-maximum exit epoch. AlreadyExited(u64), /// The specified validator has already initiated exit. - AlreadyInitiatedExited(u64), + AlreadyInitiatedExit(u64), /// The exit is for a future epoch. FutureEpoch { state: Epoch, exit: Epoch }, /// The validator has not been active for long enough. diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index 31704de44c..fe94bf72ff 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -4,14 +4,22 @@ use super::block_processing_builder::BlockProcessingBuilder; use super::errors::*; use crate::{per_block_processing, BlockSignatureStrategy}; use tree_hash::SignedRoot; +use types::test_utils::{ + AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ExitTestTask, + ProposerSlashingTestTask, +}; use types::*; +pub const NUM_DEPOSITS: u64 = 1; pub const VALIDATOR_COUNT: usize = 10; +pub const SLOT_OFFSET: u64 = 4; +pub const EXIT_SLOT_OFFSET: u64 = 2048; +pub const NUM_ATTESTATIONS: u64 = 1; #[test] fn valid_block_ok() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let (block, mut state) = builder.build(None, None, &spec); let result = per_block_processing( @@ -28,7 +36,7 @@ fn valid_block_ok() { #[test] fn invalid_block_header_state_slot() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let (mut block, mut state) = builder.build(None, None, &spec); state.slot = Slot::new(133_713); @@ -53,7 +61,7 @@ fn invalid_block_header_state_slot() { #[test] fn invalid_parent_block_root() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let invalid_parent_root = Hash256::from([0xAA; 32]); let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec); @@ -79,7 +87,7 @@ fn invalid_parent_block_root() { #[test] fn invalid_block_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let (mut block, mut state) = builder.build(None, None, &spec); // sign the block with a keypair that is not the expected proposer @@ -110,7 +118,7 @@ fn invalid_block_signature() { #[test] fn invalid_randao_reveal_signature() { let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); // sign randao reveal with random keypair let keypair = Keypair::random(); @@ -128,12 +136,1239 @@ fn invalid_randao_reveal_signature() { assert_eq!(result, Err(BlockProcessingError::RandaoSignatureInvalid)); } -fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder) { - let mut builder = BlockProcessingBuilder::new(VALIDATOR_COUNT, &spec); +#[test] +fn valid_4_deposits() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::Valid; - // Set the state and block to be in the last slot of the 4th epoch. + let (block, mut state) = builder.build_with_n_deposits(4, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok because these are valid deposits. + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_deposit_deposit_count_too_big() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::Valid; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let big_deposit_count = NUM_DEPOSITS + 1; + state.eth1_data.deposit_count = big_deposit_count; + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting DepositCountInvalid because we incremented the deposit_count + assert_eq!( + result, + Err(BlockProcessingError::DepositCountInvalid { + expected: big_deposit_count as usize, + found: 1 + }) + ); +} + +#[test] +fn invalid_deposit_count_too_small() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::Valid; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let small_deposit_count = NUM_DEPOSITS - 1; + state.eth1_data.deposit_count = small_deposit_count; + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting DepositCountInvalid because we decremented the deposit_count + assert_eq!( + result, + Err(BlockProcessingError::DepositCountInvalid { + expected: small_deposit_count as usize, + found: 1 + }) + ); +} + +#[test] +fn invalid_deposit_bad_merkle_proof() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::Valid; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let bad_index = state.eth1_deposit_index as usize; + + // Manually offsetting deposit count and index to trigger bad merkle proof + state.eth1_data.deposit_count += 1; + state.eth1_deposit_index += 1; + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadMerkleProof because the proofs were created with different indices + assert_eq!( + result, + Err(BlockProcessingError::DepositInvalid { + index: bad_index, + reason: DepositInvalid::BadMerkleProof + }) + ); +} + +#[test] +fn invalid_deposit_wrong_pubkey() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::BadPubKey; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) even though the public key provided does not correspond to the correct public key + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_deposit_wrong_sig() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::BadSig; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) even though the block signature does not correspond to the correct public key + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_deposit_invalid_pub_key() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = DepositTestTask::InvalidPubKey; + + let (block, mut state) = + builder.build_with_n_deposits(NUM_DEPOSITS, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data. + assert_eq!(result, Ok(())); +} + +#[test] +fn valid_insert_3_exits() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 3; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let test_task = ExitTestTask::Valid; + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok because these are valid exits. + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_exit_validator_unknown() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::ValidatorUnknown; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Validator Unknwon because the exit index is incorrect + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::ValidatorUnknown(4242), + }) + ); +} + +#[test] +fn invalid_exit_already_exited() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::AlreadyExited; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting AlreadyExited because we manually set the exit_epoch to be different than far_future_epoch. + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::AlreadyExited(0), + }) + ); +} + +#[test] +fn invalid_exit_not_active() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::NotActive; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting NotActive because we manually set the activation_epoch to be in the future + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::NotActive(0), + }) + ); +} + +#[test] +fn invalid_exit_already_initiated() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::AlreadyInitiated; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) even though we inserted the same exit twice + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_exit_future_epoch() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::FutureEpoch; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting FutureEpoch because we set the exit_epoch to be far_future_epoch + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::FutureEpoch { + state: Epoch::from(2048 as u64), + exit: spec.far_future_epoch + } + }) + ); +} + +#[test] +fn invalid_exit_too_young() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::Valid; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting TooYoung because validator has not been active for long enough when trying to exit + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::TooYoungToExit { + current_epoch: Epoch::from(SLOT_OFFSET), + earliest_exit_epoch: Epoch::from(2048 as u64) + }, + }) + ); +} + +#[test] +fn invalid_exit_bad_signature() { + use std::cmp::max; + + let spec = MainnetEthSpec::default_spec(); + let num_exits = 1; + let test_task = ExitTestTask::BadSignature; + let num_validators = max(VALIDATOR_COUNT, num_exits); + let builder = get_builder(&spec, EXIT_SLOT_OFFSET, num_validators); + + let (block, mut state) = builder.build_with_n_exits(num_exits, test_task, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Bad Signature because we signed with a different secret key than the correct one. + assert_eq!( + result, + Err(BlockProcessingError::ExitInvalid { + index: 0, + reason: ExitInvalid::BadSignature, + }) + ); +} + +#[test] +fn valid_attestations() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::Valid; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) because these are valid attestations + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_attestation_parent_crosslink_start_epoch() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadParentCrosslinkStartEpoch; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadParentCrosslinkEndEpoch because we manually set an invalid crosslink start epoch + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadParentCrosslinkStartEpoch + }) + ); +} + +#[test] +fn invalid_attestation_parent_crosslink_end_epoch() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadParentCrosslinkEndEpoch; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadParentCrosslinkEndEpoch because we manually set an invalid crosslink end epoch + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadParentCrosslinkEndEpoch + }) + ); +} + +#[test] +fn invalid_attestation_parent_crosslink_hash() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadParentCrosslinkHash; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadParentCrosslinkHash because we manually set an invalid crosslink parent_root + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadParentCrosslinkHash + }) + ); +} + +#[test] +fn invalid_attestation_no_committee_for_shard() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::NoCommiteeForShard; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting NoCommiteeForShard because we manually set the crosslink's shard to be invalid + assert_eq!( + result, + Err(BlockProcessingError::BeaconStateError( + BeaconStateError::NoCommitteeForShard + )) + ); +} + +#[test] +fn invalid_attestation_wrong_justified_checkpoint() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::WrongJustifiedCheckpoint; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting WrongJustifiedCheckpoint because we manually set the + // source field of the AttestationData object to be invalid + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::WrongJustifiedCheckpoint { + state: Checkpoint { + epoch: Epoch::from(2 as u64), + root: Hash256::zero(), + }, + attestation: Checkpoint { + epoch: Epoch::from(0 as u64), + root: Hash256::zero(), + }, + is_current: true, + } + }) + ); +} + +#[test] +fn invalid_attestation_bad_target_too_low() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadTargetTooLow; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting EpochTooLow because we manually set the + // target field of the AttestationData object to be invalid + assert_eq!( + result, + Err(BlockProcessingError::BeaconStateError( + BeaconStateError::RelativeEpochError(RelativeEpochError::EpochTooLow { + base: state.current_epoch(), + other: Epoch::from(0 as u64), + }) + )) + ); +} + +#[test] +fn invalid_attestation_bad_target_too_high() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadTargetTooHigh; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting EpochTooHigh because we manually set the + // target field of the AttestationData object to be invalid + assert_eq!( + result, + Err(BlockProcessingError::BeaconStateError( + BeaconStateError::RelativeEpochError(RelativeEpochError::EpochTooHigh { + base: state.current_epoch(), + other: Epoch::from(10 as u64), + }) + )) + ); +} + +#[test] +fn invalid_attestation_bad_crosslink_data_root() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadParentCrosslinkDataRoot; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting ShardBlockRootNotZero because we manually set the + // data_root of the cross link to be non zero + + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::ShardBlockRootNotZero, + }) + ); +} + +#[test] +fn invalid_attestation_bad_indexed_attestation_bad_signature() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test + let test_task = AttestationTestTask::BadIndexedAttestationBadSignature; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadIndexedAttestation(BadSignature) because we ommitted the aggregation bits in the attestation + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadIndexedAttestation( + IndexedAttestationInvalid::BadSignature + ) + }) + ); +} + +#[test] +fn invalid_attestation_custody_bitfield_not_subset() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test + let test_task = AttestationTestTask::CustodyBitfieldNotSubset; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting CustodyBitfieldNotSubset because we set custody_bit to true without setting the aggregation bits. + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::CustodyBitfieldNotSubset + }) + ); +} + +#[test] +fn invalid_attestation_custody_bitfield_has_set_bits() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test + let test_task = AttestationTestTask::CustodyBitfieldHasSetBits; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting CustodyBitfieldHasSetBits because we set custody bits even though the custody_bit boolean is set to false + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadIndexedAttestation( + IndexedAttestationInvalid::CustodyBitfieldHasSetBits + ) + }) + ); +} + +#[test] +fn invalid_attestation_bad_custody_bitfield_len() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadCustodyBitfieldLen; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting InvalidBitfield because the size of the custody_bitfield is bigger than the commitee size. + assert_eq!( + result, + Err(BlockProcessingError::BeaconStateError( + BeaconStateError::InvalidBitfield + )) + ); +} + +#[test] +fn invalid_attestation_bad_aggregation_bitfield_len() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadAggregationBitfieldLen; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting InvalidBitfield because the size of the aggregation_bitfield is bigger than the commitee size. + assert_eq!( + result, + Err(BlockProcessingError::BeaconStateError( + BeaconStateError::InvalidBitfield + )) + ); +} + +#[test] +fn invalid_attestation_bad_signature() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, 97); // minimal number of required validators for this test + let test_task = AttestationTestTask::BadSignature; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadSignature because we're signing with invalid secret_keys + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadIndexedAttestation( + IndexedAttestationInvalid::BadSignature + ) + }) + ); +} + +#[test] +fn invalid_attestation_included_too_early() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::IncludedTooEarly; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting IncludedTooEarly because the shard included in the crosslink is bigger than expected + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::IncludedTooEarly { + state: Slot::from(319 as u64), + delay: spec.min_attestation_inclusion_delay, + attestation: Slot::from(319 as u64) + } + }) + ); +} + +#[test] +fn invalid_attestation_included_too_late() { + let spec = MainnetEthSpec::default_spec(); + // note to maintainer: might need to increase validator count if we get NoCommitteeForShard + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::IncludedTooLate; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting IncludedTooLate because the shard included in the crosslink is bigger than expected + assert!( + result + == Err(BlockProcessingError::BeaconStateError( + BeaconStateError::NoCommitteeForShard + )) + || result + == Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::IncludedTooLate { + state: state.slot, + attestation: Slot::from(254 as u64), + } + }) + ); +} + +#[test] +fn invalid_attestation_bad_target_epoch() { + let spec = MainnetEthSpec::default_spec(); + // note to maintainer: might need to increase validator count if we get NoCommitteeForShard + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadTargetEpoch; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadTargetEpoch because the target epoch is bigger by one than the epoch expected + assert!( + result + == Err(BlockProcessingError::BeaconStateError( + BeaconStateError::NoCommitteeForShard + )) + || result + == Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadTargetEpoch + }) + ); +} + +#[test] +fn invalid_attestation_bad_shard() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttestationTestTask::BadShard; + let (block, mut state) = + builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadShard or NoCommitteeForShard because the shard number is higher than ShardCount + assert!( + result + == Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadShard + }) + || result + == Err(BlockProcessingError::BeaconStateError( + BeaconStateError::NoCommitteeForShard + )) + ); +} + +#[test] +fn valid_insert_attester_slashing() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttesterSlashingTestTask::Valid; + let num_attester_slashings = 1; + let (block, mut state) = + builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) because attester slashing is valid + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_attester_slashing_not_slashable() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttesterSlashingTestTask::NotSlashable; + let num_attester_slashings = 1; + let (block, mut state) = + builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting NotSlashable because the two attestations are the same + assert_eq!( + result, + Err(BlockProcessingError::AttesterSlashingInvalid { + index: 0, + reason: AttesterSlashingInvalid::NotSlashable + }) + ); +} + +#[test] +fn invalid_attester_slashing_1_invalid() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttesterSlashingTestTask::IndexedAttestation1Invalid; + let num_attester_slashings = 1; + let (block, mut state) = + builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting IndexedAttestation1Invalid or IndexedAttestationInvalid because Attestation1 has CustodyBitfield bits set. + assert!( + result + == Err(BlockProcessingError::IndexedAttestationInvalid { + index: 0, + reason: IndexedAttestationInvalid::CustodyBitfieldHasSetBits + }) + || result + == Err(BlockProcessingError::AttesterSlashingInvalid { + index: 0, + reason: AttesterSlashingInvalid::IndexedAttestation1Invalid( + BlockOperationError::Invalid( + IndexedAttestationInvalid::CustodyBitfieldHasSetBits + ) + ) + }) + ); +} + +#[test] +fn invalid_attester_slashing_2_invalid() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = AttesterSlashingTestTask::IndexedAttestation2Invalid; + let num_attester_slashings = 1; + let (block, mut state) = + builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting IndexedAttestation2Invalid or IndexedAttestationInvalid because Attestation2 has CustodyBitfield bits set. + assert!( + result + == Err(BlockProcessingError::IndexedAttestationInvalid { + index: 1, + reason: IndexedAttestationInvalid::CustodyBitfieldHasSetBits + }) + || result + == Err(BlockProcessingError::AttesterSlashingInvalid { + index: 1, + reason: AttesterSlashingInvalid::IndexedAttestation2Invalid( + BlockOperationError::Invalid( + IndexedAttestationInvalid::CustodyBitfieldHasSetBits + ) + ) + }) + ); +} + +#[test] +fn valid_insert_proposer_slashing() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::Valid; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting Ok(()) because we inserted a valid proposer slashing + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_proposer_slashing_proposals_identical() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::ProposalsIdentical; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + // Expecting ProposalsIdentical because we the two headers are identical + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::ProposalsIdentical + }) + ); +} + +#[test] +fn invalid_proposer_slashing_proposer_unknown() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::ProposerUnknown; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting ProposerUnknown because validator_index is unknown + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::ProposerUnknown(3_141_592) + }) + ); +} + +#[test] +fn invalid_proposer_slashing_not_slashable() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::ProposerNotSlashable; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + state.validators[0].slashed = true; + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting ProposerNotSlashable because we've already slashed the validator + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::ProposerNotSlashable(0) + }) + ); +} + +#[test] +fn invalid_bad_proposal_1_signature() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::BadProposal1Signature; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadProposal1Signature because signature of proposal 1 is invalid + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::BadProposal1Signature + }) + ); +} + +#[test] +fn invalid_bad_proposal_2_signature() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::BadProposal2Signature; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting BadProposal2Signature because signature of proposal 2 is invalid + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::BadProposal2Signature + }) + ); +} + +#[test] +fn invalid_proposer_slashing_proposal_epoch_mismatch() { + let spec = MainnetEthSpec::default_spec(); + let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); + let test_task = ProposerSlashingTestTask::ProposalEpochMismatch; + let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + + let result = per_block_processing( + &mut state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + &spec, + ); + + // Expecting ProposalEpochMismatch because the two epochs are different + assert_eq!( + result, + Err(BlockProcessingError::ProposerSlashingInvalid { + index: 0, + reason: ProposerSlashingInvalid::ProposalEpochMismatch( + Slot::from(0 as u64), + Slot::from(128 as u64) + ) + }) + ); +} + +fn get_builder( + spec: &ChainSpec, + slot_offset: u64, + num_validators: usize, +) -> (BlockProcessingBuilder) { + let mut builder = BlockProcessingBuilder::new(num_validators, &spec); + + // Set the state and block to be in the last slot of the `slot_offset`th epoch. let last_slot_of_epoch = - (MainnetEthSpec::genesis_epoch() + 4).end_slot(MainnetEthSpec::slots_per_epoch()); + (MainnetEthSpec::genesis_epoch() + slot_offset).end_slot(MainnetEthSpec::slots_per_epoch()); builder.set_slot(last_slot_of_epoch); builder.build_caches(&spec); diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 4384409010..2f5b1252e9 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -61,6 +61,7 @@ pub fn verify_attestation_for_state( spec: &ChainSpec, ) -> Result<()> { let data = &attestation.data; + verify!( data.crosslink.shard < T::ShardCount::to_u64(), Invalid::BadShard diff --git a/eth2/state_processing/src/test_utils.rs b/eth2/state_processing/src/test_utils.rs index ce06a4fbb4..1651cf7943 100644 --- a/eth2/state_processing/src/test_utils.rs +++ b/eth2/state_processing/src/test_utils.rs @@ -1,5 +1,8 @@ use log::info; -use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; +use types::test_utils::{ + AttestationTestTask, AttesterSlashingTestTask, DepositTestTask, ExitTestTask, + ProposerSlashingTestTask, TestingBeaconBlockBuilder, TestingBeaconStateBuilder, +}; use types::{EthSpec, *}; pub struct BlockBuilder { @@ -77,6 +80,7 @@ impl BlockBuilder { let validator_index = validators_iter.next().expect("Insufficient validators."); builder.insert_proposer_slashing( + &ProposerSlashingTestTask::Valid, validator_index, &keypairs[validator_index as usize].sk, &state.fork, @@ -102,7 +106,13 @@ impl BlockBuilder { secret_keys.push(&keypairs[validator_index as usize].sk); } - builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); + builder.insert_attester_slashing( + &AttesterSlashingTestTask::Valid, + &attesters, + &secret_keys, + &state.fork, + spec, + ); } info!( "Inserted {} attester slashings.", @@ -113,6 +123,7 @@ impl BlockBuilder { let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); builder .insert_attestations( + &AttestationTestTask::Valid, &state, &all_secret_keys, self.num_attestations as usize, @@ -125,15 +136,14 @@ impl BlockBuilder { ); // Insert `Deposit` objects. - for i in 0..self.num_deposits { - builder.insert_deposit( - 32_000_000_000, - state.eth1_data.deposit_count + (i as u64), - &state, - spec, - ); - } - state.eth1_data.deposit_count += self.num_deposits as u64; + builder.insert_deposits( + 32_000_000_000, + DepositTestTask::NoReset, + state.eth1_data.deposit_count, + self.num_deposits as u64, + &mut state, + spec, + ); info!("Inserted {} deposits.", builder.block.body.deposits.len()); // Insert the maximum possible number of `Exit` objects. @@ -141,7 +151,8 @@ impl BlockBuilder { let validator_index = validators_iter.next().expect("Insufficient validators."); builder.insert_exit( - &state, + &ExitTestTask::Valid, + &mut state, validator_index, &keypairs[validator_index as usize].sk, spec, diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index e3138b26cc..ca78da3408 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -16,6 +16,7 @@ eth2_hashing = "0.1.0" hex = "0.3" int_to_bytes = { path = "../utils/int_to_bytes" } log = "0.4.8" +merkle_proof = { path = "../utils/merkle_proof" } rayon = "1.2.0" rand = "0.7.2" serde = "1.0.102" diff --git a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs index f794919f31..1742ce4d89 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs @@ -1,4 +1,4 @@ -use crate::test_utils::TestingAttestationDataBuilder; +use crate::test_utils::{AttestationTestTask, TestingAttestationDataBuilder}; use crate::*; use tree_hash::TreeHash; @@ -13,18 +13,27 @@ pub struct TestingAttestationBuilder { impl TestingAttestationBuilder { /// Create a new attestation builder. pub fn new( + test_task: &AttestationTestTask, state: &BeaconState, committee: &[usize], slot: Slot, shard: u64, spec: &ChainSpec, ) -> Self { - let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); + let data_builder = TestingAttestationDataBuilder::new(test_task, state, shard, slot, spec); - let mut aggregation_bits = BitList::with_capacity(committee.len()).unwrap(); - let mut custody_bits = BitList::with_capacity(committee.len()).unwrap(); + let mut aggregation_bits_len = committee.len(); + let mut custody_bits_len = committee.len(); - for (i, _) in committee.iter().enumerate() { + match test_task { + AttestationTestTask::BadAggregationBitfieldLen => aggregation_bits_len += 1, + AttestationTestTask::BadCustodyBitfieldLen => custody_bits_len += 1, + _ => (), + } + let mut aggregation_bits = BitList::with_capacity(aggregation_bits_len).unwrap(); + let mut custody_bits = BitList::with_capacity(custody_bits_len).unwrap(); + + for i in 0..committee.len() { custody_bits.set(i, false).unwrap(); aggregation_bits.set(i, false).unwrap(); } @@ -48,11 +57,12 @@ impl TestingAttestationBuilder { /// keypair must be that of the first signing validator. pub fn sign( &mut self, + test_task: &AttestationTestTask, signing_validators: &[usize], secret_keys: &[&SecretKey], fork: &Fork, spec: &ChainSpec, - custody_bit: bool, + mut custody_bit: bool, ) -> &mut Self { assert_eq!( signing_validators.len(), @@ -67,16 +77,24 @@ impl TestingAttestationBuilder { .position(|v| *v == *validator_index) .expect("Signing validator not in attestation committee"); - self.attestation - .aggregation_bits - .set(committee_index, true) - .unwrap(); - - if custody_bit { - self.attestation - .custody_bits - .set(committee_index, true) - .unwrap(); + match test_task { + AttestationTestTask::BadIndexedAttestationBadSignature => (), + AttestationTestTask::CustodyBitfieldNotSubset => custody_bit = true, + _ => { + self.attestation + .aggregation_bits + .set(committee_index, true) + .unwrap(); + } + } + match (custody_bit, test_task) { + (true, _) | (_, AttestationTestTask::CustodyBitfieldHasSetBits) => { + self.attestation + .custody_bits + .set(committee_index, true) + .unwrap(); + } + (false, _) => (), } let message = AttestationDataAndCustodyBit { @@ -91,7 +109,12 @@ impl TestingAttestationBuilder { fork, ); - let signature = Signature::new(&message, domain, secret_keys[key_index]); + let index = if *test_task == AttestationTestTask::BadSignature { + 0 + } else { + key_index + }; + let signature = Signature::new(&message, domain, secret_keys[index]); self.attestation.signature.add(&signature) } diff --git a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs index ac45abe0fe..d439490e90 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs @@ -1,3 +1,4 @@ +use crate::test_utils::AttestationTestTask; use crate::*; use tree_hash::TreeHash; @@ -12,8 +13,9 @@ impl TestingAttestationDataBuilder { /// Configures a new `AttestationData` which attests to all of the same parameters as the /// state. pub fn new( + test_task: &AttestationTestTask, state: &BeaconState, - shard: u64, + mut shard: u64, slot: Slot, spec: &ChainSpec, ) -> Self { @@ -22,13 +24,13 @@ impl TestingAttestationDataBuilder { let is_previous_epoch = slot.epoch(T::slots_per_epoch()) != current_epoch; - let source = if is_previous_epoch { + let mut source = if is_previous_epoch { state.previous_justified_checkpoint.clone() } else { state.current_justified_checkpoint.clone() }; - let target = if is_previous_epoch { + let mut target = if is_previous_epoch { Checkpoint { epoch: previous_epoch, root: *state @@ -50,20 +52,66 @@ impl TestingAttestationDataBuilder { state.get_current_crosslink(shard).unwrap() }; + let mut start = parent_crosslink.end_epoch; + let mut end = std::cmp::min( + target.epoch, + parent_crosslink.end_epoch + spec.max_epochs_per_crosslink, + ); + let mut parent_root = Hash256::from_slice(&parent_crosslink.tree_hash_root()); + let mut data_root = Hash256::zero(); + let beacon_block_root = *state.get_block_root(slot).unwrap(); + + match test_task { + AttestationTestTask::BadParentCrosslinkStartEpoch => start = Epoch::from(10 as u64), + AttestationTestTask::BadParentCrosslinkEndEpoch => end = Epoch::from(0 as u64), + AttestationTestTask::BadParentCrosslinkHash => parent_root = Hash256::zero(), + AttestationTestTask::NoCommiteeForShard => shard += 2, + AttestationTestTask::BadShard => shard = T::ShardCount::to_u64(), + AttestationTestTask::IncludedTooEarly => shard += 1, + AttestationTestTask::IncludedTooLate => { + target = Checkpoint { + epoch: Epoch::from(3 as u64), + root: Hash256::zero(), + } + } + AttestationTestTask::BadTargetEpoch => { + target = Checkpoint { + epoch: Epoch::from(5 as u64), + root: Hash256::zero(), + } + } + AttestationTestTask::WrongJustifiedCheckpoint => { + source = Checkpoint { + epoch: Epoch::from(0 as u64), + root: Hash256::zero(), + } + } + AttestationTestTask::BadTargetTooLow => { + target = Checkpoint { + epoch: Epoch::from(0 as u64), + root: Hash256::zero(), + } + } + AttestationTestTask::BadTargetTooHigh => { + target = Checkpoint { + epoch: Epoch::from(10 as u64), + root: Hash256::zero(), + } + } + AttestationTestTask::BadParentCrosslinkDataRoot => data_root = parent_root, + _ => (), + } let crosslink = Crosslink { shard, - parent_root: Hash256::from_slice(&parent_crosslink.tree_hash_root()), - start_epoch: parent_crosslink.end_epoch, - end_epoch: std::cmp::min( - target.epoch, - parent_crosslink.end_epoch + spec.max_epochs_per_crosslink, - ), - data_root: Hash256::zero(), + parent_root, + start_epoch: start, + end_epoch: end, + data_root, }; let data = AttestationData { // LMD GHOST vote - beacon_block_root: *state.get_block_root(slot).unwrap(), + beacon_block_root, // FFG Vote source, diff --git a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs index 39673ef38a..353c4e38bc 100644 --- a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs @@ -1,3 +1,4 @@ +use crate::test_utils::AttesterSlashingTestTask; use crate::*; use tree_hash::TreeHash; @@ -17,7 +18,11 @@ impl TestingAttesterSlashingBuilder { /// - `domain: Domain` /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). - pub fn double_vote(validator_indices: &[u64], signer: F) -> AttesterSlashing + pub fn double_vote( + test_task: &AttesterSlashingTestTask, + validator_indices: &[u64], + signer: F, + ) -> AttesterSlashing where F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { @@ -49,21 +54,37 @@ impl TestingAttesterSlashingBuilder { crosslink, }; - let data_2 = AttestationData { - target: checkpoint_2, - ..data_1.clone() + let data_2 = if *test_task == AttesterSlashingTestTask::NotSlashable { + AttestationData { ..data_1.clone() } + } else { + AttestationData { + target: checkpoint_2, + ..data_1.clone() + } }; let mut attestation_1 = IndexedAttestation { custody_bit_0_indices: validator_indices.to_vec().into(), - custody_bit_1_indices: VariableList::empty(), + custody_bit_1_indices: if *test_task + == AttesterSlashingTestTask::IndexedAttestation1Invalid + { + validator_indices.to_vec().into() + } else { + VariableList::empty() + }, data: data_1, signature: AggregateSignature::new(), }; let mut attestation_2 = IndexedAttestation { custody_bit_0_indices: validator_indices.to_vec().into(), - custody_bit_1_indices: VariableList::empty(), + custody_bit_1_indices: if *test_task + == AttesterSlashingTestTask::IndexedAttestation2Invalid + { + validator_indices.to_vec().into() + } else { + VariableList::empty() + }, data: data_2, signature: AggregateSignature::new(), }; diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index ebb9a64f83..fa77254d94 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -3,8 +3,11 @@ use crate::{ TestingAttestationBuilder, TestingAttesterSlashingBuilder, TestingDepositBuilder, TestingProposerSlashingBuilder, TestingTransferBuilder, TestingVoluntaryExitBuilder, }, + typenum::U4294967296, *, }; +use int_to_bytes::int_to_bytes32; +use merkle_proof::MerkleTree; use rayon::prelude::*; use tree_hash::{SignedRoot, TreeHash}; @@ -15,6 +18,73 @@ pub struct TestingBeaconBlockBuilder { pub block: BeaconBlock, } +/// Enum used for passing test options to builder +#[derive(PartialEq)] +pub enum DepositTestTask { + Valid, + BadPubKey, + BadSig, + InvalidPubKey, + NoReset, +} + +/// Enum used for passing test options to builder +pub enum ExitTestTask { + AlreadyInitiated, + AlreadyExited, + BadSignature, + FutureEpoch, + NotActive, + Valid, + ValidatorUnknown, +} + +#[derive(PartialEq)] +/// Enum used for passing test options to builder +pub enum AttestationTestTask { + Valid, + BadParentCrosslinkStartEpoch, + BadParentCrosslinkEndEpoch, + BadParentCrosslinkHash, + NoCommiteeForShard, + WrongJustifiedCheckpoint, + BadTargetTooLow, + BadTargetTooHigh, + BadShard, + BadParentCrosslinkDataRoot, + BadIndexedAttestationBadSignature, + CustodyBitfieldNotSubset, + CustodyBitfieldHasSetBits, + BadCustodyBitfieldLen, + BadAggregationBitfieldLen, + BadSignature, + ValidatorUnknown, + IncludedTooEarly, + IncludedTooLate, + BadTargetEpoch, +} + +#[derive(PartialEq)] +/// Enum used for passing test options to builder +pub enum AttesterSlashingTestTask { + Valid, + NotSlashable, + IndexedAttestation1Invalid, + IndexedAttestation2Invalid, +} + +/// Enum used for passing test options to builder +#[derive(PartialEq)] +pub enum ProposerSlashingTestTask { + Valid, + ProposerUnknown, + ProposalEpochMismatch, + ProposalsIdentical, + ProposerNotSlashable, + BadProposal1Signature, + BadProposal2Signature, +} + impl TestingBeaconBlockBuilder { /// Create a new builder from genesis. pub fn new(spec: &ChainSpec) -> Self { @@ -61,13 +131,14 @@ impl TestingBeaconBlockBuilder { /// Inserts a signed, valid `ProposerSlashing` for the validator. pub fn insert_proposer_slashing( &mut self, + test_task: &ProposerSlashingTestTask, validator_index: u64, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec, ) { let proposer_slashing = - build_proposer_slashing::(validator_index, secret_key, fork, spec); + build_proposer_slashing::(test_task, validator_index, secret_key, fork, spec); self.block .body .proposer_slashings @@ -78,18 +149,20 @@ impl TestingBeaconBlockBuilder { /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`. pub fn insert_attester_slashing( &mut self, + test_task: &AttesterSlashingTestTask, validator_indices: &[u64], secret_keys: &[&SecretKey], fork: &Fork, spec: &ChainSpec, ) { - let attester_slashing = - build_double_vote_attester_slashing(validator_indices, secret_keys, fork, spec); - self.block - .body - .attester_slashings - .push(attester_slashing) - .unwrap(); + let attester_slashing = build_double_vote_attester_slashing( + test_task, + validator_indices, + secret_keys, + fork, + spec, + ); + let _ = self.block.body.attester_slashings.push(attester_slashing); } /// Fills the block with `num_attestations` attestations. @@ -103,6 +176,7 @@ impl TestingBeaconBlockBuilder { /// to aggregate these split attestations. pub fn insert_attestations( &mut self, + test_task: &AttestationTestTask, state: &BeaconState, secret_keys: &[&SecretKey], num_attestations: usize, @@ -175,14 +249,16 @@ impl TestingBeaconBlockBuilder { let attestations: Vec<_> = committees .par_iter() .map(|(slot, committee, signing_validators, shard)| { - let mut builder = - TestingAttestationBuilder::new(state, committee, *slot, *shard, spec); + let mut builder = TestingAttestationBuilder::new( + test_task, state, committee, *slot, *shard, spec, + ); let signing_secret_keys: Vec<&SecretKey> = signing_validators .iter() .map(|validator_index| secret_keys[*validator_index]) .collect(); builder.sign( + test_task, signing_validators, &signing_secret_keys, &state.fork, @@ -202,47 +278,113 @@ impl TestingBeaconBlockBuilder { } /// Insert a `Valid` deposit into the state. - pub fn insert_deposit( + pub fn insert_deposits( &mut self, amount: u64, + test_task: DepositTestTask, // TODO: deal with the fact deposits no longer have explicit indices _index: u64, - state: &BeaconState, + num_deposits: u64, + state: &mut BeaconState, spec: &ChainSpec, ) { - let keypair = Keypair::random(); + // Vector containing deposits' data + let mut datas = vec![]; + for _ in 0..num_deposits { + let keypair = Keypair::random(); - let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); - builder.sign( - &keypair, - state.slot.epoch(T::slots_per_epoch()), - &state.fork, - spec, - ); + let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); + builder.sign( + &test_task, + &keypair, + state.slot.epoch(T::slots_per_epoch()), + &state.fork, + spec, + ); + datas.push(builder.build().data); + } - self.block.body.deposits.push(builder.build()).unwrap() + // Vector containing all leaves + let leaves = datas + .iter() + .map(|data| Hash256::from_slice(&data.tree_hash_root())) + .collect::>(); + + // Building a VarList from leaves + let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); + + // Setting the deposit_root to be the tree_hash_root of the VarList + state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root()); + + // Building the merkle tree used for generating proofs + let tree = MerkleTree::create(&leaves[..], spec.deposit_contract_tree_depth as usize); + + // Building proofs + let mut proofs = vec![]; + for i in 0..leaves.len() { + let (_, mut proof) = tree.generate_proof(i, spec.deposit_contract_tree_depth as usize); + proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); + proofs.push(proof); + } + + // Building deposits + let deposits = datas + .into_par_iter() + .zip(proofs.into_par_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect::>(); + + // Pushing deposits to block body + for deposit in deposits { + let _ = self.block.body.deposits.push(deposit); + } + + // Manually setting the deposit_count to process deposits + // This is for test purposes only + if test_task == DepositTestTask::NoReset { + state.eth1_data.deposit_count += num_deposits; + } else { + state.eth1_deposit_index = 0; + state.eth1_data.deposit_count = num_deposits; + } } /// Insert a `Valid` exit into the state. pub fn insert_exit( &mut self, - state: &BeaconState, - validator_index: u64, + test_task: &ExitTestTask, + state: &mut BeaconState, + mut validator_index: u64, secret_key: &SecretKey, spec: &ChainSpec, ) { - let mut builder = TestingVoluntaryExitBuilder::new( - state.slot.epoch(T::slots_per_epoch()), - validator_index, - ); + let sk = &mut secret_key.clone(); + let mut exit_epoch = state.slot.epoch(T::slots_per_epoch()); - builder.sign(secret_key, &state.fork, spec); + match test_task { + ExitTestTask::BadSignature => *sk = SecretKey::random(), + ExitTestTask::ValidatorUnknown => validator_index = 4242, + ExitTestTask::AlreadyExited => { + state.validators[validator_index as usize].exit_epoch = Epoch::from(314_159 as u64) + } + ExitTestTask::NotActive => { + state.validators[validator_index as usize].activation_epoch = + Epoch::from(314_159 as u64) + } + ExitTestTask::FutureEpoch => exit_epoch = spec.far_future_epoch, + _ => (), + } + + let mut builder = TestingVoluntaryExitBuilder::new(exit_epoch, validator_index); + + builder.sign(sk, &state.fork, spec); self.block .body .voluntary_exits .push(builder.build()) - .unwrap() + .unwrap(); } /// Insert a `Valid` transfer into the state. @@ -280,6 +422,7 @@ impl TestingBeaconBlockBuilder { /// /// Signs the message using a `BeaconChainHarness`. fn build_proposer_slashing( + test_task: &ProposerSlashingTestTask, validator_index: u64, secret_key: &SecretKey, fork: &Fork, @@ -290,13 +433,14 @@ fn build_proposer_slashing( Signature::new(message, domain, secret_key) }; - TestingProposerSlashingBuilder::double_vote::(validator_index, signer) + TestingProposerSlashingBuilder::double_vote::(test_task, validator_index, signer) } /// Builds an `AttesterSlashing` for some `validator_indices`. /// /// Signs the message using a `BeaconChainHarness`. fn build_double_vote_attester_slashing( + test_task: &AttesterSlashingTestTask, validator_indices: &[u64], secret_keys: &[&SecretKey], fork: &Fork, @@ -311,5 +455,5 @@ fn build_double_vote_attester_slashing( Signature::new(message, domain, secret_keys[key_index]) }; - TestingAttesterSlashingBuilder::double_vote(validator_indices, signer) + TestingAttesterSlashingBuilder::double_vote(test_task, validator_indices, signer) } diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 2f4dde7e63..d6aa488f99 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -1,5 +1,5 @@ use super::super::{generate_deterministic_keypairs, KeypairsFile}; -use crate::test_utils::TestingPendingAttestationBuilder; +use crate::test_utils::{AttestationTestTask, TestingPendingAttestationBuilder}; use crate::*; use bls::get_withdrawal_credentials; use dirs; @@ -224,6 +224,7 @@ impl TestingBeaconStateBuilder { for crosslink_committee in committees { let mut builder = TestingPendingAttestationBuilder::new( + &AttestationTestTask::Valid, state, crosslink_committee.shard, slot, diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index ed08571a72..dcde1a74f0 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -1,3 +1,4 @@ +use crate::test_utils::DepositTestTask; use crate::*; use bls::{get_withdrawal_credentials, PublicKeyBytes, SignatureBytes}; @@ -29,18 +30,41 @@ impl TestingDepositBuilder { /// - `pubkey` to the signing pubkey. /// - `withdrawal_credentials` to the signing pubkey. /// - `proof_of_possession` - pub fn sign(&mut self, keypair: &Keypair, epoch: Epoch, fork: &Fork, spec: &ChainSpec) { + pub fn sign( + &mut self, + test_task: &DepositTestTask, + keypair: &Keypair, + epoch: Epoch, + fork: &Fork, + spec: &ChainSpec, + ) { + let new_key = Keypair::random(); + let mut pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); + let mut secret_key = keypair.sk.clone(); + + match test_task { + DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk.clone()), + DepositTestTask::InvalidPubKey => { + // Creating invalid public key bytes + let mut public_key_bytes: Vec = vec![0; 48]; + public_key_bytes[0] = 255; + pubkeybytes = PublicKeyBytes::from_bytes(&public_key_bytes).unwrap(); + } + DepositTestTask::BadSig => secret_key = new_key.sk, + _ => (), + } + let withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], ); - self.deposit.data.pubkey = PublicKeyBytes::from(keypair.pk.clone()); + // Building the data and signing it + self.deposit.data.pubkey = pubkeybytes; self.deposit.data.withdrawal_credentials = withdrawal_credentials; - self.deposit.data.signature = self.deposit .data - .create_signature(&keypair.sk, epoch, fork, spec); + .create_signature(&secret_key, epoch, fork, spec); } /// Builds the deposit, consuming the builder. diff --git a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs index 14fe9a5f90..77cb6a302f 100644 --- a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs @@ -1,4 +1,4 @@ -use crate::test_utils::TestingAttestationDataBuilder; +use crate::test_utils::{AttestationTestTask, TestingAttestationDataBuilder}; use crate::*; /// Builds an `AttesterSlashing` to be used for testing purposes. @@ -15,8 +15,14 @@ impl TestingPendingAttestationBuilder { /// /// * The aggregation and custody bitfields will all be empty, they need to be set with /// `Self::add_committee_participation`. - pub fn new(state: &BeaconState, shard: u64, slot: Slot, spec: &ChainSpec) -> Self { - let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); + pub fn new( + test_task: &AttestationTestTask, + state: &BeaconState, + shard: u64, + slot: Slot, + spec: &ChainSpec, + ) -> Self { + let data_builder = TestingAttestationDataBuilder::new(test_task, state, shard, slot, spec); let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), slot.epoch(T::slots_per_epoch())) diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index b972934276..0c14f0a754 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -1,3 +1,4 @@ +use crate::test_utils::ProposerSlashingTestTask; use crate::*; use tree_hash::SignedRoot; @@ -17,14 +18,22 @@ impl TestingProposerSlashingBuilder { /// - `domain: Domain` /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). - pub fn double_vote(proposer_index: u64, signer: F) -> ProposerSlashing + pub fn double_vote( + test_task: &ProposerSlashingTestTask, + mut proposer_index: u64, + signer: F, + ) -> ProposerSlashing where T: EthSpec, F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { let slot = Slot::new(0); let hash_1 = Hash256::from([1; 32]); - let hash_2 = Hash256::from([2; 32]); + let hash_2 = if *test_task == ProposerSlashingTestTask::ProposalsIdentical { + hash_1.clone() + } else { + Hash256::from([2; 32]) + }; let mut header_1 = BeaconBlockHeader { slot, @@ -34,22 +43,37 @@ impl TestingProposerSlashingBuilder { signature: Signature::empty_signature(), }; + let slot_2 = if *test_task == ProposerSlashingTestTask::ProposalEpochMismatch { + Slot::new(128) + } else { + Slot::new(0) + }; + let mut header_2 = BeaconBlockHeader { parent_root: hash_2, + slot: slot_2, ..header_1.clone() }; let epoch = slot.epoch(T::slots_per_epoch()); - header_1.signature = { - let message = header_1.signed_root(); - signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) - }; + if *test_task != ProposerSlashingTestTask::BadProposal1Signature { + header_1.signature = { + let message = header_1.signed_root(); + signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) + }; + } - header_2.signature = { - let message = header_2.signed_root(); - signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) - }; + if *test_task != ProposerSlashingTestTask::BadProposal2Signature { + header_2.signature = { + let message = header_2.signed_root(); + signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) + }; + } + + if *test_task == ProposerSlashingTestTask::ProposerUnknown { + proposer_index = 3_141_592; + } ProposerSlashing { proposer_index, From 97729f865468086a0d73daeefae6479d7447a86d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Nov 2019 19:15:56 +1100 Subject: [PATCH 15/21] Run cargo-fmt (#599) --- beacon_node/network/src/message_handler.rs | 12 ++++++++--- .../src/attestation_producer/mod.rs | 20 ++++++++++++++----- validator_client/src/block_producer/mod.rs | 16 +++++++++++---- validator_client/src/duties/mod.rs | 8 ++++++-- 4 files changed, 42 insertions(+), 14 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 0e56b4c331..898304272e 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -146,9 +146,15 @@ impl MessageHandler { ) { // an error could have occurred. match error_response { - RPCErrorResponse::InvalidRequest(error) => warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()), - RPCErrorResponse::ServerError(error) => warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()), - RPCErrorResponse::Unknown(error) => warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()), + RPCErrorResponse::InvalidRequest(error) => { + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) + } + RPCErrorResponse::ServerError(error) => { + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) + } + RPCErrorResponse::Unknown(error) => { + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()) + } RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index d73bf5e237..6f4a5f304e 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -57,11 +57,21 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a "slot" => slot, ), Err(e) => error!(log, "Attestation production error"; "Error" => format!("{:?}", e)), - Ok(ValidatorEvent::SignerRejection(_slot)) => error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string()), - Ok(ValidatorEvent::IndexedAttestationNotProduced(_slot)) => error!(log, "Attestation production error"; "Error" => "Rejected the attestation as it could have been slashed".to_string()), - Ok(ValidatorEvent::PublishAttestationFailed) => error!(log, "Attestation production error"; "Error" => "Beacon node was unable to publish an attestation".to_string()), - Ok(ValidatorEvent::InvalidAttestation) => error!(log, "Attestation production error"; "Error" => "The signed attestation was invalid".to_string()), - Ok(v) => warn!(log, "Unknown result for attestation production"; "Error" => format!("{:?}",v)), + Ok(ValidatorEvent::SignerRejection(_slot)) => { + error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string()) + } + Ok(ValidatorEvent::IndexedAttestationNotProduced(_slot)) => { + error!(log, "Attestation production error"; "Error" => "Rejected the attestation as it could have been slashed".to_string()) + } + Ok(ValidatorEvent::PublishAttestationFailed) => { + error!(log, "Attestation production error"; "Error" => "Beacon node was unable to publish an attestation".to_string()) + } + Ok(ValidatorEvent::InvalidAttestation) => { + error!(log, "Attestation production error"; "Error" => "The signed attestation was invalid".to_string()) + } + Ok(v) => { + warn!(log, "Unknown result for attestation production"; "Error" => format!("{:?}",v)) + } } } diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 0d8b39991a..d88ac15f63 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -68,10 +68,18 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { "slot" => slot, ), Err(e) => error!(self.log, "Block production error"; "Error" => format!("{:?}", e)), - Ok(ValidatorEvent::SignerRejection(_slot)) => error!(self.log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()), - Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => error!(self.log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string()), - Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => error!(self.log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string()), - Ok(v) => warn!(self.log, "Unknown result for block production"; "Error" => format!("{:?}",v)), + Ok(ValidatorEvent::SignerRejection(_slot)) => { + error!(self.log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()) + } + Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => { + error!(self.log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string()) + } + Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => { + error!(self.log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string()) + } + Ok(v) => { + warn!(self.log, "Unknown result for block production"; "Error" => format!("{:?}",v)) + } } } diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs index 429aea6480..f0269a41f6 100644 --- a/validator_client/src/duties/mod.rs +++ b/validator_client/src/duties/mod.rs @@ -77,8 +77,12 @@ impl DutiesManager { pub fn run_update(&self, epoch: Epoch, log: slog::Logger) -> Result, ()> { match self.update(epoch) { Err(error) => error!(log, "Epoch duties poll error"; "error" => format!("{:?}", error)), - Ok(UpdateOutcome::NoChange(epoch)) => debug!(log, "No change in duties"; "epoch" => epoch), - Ok(UpdateOutcome::DutiesChanged(epoch, duties)) => info!(log, "Duties changed (potential re-org)"; "epoch" => epoch, "duties" => format!("{:?}", duties)), + Ok(UpdateOutcome::NoChange(epoch)) => { + debug!(log, "No change in duties"; "epoch" => epoch) + } + Ok(UpdateOutcome::DutiesChanged(epoch, duties)) => { + info!(log, "Duties changed (potential re-org)"; "epoch" => epoch, "duties" => format!("{:?}", duties)) + } Ok(UpdateOutcome::NewDuties(epoch, duties)) => { info!(log, "New duties obtained"; "epoch" => epoch); print_duties(&log, duties); From f229bbba1ce39ac8628db4d06b5615be67a589d8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Nov 2019 14:47:51 +1100 Subject: [PATCH 16/21] Eth1 Integration (#542) * Refactor to cache Eth1Data * Fix merge conflicts and minor refactorings * Rename Eth1Cache to Eth1DataCache * Refactor events subscription * Add deposits module to interface with BeaconChain deposits * Remove utils * Rename to types.rs and add trait constraints to Eth1DataFetcher * Confirm to trait constraints. Make Web3DataFetcher cloneable * Make fetcher object member of deposit and eth1_data cache and other fixes * Fix update_cache function * Move fetch_eth1_data to impl block * Fix deposit tests * Create Eth1 object for interfacing with Beacon chain * Add `run` function for running update_cache and subscribe_deposit_logs tasks * Add logging * Run `cargo fmt` and make tests pass * Convert sync functions to async * Add timeouts to web3 functions * Return futures from cache functions * Add failed chaining of futures * Working cache updation * Clean up tests and `update_cache` function * Refactor `get_eth1_data` functions to work with future returning functions * Refactor eth1 `run` function to work with modified `update_cache` api * Minor changes * Add distance parameter to `update_cache` * Fix tests and other minor fixes * Working integration with cache and deposits * Add merkle_tree construction, proof generation and verification code * Add function to construct and fetch Deposits for BeaconNode * Add error handling * Import ssz * Add error handling to eth1 cache and fix minor errors * Run rustfmt * Fix minor bug * Rename Eth1Error and change to Result * Change deposit fetching mechanism from notification based to poll based * Add deposits from eth1 chain in a given range every `x` blocks * Modify `run` function to accommodate changes * Minor fixes * Fix formatting * Initial commit. web3 api working. * Tidied up lib. Add function for fetching logs. * Refactor with `Eth1DataFetcher` trait * Add parsing for deposit contract logs and get_eth1_data function * Add `get_eth1_votes` function * Refactor to cache Eth1Data * Fix merge conflicts and minor refactorings * Rename Eth1Cache to Eth1DataCache * Refactor events subscription * Add deposits module to interface with BeaconChain deposits * Remove utils * Rename to types.rs and add trait constraints to Eth1DataFetcher * Confirm to trait constraints. Make Web3DataFetcher cloneable * Make fetcher object member of deposit and eth1_data cache and other fixes * Fix update_cache function * Move fetch_eth1_data to impl block * Fix deposit tests * Create Eth1 object for interfacing with Beacon chain * Add `run` function for running update_cache and subscribe_deposit_logs tasks * Add logging * Run `cargo fmt` and make tests pass * Convert sync functions to async * Add timeouts to web3 functions * Return futures from cache functions * Add failed chaining of futures * Working cache updation * Clean up tests and `update_cache` function * Refactor `get_eth1_data` functions to work with future returning functions * Refactor eth1 `run` function to work with modified `update_cache` api * Minor changes * Add distance parameter to `update_cache` * Fix tests and other minor fixes * Working integration with cache and deposits * Add merkle_tree construction, proof generation and verification code * Add function to construct and fetch Deposits for BeaconNode * Add error handling * Import ssz * Add error handling to eth1 cache and fix minor errors * Run rustfmt * Fix minor bug * Rename Eth1Error and change to Result * Change deposit fetching mechanism from notification based to poll based * Add deposits from eth1 chain in a given range every `x` blocks * Modify `run` function to accommodate changes * Minor fixes * Fix formatting * Fix merge issue * Refactor with `Config` struct. Remote `ContractConfig` * Rename eth1_chain crate to eth1 * Rename files and read abi file using `fs::read` * Move eth1 to lib * Remove unnecessary mutability constraint * Add `Web3Backend` for returning actual eth1 data * Refactor `get_eth1_votes` to return a Result * Delete `eth1_chain` crate * Return `Result` from `get_deposits` * Fix range of deposits to return to beacon chain * Add `get_block_height_by_hash` trait function * Add naive method for getting `previous_eth1_distance` * Add eth1 config params to main config * Add instructions for setting up eth1 testing environment * Add build script to fetch deposit contract abi * Contract ABI is part of compiled binary * Fix minor bugs * Move docs to lib * Add timeout to config * Remove print statements * Change warn to error * Fix typos * Removed prints in test and get timeout value from config * Fixed error types * Added logging to web3_fetcher * Refactor for modified web3 api * Fix minor stuff * Add build script * Tidy, hide eth1 integration tests behind flag * Add http crate * Add first stages of eth1_test_rig * Fix deposits on test rig * Fix bug with deposit count method * Add block hash getter to http eth1 * Clean eth1 http crate and tests * Add script to start ganache * Adds deposit tree to eth1-http * Extend deposit tree tests * Tidy tests in eth1-http * Add more detail to get block request * Add block cache to eth1-http * Rename deposit tree to deposit cache * Add inital updating to eth1-http * Tidy updater * Fix compile bugs in tests * Adds an Eth1DataCache builder * Reorg eth1-http files * Add (failing) tests for eth1 updater * Rename files, fix bug in eth1-http * Ensure that ganache timestamps are increasing * Fix bugs with getting eth1data ancestors * Improve eth1 testing, fix bugs * Add truncate method to block cache * Add pruning to block cache update process * Add tests for block pruning * Allow for dropping an expired cache. * Add more comments * Add first compiling version of deposit updater * Add common fn for getting range of required blocks * Add passing deposit update test * Improve tests * Fix block pruning bug * Add tests for running two updates at once * Add updater services to eth1 * Add deposit collection to beacon chain * Add incomplete builder experiments * Add first working version of beacon chain builder * Update test harness to new beacon chain type * Rename builder file, tidy * Add first working client builder * Progress further on client builder * Update becaon node binary to use client builder * Ensure release tests compile * Remove old eth1 crate * Add first pass of new lighthouse binary * Fix websocket server startup * Remove old binary code from beacon_node crate * Add first working beacon node tests * Add genesis crate, new eth1 cache_2 * Add Serivce to Eth1Cache * Refactor with general eth1 improvements * Add passing genesis test * Tidy, add comments * Add more comments to eth1 service * Add further eth1 progress * Fix some bugs with genesis * Fix eth1 bugs, make eth1 linking more efficient * Shift logic in genesis service * Add more comments to genesis service * Add gzip, max request values, timeouts to http * Update testnet parameters to suit goerli testnet * Add ability to vary Fork, fix custom spec * Be more explicit about deposit fork version * Start adding beacon chain eth1 option * Add more flexibility to prod client * Further runtime refactoring * Allow for starting from store * Add bootstrapping to client config * Add remote_beacon_node crate * Update eth1 service for more configurability * Update eth1 tests to use less runtimes * Patch issues with tests using too many files * Move dummy eth1 backend flag * Ensure all tests pass * Add ganache-cli to Dockerfile * Use a special docker hub image for testing * Appease clippy * Move validator client into lighthouse binary * Allow starting with dummy eth1 backend * Improve logging * Fix dummy eth1 backend from cli * Add extra testnet command * Ensure consistent spec in beacon node * Update eth1 rig to work on goerli * Tidy lcli, start adding support for yaml config * Add incomplete YamlConfig struct * Remove efforts at YamlConfig * Add incomplete eth1 voting. Blocked on spec issues * Add (untested) first pass at eth1 vote algo * Add tests for winning vote * Add more tests for eth1 chain * Add more eth1 voting tests * Added more eth1 voting testing * Change test name * Add more tests to eth1 chain * Tidy eth1 generics, add more tests * Improve comments * Tidy beacon_node tests * Tidy, rename JsonRpc.. to Caching.. * Tidy voting logic * Tidy builder docs * Add comments, tidy eth1 * Add more comments to eth1 * Fix bug with winning_vote * Add doc comments to the `ClientBuilder` * Remove commented-out code * Improve `ClientBuilder` docs * Add comments to client config * Add decoding test for `ClientConfig` * Remove unused `DepositSet` struct * Tidy `block_cache` * Remove commented out lines * Remove unused code in `eth1` crate * Remove old validator binary `main.rs` * Tidy, fix tests compile error * Add initial tests for get_deposits * Remove dead code in eth1_test_rig * Update TestingDepositBuilder * Add testing for getting eth1 deposits * Fix duplicate rand dep * Remove dead code * Remove accidentally-added files * Fix comment in eth1_genesis_service * Add .gitignore for eth1_test_rig * Fix bug in eth1_genesis_service * Remove dead code from eth2_config * Fix tabs/spaces in root Cargo.toml * Tidy eth1 crate * Allow for re-use of eth1 service after genesis * Update docs for new CLI * Change README gif * Tidy eth1 http module * Tidy eth1 service * Tidy environment crate * Remove unused file * Tidy, add comments * Remove commented-out code * Address majority of Michael's comments * Address other PR comments * Add link to issue alongside TODO --- .gitlab-ci.yml | 2 +- Cargo.toml | 5 + Dockerfile | 2 + README.md | 4 +- beacon_node/Cargo.toml | 9 + beacon_node/beacon_chain/Cargo.toml | 9 +- beacon_node/beacon_chain/src/beacon_chain.rs | 137 +-- beacon_node/beacon_chain/src/builder.rs | 622 ++++++++++ beacon_node/beacon_chain/src/errors.rs | 3 + beacon_node/beacon_chain/src/eth1_chain.rs | 1096 ++++++++++++++++- beacon_node/beacon_chain/src/events.rs | 10 + beacon_node/beacon_chain/src/fork_choice.rs | 8 +- beacon_node/beacon_chain/src/lib.rs | 9 +- beacon_node/beacon_chain/src/test_utils.rs | 93 +- beacon_node/beacon_chain/tests/tests.rs | 13 +- beacon_node/client/Cargo.toml | 9 + beacon_node/client/src/builder.rs | 715 +++++++++++ beacon_node/client/src/config.rs | 119 +- beacon_node/client/src/lib.rs | 341 +---- beacon_node/client/src/notifier.rs | 58 - beacon_node/eth1/Cargo.toml | 29 + beacon_node/eth1/src/block_cache.rs | 271 ++++ beacon_node/eth1/src/deposit_cache.rs | 371 ++++++ beacon_node/eth1/src/deposit_log.rs | 107 ++ beacon_node/eth1/src/http.rs | 405 ++++++ beacon_node/eth1/src/inner.rs | 27 + beacon_node/eth1/src/lib.rs | 11 + beacon_node/eth1/src/service.rs | 643 ++++++++++ beacon_node/eth1/tests/test.rs | 713 +++++++++++ beacon_node/eth2-libp2p/src/behaviour.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/mod.rs | 4 +- beacon_node/genesis/Cargo.toml | 28 + beacon_node/genesis/src/common.rs | 44 + .../genesis/src/eth1_genesis_service.rs | 379 ++++++ beacon_node/genesis/src/interop.rs | 142 +++ beacon_node/genesis/src/lib.rs | 31 + beacon_node/genesis/tests/tests.rs | 105 ++ beacon_node/network/src/message_handler.rs | 2 +- beacon_node/network/src/service.rs | 13 +- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/rest_api/src/lib.rs | 32 +- beacon_node/rpc/src/attestation.rs | 12 +- beacon_node/rpc/src/beacon_block.rs | 12 +- beacon_node/rpc/src/beacon_node.rs | 11 +- beacon_node/rpc/src/lib.rs | 11 +- beacon_node/rpc/src/validator.rs | 11 +- beacon_node/src/{main.rs => cli.rs} | 157 +-- beacon_node/src/config.rs | 109 +- beacon_node/src/lib.rs | 153 +++ beacon_node/src/run.rs | 138 --- beacon_node/tests/test.rs | 40 + beacon_node/websocket_server/Cargo.toml | 1 - beacon_node/websocket_server/src/lib.rs | 45 +- book/src/cli.md | 57 +- book/src/setup.md | 12 + book/src/simple-testnet.md | 18 +- book/src/testnets.md | 26 +- eth2/lmd_ghost/tests/test.rs | 9 +- eth2/operation_pool/src/lib.rs | 2 +- eth2/state_processing/src/genesis.rs | 30 +- eth2/state_processing/src/lib.rs | 2 +- .../src/per_block_processing.rs | 2 +- .../per_block_processing/signature_sets.rs | 22 +- .../per_block_processing/verify_deposit.rs | 10 +- eth2/types/src/beacon_state.rs | 2 +- eth2/types/src/chain_spec.rs | 40 +- eth2/types/src/deposit.rs | 2 + eth2/types/src/deposit_data.rs | 10 +- eth2/types/src/eth1_data.rs | 13 +- eth2/types/src/fork.rs | 29 - eth2/types/src/lib.rs | 2 +- .../builders/testing_beacon_block_builder.rs | 8 +- .../builders/testing_deposit_builder.rs | 14 +- eth2/utils/eth2_config/Cargo.toml | 1 - eth2/utils/eth2_config/src/lib.rs | 41 +- eth2/utils/remote_beacon_node/Cargo.toml | 14 + eth2/utils/remote_beacon_node/src/lib.rs | 141 +++ lcli/Cargo.toml | 4 + lcli/src/deposit_contract.rs | 78 ++ lcli/src/main.rs | 55 +- lighthouse/Cargo.toml | 20 + lighthouse/environment/Cargo.toml | 19 + lighthouse/environment/src/lib.rs | 241 ++++ lighthouse/src/main.rs | 165 +++ scripts/ganache_test_node.sh | 8 + scripts/whiteblock_start.sh | 8 +- tests/eth1_test_rig/.gitignore | 1 + tests/eth1_test_rig/Cargo.toml | 19 + tests/eth1_test_rig/build.rs | 95 ++ tests/eth1_test_rig/src/ganache.rs | 157 +++ tests/eth1_test_rig/src/lib.rs | 240 ++++ tests/node_test_rig/Cargo.toml | 18 + tests/node_test_rig/src/lib.rs | 67 + validator_client/Cargo.toml | 7 +- validator_client/src/cli.rs | 123 ++ validator_client/src/duties/mod.rs | 12 +- validator_client/src/lib.rs | 260 +++- validator_client/src/main.rs | 354 ------ validator_client/src/service.rs | 111 +- 99 files changed, 8263 insertions(+), 1631 deletions(-) create mode 100644 beacon_node/beacon_chain/src/builder.rs create mode 100644 beacon_node/client/src/builder.rs delete mode 100644 beacon_node/client/src/notifier.rs create mode 100644 beacon_node/eth1/Cargo.toml create mode 100644 beacon_node/eth1/src/block_cache.rs create mode 100644 beacon_node/eth1/src/deposit_cache.rs create mode 100644 beacon_node/eth1/src/deposit_log.rs create mode 100644 beacon_node/eth1/src/http.rs create mode 100644 beacon_node/eth1/src/inner.rs create mode 100644 beacon_node/eth1/src/lib.rs create mode 100644 beacon_node/eth1/src/service.rs create mode 100644 beacon_node/eth1/tests/test.rs create mode 100644 beacon_node/genesis/Cargo.toml create mode 100644 beacon_node/genesis/src/common.rs create mode 100644 beacon_node/genesis/src/eth1_genesis_service.rs create mode 100644 beacon_node/genesis/src/interop.rs create mode 100644 beacon_node/genesis/src/lib.rs create mode 100644 beacon_node/genesis/tests/tests.rs rename beacon_node/src/{main.rs => cli.rs} (74%) create mode 100644 beacon_node/src/lib.rs delete mode 100644 beacon_node/src/run.rs create mode 100644 beacon_node/tests/test.rs create mode 100644 eth2/utils/remote_beacon_node/Cargo.toml create mode 100644 eth2/utils/remote_beacon_node/src/lib.rs create mode 100644 lcli/src/deposit_contract.rs create mode 100644 lighthouse/Cargo.toml create mode 100644 lighthouse/environment/Cargo.toml create mode 100644 lighthouse/environment/src/lib.rs create mode 100644 lighthouse/src/main.rs create mode 100755 scripts/ganache_test_node.sh create mode 100644 tests/eth1_test_rig/.gitignore create mode 100644 tests/eth1_test_rig/Cargo.toml create mode 100644 tests/eth1_test_rig/build.rs create mode 100644 tests/eth1_test_rig/src/ganache.rs create mode 100644 tests/eth1_test_rig/src/lib.rs create mode 100644 tests/node_test_rig/Cargo.toml create mode 100644 tests/node_test_rig/src/lib.rs create mode 100644 validator_client/src/cli.rs delete mode 100644 validator_client/src/main.rs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1636d51724..3b26fa79da 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,7 @@ #Adapted from https://users.rust-lang.org/t/my-gitlab-config-docs-tests/16396 default: - image: 'sigp/lighthouse:latest' + image: 'sigp/lighthouse:eth1' cache: paths: - tests/ef_tests/*-v0.8.3.tar.gz diff --git a/Cargo.toml b/Cargo.toml index 2edbced095..f7abd8ae2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,13 +33,18 @@ members = [ "beacon_node/eth2-libp2p", "beacon_node/rpc", "beacon_node/version", + "beacon_node/eth1", "beacon_node/beacon_chain", "beacon_node/websocket_server", "tests/ef_tests", + "tests/eth1_test_rig", + "tests/node_test_rig", "lcli", "protos", "validator_client", "account_manager", + "lighthouse", + "lighthouse/environment" ] [patch] diff --git a/Dockerfile b/Dockerfile index e2b5269631..0aa5582066 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,6 +19,8 @@ RUN git clone https://github.com/google/protobuf.git && \ cd .. && \ rm -r protobuf +RUN apt-get install -y nodejs npm +RUN npm install -g ganache-cli --unsafe-perm RUN mkdir -p /cache/cargocache && chmod -R ugo+rwX /cache/cargocache diff --git a/README.md b/README.md index 05a481b4d6..793a275ef8 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim [Swagger Badge]: https://img.shields.io/badge/Open%20API-0.2.0-success [Swagger Link]: https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0 -![terminalize](https://i.postimg.cc/Y0BQ0z3R/terminalize.gif) +![terminalize](https://i.postimg.cc/kG11dpCW/lighthouse-cli-png.gif) ## Overview @@ -47,7 +47,7 @@ Current development overview: - ~~**April 2019**: Inital single-client testnets.~~ - ~~**September 2019**: Inter-operability with other Ethereum 2.0 clients.~~ -- **Early-October 2019**: `lighthouse-0.0.1` release: All major phase 0 +- **Q4 2019**: `lighthouse-0.0.1` release: All major phase 0 features implemented. - **Q4 2019**: Public, multi-client testnet with user-facing functionality. - **Q4 2019**: Third-party security review. diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 8238b5f8da..57bf4f7bcc 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -4,6 +4,13 @@ version = "0.1.0" authors = ["Paul Hauner ", "Age Manning { /// inclusion in a block. pub op_pool: OperationPool, /// Provides information from the Ethereum 1 (PoW) chain. - pub eth1_chain: Eth1Chain, + pub eth1_chain: Option>, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. - canonical_head: RwLock>, + pub(crate) canonical_head: RwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical @@ -124,119 +123,12 @@ pub struct BeaconChain { /// A handler for events generated by the beacon chain. pub event_handler: T::EventHandler, /// Logging to CLI, etc. - log: Logger, + pub(crate) log: Logger, } -type BeaconInfo = (BeaconBlock, BeaconState); +type BeaconBlockAndState = (BeaconBlock, BeaconState); impl BeaconChain { - /// Instantiate a new Beacon Chain, from genesis. - pub fn from_genesis( - store: Arc, - eth1_backend: T::Eth1Chain, - event_handler: T::EventHandler, - mut genesis_state: BeaconState, - mut genesis_block: BeaconBlock, - spec: ChainSpec, - log: Logger, - ) -> Result { - genesis_state.build_all_caches(&spec)?; - - let genesis_state_root = genesis_state.canonical_root(); - store.put(&genesis_state_root, &genesis_state)?; - - genesis_block.state_root = genesis_state_root; - - let genesis_block_root = genesis_block.block_header().canonical_root(); - store.put(&genesis_block_root, &genesis_block)?; - - // Also store the genesis block under the `ZERO_HASH` key. - let genesis_block_root = genesis_block.canonical_root(); - store.put(&Hash256::zero(), &genesis_block)?; - - let canonical_head = RwLock::new(CheckPoint::new( - genesis_block.clone(), - genesis_block_root, - genesis_state.clone(), - genesis_state_root, - )); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time), - Duration::from_millis(spec.milliseconds_per_slot), - ); - - info!(log, "Beacon chain initialized from genesis"; - "validator_count" => genesis_state.validators.len(), - "state_root" => format!("{}", genesis_state_root), - "block_root" => format!("{}", genesis_block_root), - ); - - Ok(Self { - spec, - slot_clock, - op_pool: OperationPool::new(), - eth1_chain: Eth1Chain::new(eth1_backend), - canonical_head, - genesis_block_root, - fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), - event_handler, - store, - log, - }) - } - - /// Attempt to load an existing instance from the given `store`. - pub fn from_store( - store: Arc, - eth1_backend: T::Eth1Chain, - event_handler: T::EventHandler, - spec: ChainSpec, - log: Logger, - ) -> Result>, Error> { - let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); - let p: PersistedBeaconChain = match store.get(&key) { - Err(e) => return Err(e.into()), - Ok(None) => return Ok(None), - Ok(Some(p)) => p, - }; - - let state = &p.canonical_head.beacon_state; - - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(state.genesis_time), - Duration::from_millis(spec.milliseconds_per_slot), - ); - - let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; - let last_finalized_block = &p.canonical_head.beacon_block; - - let op_pool = p.op_pool.into_operation_pool(state, &spec); - - info!(log, "Beacon chain initialized from store"; - "head_root" => format!("{}", p.canonical_head.beacon_block_root), - "head_epoch" => format!("{}", p.canonical_head.beacon_block.slot.epoch(T::EthSpec::slots_per_epoch())), - "finalized_root" => format!("{}", last_finalized_root), - "finalized_epoch" => format!("{}", last_finalized_block.slot.epoch(T::EthSpec::slots_per_epoch())), - ); - - Ok(Some(BeaconChain { - spec, - slot_clock, - fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), - op_pool, - event_handler, - eth1_chain: Eth1Chain::new(eth1_backend), - canonical_head: RwLock::new(p.canonical_head), - genesis_block_root: p.genesis_block_root, - store, - log, - })) - } - /// Attempt to save this instance to `self.store`. pub fn persist(&self) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::PERSIST_CHAIN); @@ -1270,7 +1162,7 @@ impl BeaconChain { &self, randao_reveal: Signature, slot: Slot, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { let state = self .state_at_slot(slot - 1) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; @@ -1291,10 +1183,15 @@ impl BeaconChain { mut state: BeaconState, produce_at_slot: Slot, randao_reveal: Signature, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); + let eth1_chain = self + .eth1_chain + .as_ref() + .ok_or_else(|| BlockProductionError::NoEth1ChainConnection)?; + // If required, transition the new state to the present slot. while state.slot < produce_at_slot { per_slot_processing(&mut state, &self.spec)?; @@ -1319,17 +1216,19 @@ impl BeaconChain { let mut block = BeaconBlock { slot: state.slot, parent_root, - state_root: Hash256::zero(), // Updated after the state is calculated. - signature: Signature::empty_signature(), // To be completed by a validator. + state_root: Hash256::zero(), + // The block is not signed here, that is the task of a validator client. + signature: Signature::empty_signature(), body: BeaconBlockBody { randao_reveal, - // TODO: replace with real data. - eth1_data: self.eth1_chain.eth1_data_for_block_production(&state)?, + eth1_data: eth1_chain.eth1_data_for_block_production(&state, &self.spec)?, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: self.eth1_chain.deposits_for_block_inclusion(&state)?.into(), + deposits: eth1_chain + .deposits_for_block_inclusion(&state, &self.spec)? + .into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs new file mode 100644 index 0000000000..cef818359d --- /dev/null +++ b/beacon_node/beacon_chain/src/builder.rs @@ -0,0 +1,622 @@ +use crate::eth1_chain::CachingEth1Backend; +use crate::events::NullEventHandler; +use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +use crate::{ + BeaconChain, BeaconChainTypes, CheckPoint, Eth1Chain, Eth1ChainBackend, EventHandler, + ForkChoice, +}; +use eth1::Config as Eth1Config; +use lmd_ghost::{LmdGhost, ThreadSafeReducedTree}; +use operation_pool::OperationPool; +use parking_lot::RwLock; +use slog::{info, Logger}; +use slot_clock::{SlotClock, TestingSlotClock}; +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::Duration; +use store::Store; +use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot}; + +/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing +/// functionality and only exists to satisfy the type system. +pub struct Witness( + PhantomData<( + TStore, + TSlotClock, + TLmdGhost, + TEth1Backend, + TEthSpec, + TEventHandler, + )>, +); + +impl BeaconChainTypes + for Witness +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + type Store = TStore; + type SlotClock = TSlotClock; + type LmdGhost = TLmdGhost; + type Eth1Chain = TEth1Backend; + type EthSpec = TEthSpec; + type EventHandler = TEventHandler; +} + +/// Builds a `BeaconChain` by either creating anew from genesis, or, resuming from an existing chain +/// persisted to `store`. +/// +/// Types may be elided and the compiler will infer them if all necessary builder methods have been +/// called. If type inference errors are being raised, it is likely that not all required methods +/// have been called. +/// +/// See the tests for an example of a complete working example. +pub struct BeaconChainBuilder { + store: Option>, + /// The finalized checkpoint to anchor the chain. May be genesis or a higher + /// checkpoint. + pub finalized_checkpoint: Option>, + genesis_block_root: Option, + op_pool: Option>, + fork_choice: Option>, + eth1_chain: Option>, + event_handler: Option, + slot_clock: Option, + spec: ChainSpec, + log: Option, +} + +impl + BeaconChainBuilder< + Witness, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Returns a new builder. + /// + /// The `_eth_spec_instance` parameter is only supplied to make concrete the `TEthSpec` trait. + /// This should generally be either the `MinimalEthSpec` or `MainnetEthSpec` types. + pub fn new(_eth_spec_instance: TEthSpec) -> Self { + Self { + store: None, + finalized_checkpoint: None, + genesis_block_root: None, + op_pool: None, + fork_choice: None, + eth1_chain: None, + event_handler: None, + slot_clock: None, + spec: TEthSpec::default_spec(), + log: None, + } + } + + /// Override the default spec (as defined by `TEthSpec`). + /// + /// This method should generally be called immediately after `Self::new` to ensure components + /// are started with a consistent spec. + pub fn custom_spec(mut self, spec: ChainSpec) -> Self { + self.spec = spec; + self + } + + /// Sets the store (database). + /// + /// Should generally be called early in the build chain. + pub fn store(mut self, store: Arc) -> Self { + self.store = Some(store); + self + } + + /// Sets the logger. + /// + /// Should generally be called early in the build chain. + pub fn logger(mut self, logger: Logger) -> Self { + self.log = Some(logger); + self + } + + /// Attempt to load an existing chain from the builder's `Store`. + /// + /// May initialize several components; including the op_pool and finalized checkpoints. + pub fn resume_from_db(mut self) -> Result { + let log = self + .log + .as_ref() + .ok_or_else(|| "resume_from_db requires a log".to_string())?; + + info!( + log, + "Starting beacon chain"; + "method" => "resume" + ); + + let store = self + .store + .clone() + .ok_or_else(|| "load_from_store requires a store.".to_string())?; + + let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); + let p: PersistedBeaconChain< + Witness, + > = match store.get(&key) { + Err(e) => { + return Err(format!( + "DB error when reading persisted beacon chain: {:?}", + e + )) + } + Ok(None) => return Err("No persisted beacon chain found in store".into()), + Ok(Some(p)) => p, + }; + + self.op_pool = Some( + p.op_pool + .into_operation_pool(&p.canonical_head.beacon_state, &self.spec), + ); + + self.finalized_checkpoint = Some(p.canonical_head); + self.genesis_block_root = Some(p.genesis_block_root); + + Ok(self) + } + + /// Starts a new chain from a genesis state. + pub fn genesis_state( + mut self, + mut beacon_state: BeaconState, + ) -> Result { + let store = self + .store + .clone() + .ok_or_else(|| "genesis_state requires a store")?; + + let mut beacon_block = genesis_block(&beacon_state, &self.spec); + + beacon_state + .build_all_caches(&self.spec) + .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?; + + let beacon_state_root = beacon_state.canonical_root(); + beacon_block.state_root = beacon_state_root; + let beacon_block_root = beacon_block.canonical_root(); + + self.genesis_block_root = Some(beacon_block_root); + + store + .put(&beacon_state_root, &beacon_state) + .map_err(|e| format!("Failed to store genesis state: {:?}", e))?; + store + .put(&beacon_block_root, &beacon_block) + .map_err(|e| format!("Failed to store genesis block: {:?}", e))?; + + // Store the genesis block under the `ZERO_HASH` key. + store.put(&Hash256::zero(), &beacon_block).map_err(|e| { + format!( + "Failed to store genesis block under 0x00..00 alias: {:?}", + e + ) + })?; + + self.finalized_checkpoint = Some(CheckPoint { + beacon_block_root, + beacon_block, + beacon_state_root, + beacon_state, + }); + + Ok(self.empty_op_pool()) + } + + /// Sets the `BeaconChain` fork choice backend. + /// + /// Requires the store and state to have been specified earlier in the build chain. + pub fn fork_choice_backend(mut self, backend: TLmdGhost) -> Result { + let store = self + .store + .clone() + .ok_or_else(|| "reduced_tree_fork_choice requires a store")?; + let genesis_block_root = self + .genesis_block_root + .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; + + self.fork_choice = Some(ForkChoice::new(store, backend, genesis_block_root)); + + Ok(self) + } + + /// Sets the `BeaconChain` eth1 backend. + pub fn eth1_backend(mut self, backend: Option) -> Self { + self.eth1_chain = backend.map(Eth1Chain::new); + self + } + + /// Sets the `BeaconChain` event handler backend. + /// + /// For example, provide `WebSocketSender` as a `handler`. + pub fn event_handler(mut self, handler: TEventHandler) -> Self { + self.event_handler = Some(handler); + self + } + + /// Sets the `BeaconChain` slot clock. + /// + /// For example, provide `SystemTimeSlotClock` as a `clock`. + pub fn slot_clock(mut self, clock: TSlotClock) -> Self { + self.slot_clock = Some(clock); + self + } + + /// Creates a new, empty operation pool. + fn empty_op_pool(mut self) -> Self { + self.op_pool = Some(OperationPool::new()); + self + } + + /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied. + /// + /// An error will be returned at runtime if all required parameters have not been configured. + /// + /// Will also raise ambiguous type errors at compile time if some parameters have not been + /// configured. + #[allow(clippy::type_complexity)] // I think there's nothing to be gained here from a type alias. + pub fn build( + self, + ) -> Result< + BeaconChain>, + String, + > { + let mut canonical_head = self + .finalized_checkpoint + .ok_or_else(|| "Cannot build without a state".to_string())?; + + canonical_head + .beacon_state + .build_all_caches(&self.spec) + .map_err(|e| format!("Failed to build state caches: {:?}", e))?; + + let log = self + .log + .ok_or_else(|| "Cannot build without a logger".to_string())?; + + if canonical_head.beacon_block.state_root != canonical_head.beacon_state_root { + return Err("beacon_block.state_root != beacon_state".to_string()); + } + + let beacon_chain = BeaconChain { + spec: self.spec, + store: self + .store + .ok_or_else(|| "Cannot build without store".to_string())?, + slot_clock: self + .slot_clock + .ok_or_else(|| "Cannot build without slot clock".to_string())?, + op_pool: self + .op_pool + .ok_or_else(|| "Cannot build without op pool".to_string())?, + eth1_chain: self.eth1_chain, + canonical_head: RwLock::new(canonical_head), + genesis_block_root: self + .genesis_block_root + .ok_or_else(|| "Cannot build without a genesis block root".to_string())?, + fork_choice: self + .fork_choice + .ok_or_else(|| "Cannot build without a fork choice".to_string())?, + event_handler: self + .event_handler + .ok_or_else(|| "Cannot build without an event handler".to_string())?, + log: log.clone(), + }; + + info!( + log, + "Beacon chain initialized"; + "head_state" => format!("{}", beacon_chain.head().beacon_state_root), + "head_block" => format!("{}", beacon_chain.head().beacon_block_root), + "head_slot" => format!("{}", beacon_chain.head().beacon_block.slot), + ); + + Ok(beacon_chain) + } +} + +impl + BeaconChainBuilder< + Witness< + TStore, + TSlotClock, + ThreadSafeReducedTree, + TEth1Backend, + TEthSpec, + TEventHandler, + >, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Initializes a new, empty (no recorded votes or blocks) fork choice, using the + /// `ThreadSafeReducedTree` backend. + /// + /// Requires the store and state to be initialized. + pub fn empty_reduced_tree_fork_choice(self) -> Result { + let store = self + .store + .clone() + .ok_or_else(|| "reduced_tree_fork_choice requires a store")?; + let finalized_checkpoint = &self + .finalized_checkpoint + .as_ref() + .expect("should have finalized checkpoint"); + + let backend = ThreadSafeReducedTree::new( + store.clone(), + &finalized_checkpoint.beacon_block, + finalized_checkpoint.beacon_block_root, + ); + + self.fork_choice_backend(backend) + } +} + +impl + BeaconChainBuilder< + Witness< + TStore, + TSlotClock, + TLmdGhost, + CachingEth1Backend, + TEthSpec, + TEventHandler, + >, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Sets the `BeaconChain` eth1 back-end to `CachingEth1Backend`. + pub fn caching_eth1_backend(self, backend: CachingEth1Backend) -> Self { + self.eth1_backend(Some(backend)) + } + + /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. + pub fn no_eth1_backend(self) -> Self { + self.eth1_backend(None) + } + + /// Sets the `BeaconChain` eth1 back-end to produce predictably junk data when producing blocks. + pub fn dummy_eth1_backend(mut self) -> Result { + let log = self + .log + .as_ref() + .ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?; + let store = self + .store + .clone() + .ok_or_else(|| "dummy_eth1_backend requires a store.".to_string())?; + + let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone(), store); + + let mut eth1_chain = Eth1Chain::new(backend); + eth1_chain.use_dummy_backend = true; + + self.eth1_chain = Some(eth1_chain); + + Ok(self) + } +} + +impl + BeaconChainBuilder< + Witness, + > +where + TStore: Store + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Sets the `BeaconChain` slot clock to `TestingSlotClock`. + /// + /// Requires the state to be initialized. + pub fn testing_slot_clock(self, slot_duration: Duration) -> Result { + let genesis_time = self + .finalized_checkpoint + .as_ref() + .ok_or_else(|| "testing_slot_clock requires an initialized state")? + .beacon_state + .genesis_time; + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(genesis_time), + slot_duration, + ); + + Ok(self.slot_clock(slot_clock)) + } +} + +impl + BeaconChainBuilder< + Witness>, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, +{ + /// Sets the `BeaconChain` event handler to `NullEventHandler`. + pub fn null_event_handler(self) -> Self { + let handler = NullEventHandler::default(); + self.event_handler(handler) + } +} + +fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { + let mut genesis_block = BeaconBlock::empty(&spec); + + genesis_block.state_root = genesis_state.canonical_root(); + + genesis_block +} + +#[cfg(test)] +mod test { + use super::*; + use eth2_hashing::hash; + use genesis::{generate_deterministic_keypairs, interop_genesis_state}; + use sloggers::{null::NullLoggerBuilder, Build}; + use ssz::Encode; + use std::time::Duration; + use store::MemoryStore; + use types::{EthSpec, MinimalEthSpec, Slot}; + + type TestEthSpec = MinimalEthSpec; + + fn get_logger() -> Logger { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } + + #[test] + fn recent_genesis() { + let validator_count = 8; + let genesis_time = 13371337; + + let log = get_logger(); + let store = Arc::new(MemoryStore::open()); + let spec = MinimalEthSpec::default_spec(); + + let genesis_state = interop_genesis_state( + &generate_deterministic_keypairs(validator_count), + genesis_time, + &spec, + ) + .expect("should create interop genesis state"); + + let chain = BeaconChainBuilder::new(MinimalEthSpec) + .logger(log.clone()) + .store(store.clone()) + .genesis_state(genesis_state) + .expect("should build state using recent genesis") + .dummy_eth1_backend() + .expect("should build the dummy eth1 backend") + .null_event_handler() + .testing_slot_clock(Duration::from_secs(1)) + .expect("should configure testing slot clock") + .empty_reduced_tree_fork_choice() + .expect("should add fork choice to builder") + .build() + .expect("should build"); + + let head = chain.head(); + let state = head.beacon_state; + let block = head.beacon_block; + + assert_eq!(state.slot, Slot::new(0), "should start from genesis"); + assert_eq!( + state.genesis_time, 13371337, + "should have the correct genesis time" + ); + assert_eq!( + block.state_root, + state.canonical_root(), + "block should have correct state root" + ); + assert_eq!( + chain + .store + .get::>(&Hash256::zero()) + .expect("should read db") + .expect("should find genesis block"), + block, + "should store genesis block under zero hash alias" + ); + assert_eq!( + state.validators.len(), + validator_count, + "should have correct validator count" + ); + assert_eq!( + chain.genesis_block_root, + block.canonical_root(), + "should have correct genesis block root" + ); + } + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0306899288..f8046980fd 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -55,6 +55,9 @@ pub enum BlockProductionError { BlockProcessingError(BlockProcessingError), Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), + /// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it + /// cannot produce blocks. + NoEth1ChainConnection, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index e4ccee3ba4..78f96ef177 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,27 +1,89 @@ -use crate::BeaconChainTypes; +use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2_hashing::hash; +use exit_future::Exit; +use futures::Future; +use integer_sqrt::IntegerSquareRoot; +use rand::prelude::*; +use slog::{crit, Logger}; +use std::collections::HashMap; +use std::iter::DoubleEndedIterator; +use std::iter::FromIterator; use std::marker::PhantomData; -use types::{BeaconState, Deposit, Eth1Data, EthSpec, Hash256}; +use std::sync::Arc; +use store::{Error as StoreError, Store}; +use types::{ + BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, + DEPOSIT_TREE_DEPTH, +}; -type Result = std::result::Result; +type BlockNumber = u64; +type Eth1DataBlockNumber = HashMap; +type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>; -/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. -pub struct Eth1Chain { - backend: T::Eth1Chain, +#[derive(Debug, PartialEq)] +pub enum Error { + /// Unable to return an Eth1Data for the given epoch. + EpochUnavailable, + /// An error from the backend service (e.g., the web3 data fetcher). + BackendError(String), + /// The deposit index of the state is higher than the deposit contract. This is a critical + /// consensus error. + DepositIndexTooHigh, + /// The current state was unable to return the root for the state at the start of the eth1 + /// voting period. + UnableToGetPreviousStateRoot(BeaconStateError), + /// The state required to find the previous eth1 block was not found in the store. + PreviousStateNotInDB, + /// There was an error accessing an object in the database. + StoreError(StoreError), + /// The eth1 head block at the start of the eth1 voting period is unknown. + /// + /// The eth1 caches are likely stale. + UnknownVotingPeriodHead, + /// The block that was previously voted into the state is unknown. + /// + /// The eth1 caches are stale, or a junk value was voted into the chain. + UnknownPreviousEth1BlockHash, } -impl Eth1Chain { - pub fn new(backend: T::Eth1Chain) -> Self { - Self { backend } +/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. +pub struct Eth1Chain +where + T: Eth1ChainBackend, + E: EthSpec, +{ + backend: T, + /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method + /// will be used instead. + pub use_dummy_backend: bool, + _phantom: PhantomData, +} + +impl Eth1Chain +where + T: Eth1ChainBackend, + E: EthSpec, +{ + pub fn new(backend: T) -> Self { + Self { + backend, + use_dummy_backend: false, + _phantom: PhantomData, + } } /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. pub fn eth1_data_for_block_production( &self, - state: &BeaconState, - ) -> Result { - self.backend.eth1_data(state) + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + if self.use_dummy_backend { + DummyEth1ChainBackend::default().eth1_data(state, spec) + } else { + self.backend.eth1_data(state, spec) + } } /// Returns a list of `Deposits` that may be included in a block. @@ -30,30 +92,22 @@ impl Eth1Chain { /// invalid. pub fn deposits_for_block_inclusion( &self, - state: &BeaconState, - ) -> Result> { - let deposits = self.backend.queued_deposits(state)?; - - // TODO: truncate deposits if required. - - Ok(deposits) + state: &BeaconState, + spec: &ChainSpec, + ) -> Result, Error> { + if self.use_dummy_backend { + DummyEth1ChainBackend::default().queued_deposits(state, spec) + } else { + self.backend.queued_deposits(state, spec) + } } } -#[derive(Debug, PartialEq)] -pub enum Error { - /// Unable to return an Eth1Data for the given epoch. - EpochUnavailable, - /// An error from the backend service (e.g., the web3 data fetcher). - BackendError(String), -} - pub trait Eth1ChainBackend: Sized + Send + Sync { - fn new(server: String) -> Result; - /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. - fn eth1_data(&self, beacon_state: &BeaconState) -> Result; + fn eth1_data(&self, beacon_state: &BeaconState, spec: &ChainSpec) + -> Result; /// Returns all `Deposits` between `state.eth1_deposit_index` and /// `state.eth1_data.deposit_count`. @@ -62,19 +116,22 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { /// /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; + fn queued_deposits( + &self, + beacon_state: &BeaconState, + spec: &ChainSpec, + ) -> Result, Error>; } -pub struct InteropEth1ChainBackend { - _phantom: PhantomData, -} +/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. +/// +/// Never creates deposits, therefore the validator set is static. +/// +/// This was used in the 2019 Canada interop workshops. +pub struct DummyEth1ChainBackend(PhantomData); -impl Eth1ChainBackend for InteropEth1ChainBackend { - fn new(_server: String) -> Result { - Ok(Self::default()) - } - - fn eth1_data(&self, state: &BeaconState) -> Result { +impl Eth1ChainBackend for DummyEth1ChainBackend { + fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { let current_epoch = state.current_epoch(); let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; @@ -89,17 +146,256 @@ impl Eth1ChainBackend for InteropEth1ChainBackend { }) } - fn queued_deposits(&self, _: &BeaconState) -> Result> { + fn queued_deposits(&self, _: &BeaconState, _: &ChainSpec) -> Result, Error> { Ok(vec![]) } } -impl Default for InteropEth1ChainBackend { +impl Default for DummyEth1ChainBackend { fn default() -> Self { + Self(PhantomData) + } +} + +/// Maintains a cache of eth1 blocks and deposits and provides functions to allow block producers +/// to include new deposits and vote on `Eth1Data`. +/// +/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for +/// information. +#[derive(Clone)] +pub struct CachingEth1Backend { + pub core: HttpService, + store: Arc, + log: Logger, + _phantom: PhantomData, +} + +impl CachingEth1Backend { + /// Instantiates `self` with empty caches. + /// + /// Does not connect to the eth1 node or start any tasks to keep the cache updated. + pub fn new(config: Eth1Config, log: Logger, store: Arc) -> Self { Self { + core: HttpService::new(config, log.clone()), + store, + log, _phantom: PhantomData, } } + + /// Starts the routine which connects to the external eth1 node and updates the caches. + pub fn start(&self, exit: Exit) -> impl Future { + self.core.auto_update(exit) + } + + /// Instantiates `self` from an existing service. + pub fn from_service(service: HttpService, store: Arc) -> Self { + Self { + log: service.log.clone(), + core: service, + store, + _phantom: PhantomData, + } + } +} + +impl Eth1ChainBackend for CachingEth1Backend { + fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { + let prev_eth1_hash = eth1_block_hash_at_start_of_voting_period(self.store.clone(), state)?; + + let blocks = self.core.blocks().read(); + + let eth1_data = eth1_data_sets(blocks.iter(), state, prev_eth1_hash, spec) + .map(|(new_eth1_data, all_eth1_data)| { + collect_valid_votes(state, new_eth1_data, all_eth1_data) + }) + .and_then(find_winning_vote) + .unwrap_or_else(|| { + crit!( + self.log, + "Unable to cast valid vote for Eth1Data"; + "hint" => "check connection to eth1 node", + "reason" => "no votes", + ); + random_eth1_data() + }); + + Ok(eth1_data) + } + + fn queued_deposits( + &self, + state: &BeaconState, + _spec: &ChainSpec, + ) -> Result, Error> { + let deposit_count = state.eth1_data.deposit_count; + let deposit_index = state.eth1_deposit_index; + + if deposit_index > deposit_count { + Err(Error::DepositIndexTooHigh) + } else if deposit_index == deposit_count { + Ok(vec![]) + } else { + let next = deposit_index; + let last = std::cmp::min(deposit_count, next + T::MaxDeposits::to_u64()); + + self.core + .deposits() + .read() + .cache + .get_deposits(next..last, deposit_count, DEPOSIT_TREE_DEPTH) + .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) + .map(|(_deposit_root, deposits)| deposits) + } + } +} + +/// Produces an `Eth1Data` with all fields sourced from `rand::thread_rng()`. +fn random_eth1_data() -> Eth1Data { + let mut rng = rand::thread_rng(); + + macro_rules! rand_bytes { + ($num_bytes: expr) => {{ + let mut arr = [0_u8; $num_bytes]; + rng.fill(&mut arr[..]); + arr + }}; + } + + // Note: it seems easier to just use `Hash256::random(..)` to get the hash values, however I + // prefer to be explicit about the source of entropy instead of relying upon the maintainers of + // `Hash256` to ensure their entropy is suitable for our purposes. + + Eth1Data { + block_hash: Hash256::from_slice(&rand_bytes!(32)), + deposit_root: Hash256::from_slice(&rand_bytes!(32)), + deposit_count: u64::from_le_bytes(rand_bytes!(8)), + } +} + +/// Returns `state.eth1_data.block_hash` at the start of eth1 voting period defined by +/// `state.slot`. +fn eth1_block_hash_at_start_of_voting_period( + store: Arc, + state: &BeaconState, +) -> Result { + let period = T::SlotsPerEth1VotingPeriod::to_u64(); + + // Find `state.eth1_data.block_hash` for the state at the start of the voting period. + if state.slot % period < period / 2 { + // When the state is less than half way through the period we can safely assume that + // the eth1_data has not changed since the start of the period. + Ok(state.eth1_data.block_hash) + } else { + let slot = (state.slot / period) * period; + let prev_state_root = state + .get_state_root(slot) + .map_err(|e| Error::UnableToGetPreviousStateRoot(e))?; + + store + .get::>(&prev_state_root) + .map_err(|e| Error::StoreError(e))? + .map(|state| state.eth1_data.block_hash) + .ok_or_else(|| Error::PreviousStateNotInDB) + } +} + +/// Calculates and returns `(new_eth1_data, all_eth1_data)` for the given `state`, based upon the +/// blocks in the `block` iterator. +/// +/// `prev_eth1_hash` is the `eth1_data.block_hash` at the start of the voting period defined by +/// `state.slot`. +fn eth1_data_sets<'a, T: EthSpec, I>( + blocks: I, + state: &BeaconState, + prev_eth1_hash: Hash256, + spec: &ChainSpec, +) -> Option<(Eth1DataBlockNumber, Eth1DataBlockNumber)> +where + T: EthSpec, + I: DoubleEndedIterator + Clone, +{ + let period = T::SlotsPerEth1VotingPeriod::to_u64(); + let eth1_follow_distance = spec.eth1_follow_distance; + let voting_period_start_slot = (state.slot / period) * period; + let voting_period_start_seconds = slot_start_seconds::( + state.genesis_time, + spec.milliseconds_per_slot, + voting_period_start_slot, + ); + + let in_scope_eth1_data = blocks + .rev() + .skip_while(|eth1_block| eth1_block.timestamp > voting_period_start_seconds) + .skip(eth1_follow_distance as usize) + .filter_map(|block| Some((block.clone().eth1_data()?, block.number))); + + if in_scope_eth1_data + .clone() + .any(|(eth1_data, _)| eth1_data.block_hash == prev_eth1_hash) + { + let new_eth1_data = in_scope_eth1_data + .clone() + .take(eth1_follow_distance as usize); + let all_eth1_data = + in_scope_eth1_data.take_while(|(eth1_data, _)| eth1_data.block_hash != prev_eth1_hash); + + Some(( + HashMap::from_iter(new_eth1_data), + HashMap::from_iter(all_eth1_data), + )) + } else { + None + } +} + +/// Selects and counts the votes in `state.eth1_data_votes`, if they appear in `new_eth1_data` or +/// `all_eth1_data` when it is the voting period tail. +fn collect_valid_votes( + state: &BeaconState, + new_eth1_data: Eth1DataBlockNumber, + all_eth1_data: Eth1DataBlockNumber, +) -> Eth1DataVoteCount { + let slots_per_eth1_voting_period = T::SlotsPerEth1VotingPeriod::to_u64(); + + let mut valid_votes = HashMap::new(); + + state + .eth1_data_votes + .iter() + .filter_map(|vote| { + new_eth1_data + .get(vote) + .map(|block_number| (vote.clone(), *block_number)) + .or_else(|| { + let slot = state.slot % slots_per_eth1_voting_period; + let period_tail = slot >= slots_per_eth1_voting_period.integer_sqrt(); + + if period_tail { + all_eth1_data + .get(vote) + .map(|block_number| (vote.clone(), *block_number)) + } else { + None + } + }) + }) + .for_each(|(eth1_data, block_number)| { + valid_votes + .entry((eth1_data, block_number)) + .and_modify(|count| *count += 1) + .or_insert(1_u64); + }); + + valid_votes +} + +/// Selects the winning vote from `valid_votes`. +fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { + valid_votes + .iter() + .max_by_key(|((_eth1_data, block_number), vote_count)| (*vote_count, block_number)) + .map(|((eth1_data, _), _)| eth1_data.clone()) } /// Returns `int` as little-endian bytes with a length of 32. @@ -108,3 +404,719 @@ fn int_to_bytes32(int: u64) -> Vec { vec.resize(32, 0); vec } + +/// Returns the unix-epoch seconds at the start of the given `slot`. +fn slot_start_seconds( + genesis_unix_seconds: u64, + milliseconds_per_slot: u64, + slot: Slot, +) -> u64 { + genesis_unix_seconds + slot.as_u64() * milliseconds_per_slot / 1_000 +} + +#[cfg(test)] +mod test { + use super::*; + use types::{test_utils::DepositTestTask, MinimalEthSpec}; + + type E = MinimalEthSpec; + + fn get_eth1_data(i: u64) -> Eth1Data { + Eth1Data { + block_hash: Hash256::from_low_u64_be(i), + deposit_root: Hash256::from_low_u64_be(u64::max_value() - i), + deposit_count: i, + } + } + + #[test] + fn random_eth1_data_doesnt_panic() { + random_eth1_data(); + } + + #[test] + fn slot_start_time() { + let zero_sec = 0; + assert_eq!(slot_start_seconds::(100, zero_sec, Slot::new(2)), 100); + + let half_sec = 500; + assert_eq!(slot_start_seconds::(100, half_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds::(100, half_sec, Slot::new(1)), 100); + assert_eq!(slot_start_seconds::(100, half_sec, Slot::new(2)), 101); + assert_eq!(slot_start_seconds::(100, half_sec, Slot::new(3)), 101); + + let one_sec = 1_000; + assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(1)), 101); + assert_eq!(slot_start_seconds::(100, one_sec, Slot::new(2)), 102); + + let three_sec = 3_000; + assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(0)), 100); + assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(1)), 103); + assert_eq!(slot_start_seconds::(100, three_sec, Slot::new(2)), 106); + } + + fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { + Eth1Block { + number, + timestamp, + hash: Hash256::from_low_u64_be(number), + deposit_root: Some(Hash256::from_low_u64_be(number)), + deposit_count: Some(number), + } + } + + mod eth1_chain_json_backend { + use super::*; + use environment::null_logger; + use eth1::DepositLog; + use store::MemoryStore; + use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder}; + + fn get_eth1_chain() -> Eth1Chain, E> { + let eth1_config = Eth1Config { + ..Eth1Config::default() + }; + + let log = null_logger().unwrap(); + let store = Arc::new(MemoryStore::open()); + Eth1Chain::new(CachingEth1Backend::new(eth1_config, log, store)) + } + + fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { + let keypair = generate_deterministic_keypair(i as usize); + let mut builder = + TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance); + builder.sign(&DepositTestTask::Valid, &keypair, spec); + let deposit_data = builder.build().data; + + DepositLog { + deposit_data, + block_number: i, + index: i, + } + } + + #[test] + fn deposits_empty_cache() { + let spec = &E::default_spec(); + + let eth1_chain = get_eth1_chain(); + + assert_eq!( + eth1_chain.use_dummy_backend, false, + "test should not use dummy backend" + ); + + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + state.eth1_deposit_index = 0; + state.eth1_data.deposit_count = 0; + + assert!( + eth1_chain + .deposits_for_block_inclusion(&state, spec) + .is_ok(), + "should succeed if cache is empty but no deposits are required" + ); + + state.eth1_data.deposit_count = 1; + + assert!( + eth1_chain + .deposits_for_block_inclusion(&state, spec) + .is_err(), + "should fail to get deposits if required, but cache is empty" + ); + } + + #[test] + fn deposits_with_cache() { + let spec = &E::default_spec(); + + let eth1_chain = get_eth1_chain(); + let max_deposits = ::MaxDeposits::to_u64(); + + assert_eq!( + eth1_chain.use_dummy_backend, false, + "test should not use dummy backend" + ); + + let deposits: Vec<_> = (0..max_deposits + 2) + .map(|i| get_deposit_log(i, spec)) + .inspect(|log| { + eth1_chain + .backend + .core + .deposits() + .write() + .cache + .insert_log(log.clone()) + .expect("should insert log") + }) + .collect(); + + assert_eq!( + eth1_chain.backend.core.deposits().write().cache.len(), + deposits.len(), + "cache should store all logs" + ); + + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + state.eth1_deposit_index = 0; + state.eth1_data.deposit_count = 0; + + assert!( + eth1_chain + .deposits_for_block_inclusion(&state, spec) + .is_ok(), + "should succeed if no deposits are required" + ); + + (0..3).for_each(|initial_deposit_index| { + state.eth1_deposit_index = initial_deposit_index as u64; + + (initial_deposit_index..deposits.len()).for_each(|i| { + state.eth1_data.deposit_count = i as u64; + + let deposits_for_inclusion = eth1_chain + .deposits_for_block_inclusion(&state, spec) + .expect(&format!("should find deposit for {}", i)); + + let expected_len = + std::cmp::min(i - initial_deposit_index, max_deposits as usize); + + assert_eq!( + deposits_for_inclusion.len(), + expected_len, + "should find {} deposits", + expected_len + ); + + let deposit_data_for_inclusion: Vec<_> = deposits_for_inclusion + .into_iter() + .map(|deposit| deposit.data) + .collect(); + + let expected_deposit_data: Vec<_> = deposits[initial_deposit_index + ..std::cmp::min(initial_deposit_index + expected_len, deposits.len())] + .iter() + .map(|log| log.deposit_data.clone()) + .collect(); + + assert_eq!( + deposit_data_for_inclusion, expected_deposit_data, + "should find the correct deposits for {}", + i + ); + }); + }) + } + + #[test] + fn eth1_data_empty_cache() { + let spec = &E::default_spec(); + + let eth1_chain = get_eth1_chain(); + + assert_eq!( + eth1_chain.use_dummy_backend, false, + "test should not use dummy backend" + ); + + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + + let a = eth1_chain + .eth1_data_for_block_production(&state, &spec) + .expect("should produce first random eth1 data"); + let b = eth1_chain + .eth1_data_for_block_production(&state, &spec) + .expect("should produce second random eth1 data"); + + assert!( + a != b, + "random votes should be returned with an empty cache" + ); + } + + #[test] + fn eth1_data_unknown_previous_state() { + let spec = &E::default_spec(); + let period = ::SlotsPerEth1VotingPeriod::to_u64(); + + let eth1_chain = get_eth1_chain(); + let store = eth1_chain.backend.store.clone(); + + assert_eq!( + eth1_chain.use_dummy_backend, false, + "test should not use dummy backend" + ); + + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let mut prev_state = state.clone(); + + prev_state.slot = Slot::new(period * 1_000); + state.slot = Slot::new(period * 1_000 + period / 2); + + (0..2048).for_each(|i| { + eth1_chain + .backend + .core + .blocks() + .write() + .insert_root_or_child(get_eth1_block(i, i)) + .expect("should add blocks to cache"); + }); + + let expected_root = Hash256::from_low_u64_be(u64::max_value()); + prev_state.eth1_data.block_hash = expected_root; + + assert!( + prev_state.eth1_data != state.eth1_data, + "test requires state eth1_data are different" + ); + + store + .put( + &state + .get_state_root(prev_state.slot) + .expect("should find state root"), + &prev_state, + ) + .expect("should store state"); + + let a = eth1_chain + .eth1_data_for_block_production(&state, &spec) + .expect("should produce first random eth1 data"); + let b = eth1_chain + .eth1_data_for_block_production(&state, &spec) + .expect("should produce second random eth1 data"); + + assert!( + a != b, + "random votes should be returned if the previous eth1 data block hash is unknown" + ); + } + } + + mod prev_block_hash { + use super::*; + use store::MemoryStore; + + #[test] + fn without_store_lookup() { + let spec = &E::default_spec(); + let store = Arc::new(MemoryStore::open()); + + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + + assert_eq!( + eth1_block_hash_at_start_of_voting_period(store, &state), + Ok(state.eth1_data.block_hash), + "should return the states eth1 data in the first half of the period" + ); + } + + #[test] + fn with_store_lookup() { + let spec = &E::default_spec(); + let store = Arc::new(MemoryStore::open()); + + let period = ::SlotsPerEth1VotingPeriod::to_u64(); + + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + let mut prev_state = state.clone(); + + state.slot = Slot::new(period / 2); + + let expected_root = Hash256::from_low_u64_be(42); + + prev_state.eth1_data.block_hash = expected_root; + + assert!( + prev_state.eth1_data != state.eth1_data, + "test requires state eth1_data are different" + ); + + store + .put( + &state + .get_state_root(Slot::new(0)) + .expect("should find state root"), + &prev_state, + ) + .expect("should store state"); + + assert_eq!( + eth1_block_hash_at_start_of_voting_period(store, &state), + Ok(expected_root), + "should return the eth1_data from the previous state" + ); + } + } + + mod eth1_data_sets { + use super::*; + + #[test] + fn empty_cache() { + let spec = &E::default_spec(); + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + let prev_eth1_hash = Hash256::zero(); + + let blocks = vec![]; + + assert_eq!( + eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec), + None + ); + } + + #[test] + fn no_known_block_hash() { + let mut spec = E::default_spec(); + spec.milliseconds_per_slot = 1_000; + + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + let prev_eth1_hash = Hash256::from_low_u64_be(42); + + let blocks = vec![get_eth1_block(0, 0)]; + + assert_eq!( + eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec), + None + ); + } + + #[test] + fn ideal_scenario() { + let mut spec = E::default_spec(); + spec.milliseconds_per_slot = 1_000; + + let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); + let eth1_follow_distance = spec.eth1_follow_distance; + + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); + state.genesis_time = 0; + state.slot = Slot::from(slots_per_eth1_voting_period * 3); + + let prev_eth1_hash = Hash256::zero(); + + let blocks = (0..eth1_follow_distance * 4) + .map(|i| get_eth1_block(i, i)) + .collect::>(); + + let (new_eth1_data, all_eth1_data) = + eth1_data_sets(blocks.iter(), &state, prev_eth1_hash, &spec) + .expect("should find data"); + + assert_eq!( + all_eth1_data.len(), + eth1_follow_distance as usize * 2, + "all_eth1_data should have appropriate length" + ); + assert_eq!( + new_eth1_data.len(), + eth1_follow_distance as usize, + "new_eth1_data should have appropriate length" + ); + + for (eth1_data, block_number) in &new_eth1_data { + assert_eq!( + all_eth1_data.get(eth1_data), + Some(block_number), + "all_eth1_data should contain all items in new_eth1_data" + ); + } + + (1..=eth1_follow_distance * 2) + .map(|i| get_eth1_block(i, i)) + .for_each(|eth1_block| { + assert_eq!( + eth1_block.number, + *all_eth1_data + .get(ð1_block.clone().eth1_data().unwrap()) + .expect("all_eth1_data should have expected block") + ) + }); + + (eth1_follow_distance + 1..=eth1_follow_distance * 2) + .map(|i| get_eth1_block(i, i)) + .for_each(|eth1_block| { + assert_eq!( + eth1_block.number, + *new_eth1_data + .get(ð1_block.clone().eth1_data().unwrap()) + .expect(&format!( + "new_eth1_data should have expected block #{}", + eth1_block.number + )) + ) + }); + } + } + + mod collect_valid_votes { + use super::*; + + fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { + (0..n) + .map(|i| (get_eth1_data(i), i + block_number_offset)) + .collect() + } + + macro_rules! assert_votes { + ($votes: expr, $expected: expr, $text: expr) => { + let expected: Vec<(Eth1Data, BlockNumber)> = $expected; + assert_eq!( + $votes.len(), + expected.len(), + "map should have the same number of elements" + ); + expected.iter().for_each(|(eth1_data, block_number)| { + $votes + .get(&(eth1_data.clone(), *block_number)) + .expect("should contain eth1 data"); + }) + }; + } + + #[test] + fn no_votes_in_state() { + let slots = ::SlotsPerEth1VotingPeriod::to_u64(); + let spec = &E::default_spec(); + let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + + let all_eth1_data = get_eth1_data_vec(slots, 0); + let new_eth1_data = all_eth1_data[slots as usize / 2..].to_vec(); + + let votes = collect_valid_votes( + &state, + HashMap::from_iter(new_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.clone().into_iter()), + ); + assert_eq!( + votes.len(), + 0, + "should not find any votes when state has no votes" + ); + } + + #[test] + fn distinct_votes_in_state() { + let slots = ::SlotsPerEth1VotingPeriod::to_u64(); + let spec = &E::default_spec(); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + + let all_eth1_data = get_eth1_data_vec(slots, 0); + let new_eth1_data = all_eth1_data[slots as usize / 2..].to_vec(); + + state.eth1_data_votes = new_eth1_data[0..slots as usize / 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>() + .into(); + + let votes = collect_valid_votes( + &state, + HashMap::from_iter(new_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.clone().into_iter()), + ); + assert_votes!( + votes, + new_eth1_data[0..slots as usize / 4].to_vec(), + "should find as many votes as were in the state" + ); + } + + #[test] + fn duplicate_votes_in_state() { + let slots = ::SlotsPerEth1VotingPeriod::to_u64(); + let spec = &E::default_spec(); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + + let all_eth1_data = get_eth1_data_vec(slots, 0); + let new_eth1_data = all_eth1_data[slots as usize / 2..].to_vec(); + + let duplicate_eth1_data = new_eth1_data + .last() + .expect("should have some eth1 data") + .clone(); + + state.eth1_data_votes = vec![duplicate_eth1_data.clone(); 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>() + .into(); + + let votes = collect_valid_votes( + &state, + HashMap::from_iter(new_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.clone().into_iter()), + ); + assert_votes!( + votes, + // There should only be one value if there's a duplicate + vec![duplicate_eth1_data.clone()], + "should find as many votes as were in the state" + ); + + assert_eq!( + *votes + .get(&duplicate_eth1_data) + .expect("should contain vote"), + 4, + "should have four votes" + ); + } + + #[test] + fn non_period_tail() { + let slots = ::SlotsPerEth1VotingPeriod::to_u64(); + let spec = &E::default_spec(); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + state.slot = Slot::from(::SlotsPerEpoch::to_u64()) * 10; + + let all_eth1_data = get_eth1_data_vec(slots, 0); + let new_eth1_data = all_eth1_data[slots as usize / 2..].to_vec(); + + let non_new_eth1_data = all_eth1_data + .first() + .expect("should have some eth1 data") + .clone(); + + state.eth1_data_votes = vec![non_new_eth1_data.0.clone()].into(); + + let votes = collect_valid_votes( + &state, + HashMap::from_iter(new_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.clone().into_iter()), + ); + + assert_votes!( + votes, + vec![], + "should not find votes from all_eth1_data when it is not the period tail" + ); + } + + #[test] + fn period_tail() { + let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); + + let slots = ::SlotsPerEth1VotingPeriod::to_u64(); + let spec = &E::default_spec(); + let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); + + state.slot = Slot::from(::SlotsPerEpoch::to_u64()) * 10 + + slots_per_eth1_voting_period.integer_sqrt(); + + let all_eth1_data = get_eth1_data_vec(slots, 0); + let new_eth1_data = all_eth1_data[slots as usize / 2..].to_vec(); + + let non_new_eth1_data = all_eth1_data + .first() + .expect("should have some eth1 data") + .clone(); + + state.eth1_data_votes = vec![non_new_eth1_data.0.clone()].into(); + + let votes = collect_valid_votes( + &state, + HashMap::from_iter(new_eth1_data.clone().into_iter()), + HashMap::from_iter(all_eth1_data.clone().into_iter()), + ); + + assert_votes!( + votes, + vec![non_new_eth1_data], + "should find all_eth1_data votes when it is the period tail" + ); + } + } + + mod winning_vote { + use super::*; + + type Vote = ((Eth1Data, u64), u64); + + fn vote(block_number: u64, vote_count: u64) -> Vote { + ( + ( + Eth1Data { + deposit_root: Hash256::from_low_u64_be(block_number), + deposit_count: block_number, + block_hash: Hash256::from_low_u64_be(block_number), + }, + block_number, + ), + vote_count, + ) + } + + fn vote_data(vote: &Vote) -> Eth1Data { + (vote.0).0.clone() + } + + #[test] + fn no_votes() { + let no_votes = vec![vote(0, 0), vote(1, 0), vote(3, 0), vote(2, 0)]; + + assert_eq!( + // Favour the highest block number when there are no votes. + vote_data(&no_votes[2]), + find_winning_vote(Eth1DataVoteCount::from_iter(no_votes.into_iter())) + .expect("should find winner") + ); + } + + #[test] + fn equal_votes() { + let votes = vec![vote(0, 1), vote(1, 1), vote(3, 1), vote(2, 1)]; + + assert_eq!( + // Favour the highest block number when there are equal votes. + vote_data(&votes[2]), + find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter())) + .expect("should find winner") + ); + } + + #[test] + fn some_votes() { + let votes = vec![vote(0, 0), vote(1, 1), vote(3, 1), vote(2, 2)]; + + assert_eq!( + // Favour the highest vote over the highest block number. + vote_data(&votes[3]), + find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter())) + .expect("should find winner") + ); + } + + #[test] + fn tying_votes() { + let votes = vec![vote(0, 0), vote(1, 1), vote(2, 2), vote(3, 2)]; + + assert_eq!( + // Favour the highest block number for tying votes. + vote_data(&votes[3]), + find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter())) + .expect("should find winner") + ); + } + + #[test] + fn all_tying_votes() { + let votes = vec![vote(3, 42), vote(2, 42), vote(1, 42), vote(0, 42)]; + + assert_eq!( + // Favour the highest block number for tying votes. + vote_data(&votes[0]), + find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter())) + .expect("should find winner") + ); + } + } +} diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index c93a13c8a4..91bc4a1b0c 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -1,6 +1,7 @@ use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256}; +pub use websocket_server::WebSocketSender; pub trait EventHandler: Sized + Send + Sync { fn register(&self, kind: EventKind) -> Result<(), String>; @@ -8,6 +9,15 @@ pub trait EventHandler: Sized + Send + Sync { pub struct NullEventHandler(PhantomData); +impl EventHandler for WebSocketSender { + fn register(&self, kind: EventKind) -> Result<(), String> { + self.send_string( + serde_json::to_string(&kind) + .map_err(|e| format!("Unable to serialize event: {:?}", e))?, + ) + } +} + impl EventHandler for NullEventHandler { fn register(&self, _kind: EventKind) -> Result<(), String> { Ok(()) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 26084e04a7..5645a925a4 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -33,14 +33,10 @@ impl ForkChoice { /// /// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized /// block. - pub fn new( - store: Arc, - genesis_block: &BeaconBlock, - genesis_block_root: Hash256, - ) -> Self { + pub fn new(store: Arc, backend: T::LmdGhost, genesis_block_root: Hash256) -> Self { Self { store: store.clone(), - backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), + backend, genesis_block_root, } } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7f7e4ec2b9..375abe8753 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -3,10 +3,10 @@ extern crate lazy_static; mod beacon_chain; -mod beacon_chain_builder; +pub mod builder; mod checkpoint; mod errors; -mod eth1_chain; +pub mod eth1_chain; pub mod events; mod fork_choice; mod iter; @@ -19,8 +19,9 @@ pub use self::beacon_chain::{ }; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; -pub use beacon_chain_builder::BeaconChainBuilder; -pub use eth1_chain::{Eth1ChainBackend, InteropEth1ChainBackend}; +pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; +pub use events::EventHandler; +pub use fork_choice::ForkChoice; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8efbefe84c..01e50ee247 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,14 +1,17 @@ use crate::{ - events::NullEventHandler, AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, - BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend, + builder::{BeaconChainBuilder, Witness}, + eth1_chain::CachingEth1Backend, + events::NullEventHandler, + AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, }; -use lmd_ghost::LmdGhost; +use genesis::interop_genesis_state; +use lmd_ghost::ThreadSafeReducedTree; use rayon::prelude::*; use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; -use std::marker::PhantomData; use std::sync::Arc; +use std::time::Duration; use store::MemoryStore; use tree_hash::{SignedRoot, TreeHash}; use types::{ @@ -17,12 +20,20 @@ use types::{ Slot, }; +pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; pub use types::test_utils::generate_deterministic_keypairs; -pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; - pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; // 4th September 2019 +pub type HarnessType = Witness< + MemoryStore, + TestingSlotClock, + ThreadSafeReducedTree, + CachingEth1Backend, + E, + NullEventHandler, +>; + /// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { @@ -48,50 +59,19 @@ pub enum AttestationStrategy { SomeValidators(Vec), } -/// Used to make the `BeaconChainHarness` generic over some types. -pub struct CommonTypes -where - L: LmdGhost, - E: EthSpec, -{ - _phantom_l: PhantomData, - _phantom_e: PhantomData, -} - -impl BeaconChainTypes for CommonTypes -where - L: LmdGhost + 'static, - E: EthSpec, -{ - type Store = MemoryStore; - type SlotClock = TestingSlotClock; - type LmdGhost = L; - type Eth1Chain = InteropEth1ChainBackend; - type EthSpec = E; - type EventHandler = NullEventHandler; -} - /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. /// /// Used for testing. -pub struct BeaconChainHarness -where - L: LmdGhost + 'static, - E: EthSpec, -{ - pub chain: BeaconChain>, +pub struct BeaconChainHarness { + pub chain: BeaconChain, pub keypairs: Vec, pub spec: ChainSpec, } -impl BeaconChainHarness -where - L: LmdGhost, - E: EthSpec, -{ +impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(keypairs: Vec) -> Self { + pub fn new(eth_spec_instance: E, keypairs: Vec) -> Self { let spec = E::default_spec(); let log = TerminalLoggerBuilder::new() @@ -99,22 +79,29 @@ where .build() .expect("logger should build"); - let store = Arc::new(MemoryStore::open()); - - let chain = - BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) - .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) - .build( - store.clone(), - InteropEth1ChainBackend::default(), - NullEventHandler::default(), - ) - .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); + let chain = BeaconChainBuilder::new(eth_spec_instance) + .logger(log.clone()) + .custom_spec(spec.clone()) + .store(Arc::new(MemoryStore::open())) + .genesis_state( + interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) + .expect("should generate interop state"), + ) + .expect("should build state using recent genesis") + .dummy_eth1_backend() + .expect("should build dummy backend") + .null_event_handler() + .testing_slot_clock(Duration::from_secs(1)) + .expect("should configure testing slot clock") + .empty_reduced_tree_fork_choice() + .expect("should add fork choice to builder") + .build() + .expect("should build"); Self { + spec: chain.spec.clone(), chain, keypairs, - spec, } } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 82fc882168..a06c652e3d 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -6,14 +6,13 @@ extern crate lazy_static; use beacon_chain::AttestationProcessingOutcome; use beacon_chain::{ test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, + AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, PersistedBeaconChain, BEACON_CHAIN_DB_KEY, }, BlockProcessingOutcome, }; -use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; -use store::{MemoryStore, Store}; +use store::Store; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::{Deposit, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; @@ -25,10 +24,8 @@ lazy_static! { static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } -type TestForkChoice = ThreadSafeReducedTree; - -fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::new(KEYPAIRS[0..validator_count].to_vec()); +fn get_harness(validator_count: usize) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new(MinimalEthSpec, KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -322,7 +319,7 @@ fn roundtrip_operation_pool() { harness.chain.persist().unwrap(); let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); - let p: PersistedBeaconChain> = + let p: PersistedBeaconChain> = harness.chain.store.get(&key).unwrap().unwrap(); let restored_op_pool = p diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index ec0c14159a..1a82cd22be 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Age Manning "] edition = "2018" +[dev-dependencies] +sloggers = "0.3.4" +toml = "^0.5" + [dependencies] beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } @@ -31,3 +35,8 @@ exit-future = "0.1.4" futures = "0.1.29" reqwest = "0.9.22" url = "2.1.0" +lmd_ghost = { path = "../../eth2/lmd_ghost" } +eth1 = { path = "../eth1" } +genesis = { path = "../genesis" } +environment = { path = "../../lighthouse/environment" } +lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs new file mode 100644 index 0000000000..c160081456 --- /dev/null +++ b/beacon_node/client/src/builder.rs @@ -0,0 +1,715 @@ +use crate::config::{ClientGenesis, Config as ClientConfig}; +use crate::Client; +use beacon_chain::{ + builder::{BeaconChainBuilder, Witness}, + eth1_chain::CachingEth1Backend, + lmd_ghost::ThreadSafeReducedTree, + slot_clock::{SlotClock, SystemTimeSlotClock}, + store::{DiskStore, MemoryStore, Store}, + BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, +}; +use environment::RuntimeContext; +use eth1::{Config as Eth1Config, Service as Eth1Service}; +use eth2_config::Eth2Config; +use exit_future::Signal; +use futures::{future, Future, IntoFuture, Stream}; +use genesis::{ + generate_deterministic_keypairs, interop_genesis_state, state_from_ssz_file, Eth1GenesisService, +}; +use lighthouse_bootstrap::Bootstrapper; +use lmd_ghost::LmdGhost; +use network::{NetworkConfig, NetworkMessage, Service as NetworkService}; +use rpc::Config as RpcConfig; +use slog::{debug, error, info, warn}; +use std::net::SocketAddr; +use std::path::Path; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::sync::mpsc::UnboundedSender; +use tokio::timer::Interval; +use types::{ChainSpec, EthSpec}; +use websocket_server::{Config as WebSocketConfig, WebSocketSender}; + +/// The interval between notifier events. +pub const NOTIFIER_INTERVAL_SECONDS: u64 = 15; +/// Create a warning log whenever the peer count is at or below this value. +pub const WARN_PEER_COUNT: usize = 1; +/// Interval between polling the eth1 node for genesis information. +pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 500; + +/// Builds a `Client` instance. +/// +/// ## Notes +/// +/// The builder may start some services (e.g.., libp2p, http server) immediately after they are +/// initialized, _before_ the `self.build(..)` method has been called. +/// +/// Types may be elided and the compiler will infer them once all required methods have been +/// called. +/// +/// If type inference errors are raised, ensure all necessary components have been initialized. For +/// example, the compiler will be unable to infer `T::Store` unless `self.disk_store(..)` or +/// `self.memory_store(..)` has been called. +pub struct ClientBuilder { + slot_clock: Option, + store: Option>, + runtime_context: Option>, + chain_spec: Option, + beacon_chain_builder: Option>, + beacon_chain: Option>>, + eth1_service: Option, + exit_signals: Vec, + event_handler: Option, + libp2p_network: Option>>, + libp2p_network_send: Option>, + http_listen_addr: Option, + websocket_listen_addr: Option, + eth_spec_instance: T::EthSpec, +} + +impl + ClientBuilder> +where + TStore: Store + 'static, + TSlotClock: SlotClock + Clone + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Instantiates a new, empty builder. + /// + /// The `eth_spec_instance` parameter is used to concretize `TEthSpec`. + pub fn new(eth_spec_instance: TEthSpec) -> Self { + Self { + slot_clock: None, + store: None, + runtime_context: None, + chain_spec: None, + beacon_chain_builder: None, + beacon_chain: None, + eth1_service: None, + exit_signals: vec![], + event_handler: None, + libp2p_network: None, + libp2p_network_send: None, + http_listen_addr: None, + websocket_listen_addr: None, + eth_spec_instance, + } + } + + /// Specifies the runtime context (tokio executor, logger, etc) for client services. + pub fn runtime_context(mut self, context: RuntimeContext) -> Self { + self.runtime_context = Some(context); + self + } + + /// Specifies the `ChainSpec`. + pub fn chain_spec(mut self, spec: ChainSpec) -> Self { + self.chain_spec = Some(spec); + self + } + + /// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be + /// called later in order to actually instantiate the `BeaconChain`. + pub fn beacon_chain_builder( + mut self, + client_genesis: ClientGenesis, + config: Eth1Config, + ) -> impl Future { + let store = self.store.clone(); + let chain_spec = self.chain_spec.clone(); + let runtime_context = self.runtime_context.clone(); + let eth_spec_instance = self.eth_spec_instance.clone(); + + future::ok(()) + .and_then(move |()| { + let store = store + .ok_or_else(|| "beacon_chain_start_method requires a store".to_string())?; + let context = runtime_context + .ok_or_else(|| "beacon_chain_start_method requires a log".to_string())? + .service_context("beacon"); + let spec = chain_spec + .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; + + let builder = BeaconChainBuilder::new(eth_spec_instance) + .logger(context.log.clone()) + .store(store.clone()) + .custom_spec(spec.clone()); + + Ok((builder, spec, context)) + }) + .and_then(move |(builder, spec, context)| { + let genesis_state_future: Box + Send> = + match client_genesis { + ClientGenesis::Interop { + validator_count, + genesis_time, + } => { + let keypairs = generate_deterministic_keypairs(validator_count); + let result = interop_genesis_state(&keypairs, genesis_time, &spec); + + let future = result + .and_then(move |genesis_state| builder.genesis_state(genesis_state)) + .into_future() + .map(|v| (v, None)); + + Box::new(future) + } + ClientGenesis::SszFile { path } => { + let result = state_from_ssz_file(path); + + let future = result + .and_then(move |genesis_state| builder.genesis_state(genesis_state)) + .into_future() + .map(|v| (v, None)); + + Box::new(future) + } + ClientGenesis::DepositContract => { + let genesis_service = Eth1GenesisService::new( + // Some of the configuration options for `Eth1Config` are + // hard-coded when listening for genesis from the deposit contract. + // + // The idea is that the `Eth1Config` supplied to this function + // (`config`) is intended for block production duties (i.e., + // listening for deposit events and voting on eth1 data) and that + // we can make listening for genesis more efficient if we modify + // some params. + Eth1Config { + // Truncating the block cache makes searching for genesis more + // complicated. + block_cache_truncation: None, + // Scan large ranges of blocks when awaiting genesis. + blocks_per_log_query: 1_000, + // Only perform a single log request each time the eth1 node is + // polled. + // + // For small testnets this makes finding genesis much faster, + // as it usually happens within 1,000 blocks. + max_log_requests_per_update: Some(1), + // Only perform a single block request each time the eth1 node + // is polled. + // + // For small testnets, this is much faster as they do not have + // a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT` + // has been reached only a single block needs to be read. + max_blocks_per_update: Some(1), + ..config + }, + context.log.clone(), + ); + + let future = genesis_service + .wait_for_genesis_state( + Duration::from_millis(ETH1_GENESIS_UPDATE_INTERVAL_MILLIS), + context.eth2_config().spec.clone(), + ) + .and_then(move |genesis_state| builder.genesis_state(genesis_state)) + .map(|v| (v, Some(genesis_service.into_core_service()))); + + Box::new(future) + } + ClientGenesis::RemoteNode { server, .. } => { + let future = Bootstrapper::connect(server.to_string(), &context.log) + .map_err(|e| { + format!("Failed to initialize bootstrap client: {}", e) + }) + .into_future() + .and_then(|bootstrapper| { + let (genesis_state, _genesis_block) = + bootstrapper.genesis().map_err(|e| { + format!("Failed to bootstrap genesis state: {}", e) + })?; + + builder.genesis_state(genesis_state) + }) + .map(|v| (v, None)); + + Box::new(future) + } + ClientGenesis::Resume => { + let future = builder.resume_from_db().into_future().map(|v| (v, None)); + + Box::new(future) + } + }; + + genesis_state_future + }) + .map(move |(beacon_chain_builder, eth1_service_option)| { + self.eth1_service = eth1_service_option; + self.beacon_chain_builder = Some(beacon_chain_builder); + self + }) + } + + /// Immediately starts the libp2p networking stack. + pub fn libp2p_network(mut self, config: &NetworkConfig) -> Result { + let beacon_chain = self + .beacon_chain + .clone() + .ok_or_else(|| "libp2p_network requires a beacon chain")?; + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "libp2p_network requires a runtime_context")? + .service_context("network"); + + let (network, network_send) = + NetworkService::new(beacon_chain, config, &context.executor, context.log) + .map_err(|e| format!("Failed to start libp2p network: {:?}", e))?; + + self.libp2p_network = Some(network); + self.libp2p_network_send = Some(network_send); + + Ok(self) + } + + /// Immediately starts the gRPC server (gRPC is soon to be deprecated). + pub fn grpc_server(mut self, config: &RpcConfig) -> Result { + let beacon_chain = self + .beacon_chain + .clone() + .ok_or_else(|| "grpc_server requires a beacon chain")?; + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "grpc_server requires a runtime_context")? + .service_context("grpc"); + let network_send = self + .libp2p_network_send + .clone() + .ok_or_else(|| "grpc_server requires a libp2p network")?; + + let exit_signal = rpc::start_server( + config, + &context.executor, + network_send, + beacon_chain, + context.log, + ); + + self.exit_signals.push(exit_signal); + + Ok(self) + } + + /// Immediately starts the beacon node REST API http server. + pub fn http_server( + mut self, + client_config: &ClientConfig, + eth2_config: &Eth2Config, + ) -> Result { + let beacon_chain = self + .beacon_chain + .clone() + .ok_or_else(|| "grpc_server requires a beacon chain")?; + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "http_server requires a runtime_context")? + .service_context("http"); + let network = self + .libp2p_network + .clone() + .ok_or_else(|| "grpc_server requires a libp2p network")?; + let network_send = self + .libp2p_network_send + .clone() + .ok_or_else(|| "grpc_server requires a libp2p network sender")?; + + let network_info = rest_api::NetworkInfo { + network_service: network.clone(), + network_chan: network_send.clone(), + }; + + let (exit_signal, listening_addr) = rest_api::start_server( + &client_config.rest_api, + &context.executor, + beacon_chain.clone(), + network_info, + client_config.db_path().expect("unable to read datadir"), + eth2_config.clone(), + context.log, + ) + .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; + + self.exit_signals.push(exit_signal); + self.http_listen_addr = Some(listening_addr); + + Ok(self) + } + + /// Immediately starts the service that periodically logs about the libp2p peer count. + pub fn peer_count_notifier(mut self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "peer_count_notifier requires a runtime_context")? + .service_context("peer_notifier"); + let log = context.log.clone(); + let log_2 = context.log.clone(); + let network = self + .libp2p_network + .clone() + .ok_or_else(|| "peer_notifier requires a libp2p network")?; + + let (exit_signal, exit) = exit_future::signal(); + + self.exit_signals.push(exit_signal); + + let interval_future = Interval::new( + Instant::now(), + Duration::from_secs(NOTIFIER_INTERVAL_SECONDS), + ) + .map_err(move |e| error!(log_2, "Notifier timer failed"; "error" => format!("{:?}", e))) + .for_each(move |_| { + // NOTE: Panics if libp2p is poisoned. + let connected_peer_count = network.libp2p_service().lock().swarm.connected_peers(); + + debug!(log, "Connected peer status"; "peer_count" => connected_peer_count); + + if connected_peer_count <= WARN_PEER_COUNT { + warn!(log, "Low peer count"; "peer_count" => connected_peer_count); + } + + Ok(()) + }); + + context + .executor + .spawn(exit.until(interval_future).map(|_| ())); + + Ok(self) + } + + /// Immediately starts the service that periodically logs information each slot. + pub fn slot_notifier(mut self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "slot_notifier requires a runtime_context")? + .service_context("slot_notifier"); + let log = context.log.clone(); + let log_2 = log.clone(); + let beacon_chain = self + .beacon_chain + .clone() + .ok_or_else(|| "slot_notifier requires a libp2p network")?; + let spec = self + .chain_spec + .clone() + .ok_or_else(|| "slot_notifier requires a chain spec".to_string())?; + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + let duration_to_next_slot = beacon_chain + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; + + let (exit_signal, exit) = exit_future::signal(); + + self.exit_signals.push(exit_signal); + + let interval_future = Interval::new(Instant::now() + duration_to_next_slot, slot_duration) + .map_err(move |e| error!(log_2, "Slot timer failed"; "error" => format!("{:?}", e))) + .for_each(move |_| { + let best_slot = beacon_chain.head().beacon_block.slot; + let latest_block_root = beacon_chain.head().beacon_block_root; + + if let Ok(current_slot) = beacon_chain.slot() { + info!( + log, + "Slot start"; + "skip_slots" => current_slot.saturating_sub(best_slot), + "best_block_root" => format!("{}", latest_block_root), + "best_block_slot" => best_slot, + "slot" => current_slot, + ) + } else { + error!( + log, + "Beacon chain running whilst slot clock is unavailable." + ); + }; + + Ok(()) + }); + + context + .executor + .spawn(exit.until(interval_future).map(|_| ())); + + Ok(self) + } + + /// Consumers the builder, returning a `Client` if all necessary components have been + /// specified. + /// + /// If type inference errors are being raised, see the comment on the definition of `Self`. + pub fn build( + self, + ) -> Client> { + Client { + beacon_chain: self.beacon_chain, + libp2p_network: self.libp2p_network, + http_listen_addr: self.http_listen_addr, + websocket_listen_addr: self.websocket_listen_addr, + _exit_signals: self.exit_signals, + } + } +} + +impl + ClientBuilder< + Witness< + TStore, + TSlotClock, + ThreadSafeReducedTree, + TEth1Backend, + TEthSpec, + TEventHandler, + >, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + Clone + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self. + pub fn build_beacon_chain(mut self) -> Result { + let chain = self + .beacon_chain_builder + .ok_or_else(|| "beacon_chain requires a beacon_chain_builder")? + .event_handler( + self.event_handler + .ok_or_else(|| "beacon_chain requires an event handler")?, + ) + .slot_clock( + self.slot_clock + .clone() + .ok_or_else(|| "beacon_chain requires a slot clock")?, + ) + .empty_reduced_tree_fork_choice() + .map_err(|e| format!("Failed to init fork choice: {}", e))? + .build() + .map_err(|e| format!("Failed to build beacon chain: {}", e))?; + + self.beacon_chain = Some(Arc::new(chain)); + self.beacon_chain_builder = None; + self.event_handler = None; + + Ok(self) + } +} + +impl + ClientBuilder< + Witness>, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, +{ + /// Specifies that the `BeaconChain` should publish events using the WebSocket server. + pub fn websocket_event_handler(mut self, config: WebSocketConfig) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "websocket_event_handler requires a runtime_context")? + .service_context("ws"); + + let (sender, exit_signal, listening_addr): ( + WebSocketSender, + Option<_>, + Option<_>, + ) = if config.enabled { + let (sender, exit, listening_addr) = + websocket_server::start_server(&config, &context.executor, &context.log)?; + (sender, Some(exit), Some(listening_addr)) + } else { + (WebSocketSender::dummy(), None, None) + }; + + if let Some(signal) = exit_signal { + self.exit_signals.push(signal); + } + self.event_handler = Some(sender); + self.websocket_listen_addr = listening_addr; + + Ok(self) + } +} + +impl + ClientBuilder> +where + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Specifies that the `Client` should use a `DiskStore` database. + pub fn disk_store(mut self, path: &Path) -> Result { + let store = DiskStore::open(path) + .map_err(|e| format!("Unable to open database: {:?}", e).to_string())?; + self.store = Some(Arc::new(store)); + Ok(self) + } +} + +impl + ClientBuilder< + Witness, + > +where + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Specifies that the `Client` should use a `MemoryStore` database. + pub fn memory_store(mut self) -> Self { + let store = MemoryStore::open(); + self.store = Some(Arc::new(store)); + self + } +} + +impl + ClientBuilder< + Witness< + TStore, + TSlotClock, + TLmdGhost, + CachingEth1Backend, + TEthSpec, + TEventHandler, + >, + > +where + TStore: Store + 'static, + TSlotClock: SlotClock + 'static, + TLmdGhost: LmdGhost + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node + /// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during + /// block production. + pub fn caching_eth1_backend(mut self, config: Eth1Config) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "caching_eth1_backend requires a runtime_context")? + .service_context("eth1_rpc"); + let beacon_chain_builder = self + .beacon_chain_builder + .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + let store = self + .store + .clone() + .ok_or_else(|| "caching_eth1_backend requires a store".to_string())?; + + let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { + eth1_service_from_genesis.update_config(config.clone())?; + CachingEth1Backend::from_service(eth1_service_from_genesis, store) + } else { + CachingEth1Backend::new(config, context.log, store) + }; + + self.eth1_service = None; + + let exit = { + let (tx, rx) = exit_future::signal(); + self.exit_signals.push(tx); + rx + }; + + // Starts the service that connects to an eth1 node and periodically updates caches. + context.executor.spawn(backend.start(exit)); + + self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend))); + + Ok(self) + } + + /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. + pub fn no_eth1_backend(mut self) -> Result { + let beacon_chain_builder = self + .beacon_chain_builder + .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + + self.beacon_chain_builder = Some(beacon_chain_builder.no_eth1_backend()); + + Ok(self) + } + + /// Use an eth1 backend that can produce blocks but is not connected to an Eth1 node. + /// + /// This backend will never produce deposits so it's impossible to add validators after + /// genesis. The `Eth1Data` votes will be deterministic junk data. + /// + /// ## Notes + /// + /// The client is given the `CachingEth1Backend` type, but the http backend is never started and the + /// caches are never used. + pub fn dummy_eth1_backend(mut self) -> Result { + let beacon_chain_builder = self + .beacon_chain_builder + .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; + + self.beacon_chain_builder = Some(beacon_chain_builder.dummy_eth1_backend()?); + + Ok(self) + } +} + +impl + ClientBuilder< + Witness, + > +where + TStore: Store + 'static, + TLmdGhost: LmdGhost + 'static, + TEth1Backend: Eth1ChainBackend + 'static, + TEthSpec: EthSpec + 'static, + TEventHandler: EventHandler + 'static, +{ + /// Specifies that the slot clock should read the time from the computers system clock. + pub fn system_time_slot_clock(mut self) -> Result { + let beacon_chain_builder = self + .beacon_chain_builder + .as_ref() + .ok_or_else(|| "system_time_slot_clock requires a beacon_chain_builder")?; + + let genesis_time = beacon_chain_builder + .finalized_checkpoint + .as_ref() + .ok_or_else(|| "system_time_slot_clock requires an initialized beacon state")? + .beacon_state + .genesis_time; + + let spec = self + .chain_spec + .clone() + .ok_or_else(|| "system_time_slot_clock requires a chain spec".to_string())?; + + let slot_clock = SystemTimeSlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_time), + Duration::from_millis(spec.milliseconds_per_slot), + ); + + self.slot_clock = Some(slot_clock); + Ok(self) + } +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 997808cb46..331c905ccf 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -9,6 +9,32 @@ use std::sync::Mutex; /// The number initial validators when starting the `Minimal`. const TESTNET_SPEC_CONSTANTS: &str = "minimal"; +/// Defines how the client should initialize the `BeaconChain` and other components. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum ClientGenesis { + /// Reads the genesis state and other persisted data from the `Store`. + Resume, + /// Creates a genesis state as per the 2019 Canada interop specifications. + Interop { + validator_count: usize, + genesis_time: u64, + }, + /// Connects to an eth1 node and waits until it can create the genesis state from the deposit + /// contract. + DepositContract, + /// Loads the genesis state from a SSZ-encoded `BeaconState` file. + SszFile { path: PathBuf }, + /// Connects to another Lighthouse instance and reads the genesis state and other data via the + /// HTTP API. + RemoteNode { server: String, port: Option }, +} + +impl Default for ClientGenesis { + fn default() -> Self { + Self::DepositContract + } +} + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -17,74 +43,20 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, - /// Defines how we should initialize a BeaconChain instances. + /// If true, the node will use co-ordinated junk for eth1 values. /// - /// This field is not serialized, there for it will not be written to (or loaded from) config - /// files. It can only be configured via the CLI. + /// This is the method used for the 2019 client interop in Canada. + pub dummy_eth1_backend: bool, + pub sync_eth1_chain: bool, #[serde(skip)] - pub beacon_chain_start_method: BeaconChainStartMethod, - pub eth1_backend_method: Eth1BackendMethod, + /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined + /// via the CLI at runtime, instead of from a configuration file saved to disk. + pub genesis: ClientGenesis, pub network: network::NetworkConfig, - pub rpc: rpc::RPCConfig, - pub rest_api: rest_api::ApiConfig, + pub rpc: rpc::Config, + pub rest_api: rest_api::Config, pub websocket_server: websocket_server::Config, -} - -/// Defines how the client should initialize a BeaconChain. -/// -/// In general, there are two methods: -/// - resuming a new chain, or -/// - initializing a new one. -#[derive(Debug, Clone)] -pub enum BeaconChainStartMethod { - /// Resume from an existing BeaconChain, loaded from the existing local database. - Resume, - /// Resume from an existing BeaconChain, loaded from the existing local database. - Mainnet, - /// Create a new beacon chain that can connect to mainnet. - /// - /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { - validator_count: usize, - minutes: u64, - }, - /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known - /// secret keys. - Generated { - validator_count: usize, - genesis_time: u64, - }, - /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. - Yaml { file: PathBuf }, - /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. - Ssz { file: PathBuf }, - /// Create a new beacon chain by loading a JSON-encoded genesis state from a file. - Json { file: PathBuf }, - /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and - /// finalized states and blocks. - HttpBootstrap { server: String, port: Option }, -} - -impl Default for BeaconChainStartMethod { - fn default() -> Self { - BeaconChainStartMethod::Resume - } -} - -/// Defines which Eth1 backend the client should use. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum Eth1BackendMethod { - /// Use the mocked eth1 backend used in interop testing - Interop, - /// Use a web3 connection to a running Eth1 node. - Web3 { server: String }, -} - -impl Default for Eth1BackendMethod { - fn default() -> Self { - Eth1BackendMethod::Interop - } + pub eth1: eth1::Config, } impl Default for Config { @@ -94,13 +66,15 @@ impl Default for Config { log_file: PathBuf::from(""), db_type: "disk".to_string(), db_name: "chain_db".to_string(), + genesis: <_>::default(), network: NetworkConfig::new(), rpc: <_>::default(), rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - beacon_chain_start_method: <_>::default(), - eth1_backend_method: <_>::default(), + dummy_eth1_backend: false, + sync_eth1_chain: false, + eth1: <_>::default(), } } } @@ -183,3 +157,16 @@ impl Config { Ok(()) } } + +#[cfg(test)] +mod tests { + use super::*; + use toml; + + #[test] + fn serde() { + let config = Config::default(); + let serialized = toml::to_string(&config).expect("should serde encode default config"); + toml::from_str::(&serialized).expect("should serde decode default config"); + } +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7b75a37add..5da442bb10 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -2,327 +2,58 @@ extern crate slog; mod config; +pub mod builder; pub mod error; -pub mod notifier; -use beacon_chain::{ - lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, - test_utils::generate_deterministic_keypairs, BeaconChain, BeaconChainBuilder, -}; +use beacon_chain::BeaconChain; use exit_future::Signal; -use futures::{future::Future, Stream}; use network::Service as NetworkService; -use rest_api::NetworkInfo; -use slog::{crit, debug, error, info, o}; -use slot_clock::SlotClock; -use std::marker::PhantomData; +use std::net::SocketAddr; use std::sync::Arc; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use tokio::runtime::TaskExecutor; -use tokio::timer::Interval; -use types::EthSpec; -use websocket_server::WebSocketSender; -pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, InteropEth1ChainBackend}; -pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; +pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend}; +pub use builder::ClientBuilder; +pub use config::{ClientGenesis, Config as ClientConfig}; pub use eth2_config::Eth2Config; -#[derive(Clone)] -pub struct RuntimeBeaconChainTypes { - _phantom_s: PhantomData, - _phantom_e: PhantomData, +/// The core "beacon node" client. +/// +/// Holds references to running services, cleanly shutting them down when dropped. +pub struct Client { + beacon_chain: Option>>, + libp2p_network: Option>>, + http_listen_addr: Option, + websocket_listen_addr: Option, + /// Exit signals will "fire" when dropped, causing each service to exit gracefully. + _exit_signals: Vec, } -impl BeaconChainTypes for RuntimeBeaconChainTypes -where - S: Store + 'static, - E: EthSpec, -{ - type Store = S; - type SlotClock = SystemTimeSlotClock; - type LmdGhost = ThreadSafeReducedTree; - type Eth1Chain = InteropEth1ChainBackend; - type EthSpec = E; - type EventHandler = WebSocketSender; -} +impl Client { + /// Returns an `Arc` reference to the client's `BeaconChain`, if it was started. + pub fn beacon_chain(&self) -> Option>> { + self.beacon_chain.clone() + } -/// Main beacon node client service. This provides the connection and initialisation of the clients -/// sub-services in multiple threads. -pub struct Client -where - S: Store + Clone + 'static, - E: EthSpec, -{ - /// Configuration for the lighthouse client. - _client_config: ClientConfig, - /// The beacon chain for the running client. - beacon_chain: Arc>>, - /// Reference to the network service. - pub network: Arc>>, - /// Signal to terminate the RPC server. - pub rpc_exit_signal: Option, - /// Signal to terminate the slot timer. - pub slot_timer_exit_signal: Option, - /// Signal to terminate the API - pub api_exit_signal: Option, - /// Signal to terminate the websocket server - pub websocket_exit_signal: Option, - /// The clients logger. - log: slog::Logger, -} + /// Returns the address of the client's HTTP API server, if it was started. + pub fn http_listen_addr(&self) -> Option { + self.http_listen_addr + } -impl Client -where - S: Store + Clone + 'static, - E: EthSpec, -{ - /// Generate an instance of the client. Spawn and link all internal sub-processes. - pub fn new( - client_config: ClientConfig, - eth2_config: Eth2Config, - store: S, - log: slog::Logger, - executor: &TaskExecutor, - ) -> error::Result { - let store = Arc::new(store); - let milliseconds_per_slot = eth2_config.spec.milliseconds_per_slot; + /// Returns the address of the client's WebSocket API server, if it was started. + pub fn websocket_listen_addr(&self) -> Option { + self.websocket_listen_addr + } - let spec = ð2_config.spec.clone(); - - let beacon_chain_builder = match &client_config.beacon_chain_start_method { - BeaconChainStartMethod::Resume => { - info!( - log, - "Starting beacon chain"; - "method" => "resume" - ); - BeaconChainBuilder::from_store(spec.clone(), log.clone()) - } - BeaconChainStartMethod::Mainnet => { - crit!(log, "No mainnet beacon chain startup specification."); - return Err("Mainnet launch is not yet announced.".into()); - } - BeaconChainStartMethod::RecentGenesis { - validator_count, - minutes, - } => { - info!( - log, - "Starting beacon chain"; - "validator_count" => validator_count, - "minutes" => minutes, - "method" => "recent" - ); - BeaconChainBuilder::recent_genesis( - &generate_deterministic_keypairs(*validator_count), - *minutes, - spec.clone(), - log.clone(), - )? - } - BeaconChainStartMethod::Generated { - validator_count, - genesis_time, - } => { - info!( - log, - "Starting beacon chain"; - "validator_count" => validator_count, - "genesis_time" => genesis_time, - "method" => "quick" - ); - BeaconChainBuilder::quick_start( - *genesis_time, - &generate_deterministic_keypairs(*validator_count), - spec.clone(), - log.clone(), - )? - } - BeaconChainStartMethod::Yaml { file } => { - info!( - log, - "Starting beacon chain"; - "file" => format!("{:?}", file), - "method" => "yaml" - ); - BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? - } - BeaconChainStartMethod::Ssz { file } => { - info!( - log, - "Starting beacon chain"; - "file" => format!("{:?}", file), - "method" => "ssz" - ); - BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? - } - BeaconChainStartMethod::Json { file } => { - info!( - log, - "Starting beacon chain"; - "file" => format!("{:?}", file), - "method" => "json" - ); - BeaconChainBuilder::json_state(file, spec.clone(), log.clone())? - } - BeaconChainStartMethod::HttpBootstrap { server, port } => { - info!( - log, - "Starting beacon chain"; - "port" => port, - "server" => server, - "method" => "bootstrap" - ); - BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? - } - }; - - let eth1_backend = - InteropEth1ChainBackend::new(String::new()).map_err(|e| format!("{:?}", e))?; - - // Start the websocket server. - let (websocket_sender, websocket_exit_signal): (WebSocketSender, Option<_>) = - if client_config.websocket_server.enabled { - let (sender, exit) = websocket_server::start_server( - &client_config.websocket_server, - executor, - &log, - )?; - (sender, Some(exit)) - } else { - (WebSocketSender::dummy(), None) - }; - - let beacon_chain: Arc>> = Arc::new( - beacon_chain_builder - .build(store, eth1_backend, websocket_sender) - .map_err(error::Error::from)?, - ); - - let since_epoch = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {}", e))?; - let since_genesis = Duration::from_secs(beacon_chain.head().beacon_state.genesis_time); - - if since_genesis > since_epoch { - info!( - log, - "Starting node prior to genesis"; - "now" => since_epoch.as_secs(), - "genesis_seconds" => since_genesis.as_secs(), - ); - } - - let network_config = &client_config.network; - let (network, network_send) = - NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; - - // spawn the RPC server - let rpc_exit_signal = if client_config.rpc.enabled { - Some(rpc::start_server( - &client_config.rpc, - executor, - network_send.clone(), - beacon_chain.clone(), - &log, - )) - } else { - None - }; - - // Start the `rest_api` service - let api_exit_signal = if client_config.rest_api.enabled { - let network_info = NetworkInfo { - network_service: network.clone(), - network_chan: network_send.clone(), - }; - match rest_api::start_server( - &client_config.rest_api, - executor, - beacon_chain.clone(), - network_info, - client_config.db_path().expect("unable to read datadir"), - eth2_config.clone(), - &log, - ) { - Ok(s) => Some(s), - Err(e) => { - error!(log, "API service failed to start."; "error" => format!("{:?}",e)); - None - } - } - } else { - None - }; - - let (slot_timer_exit_signal, exit) = exit_future::signal(); - if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { - // set up the validator work interval - start at next slot and proceed every slot - let interval = { - // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_millis(milliseconds_per_slot); - //TODO: Handle checked add correctly - Interval::new(Instant::now() + duration_to_next_slot, slot_duration) - }; - - let chain = beacon_chain.clone(); - let log = log.new(o!("Service" => "SlotTimer")); - executor.spawn( - exit.until( - interval - .for_each(move |_| { - log_new_slot(&chain, &log); - - Ok(()) - }) - .map_err(|_| ()), - ) - .map(|_| ()), - ); - } - - Ok(Client { - _client_config: client_config, - beacon_chain, - rpc_exit_signal, - slot_timer_exit_signal: Some(slot_timer_exit_signal), - api_exit_signal, - websocket_exit_signal, - log, - network, - }) + /// Returns the port of the client's libp2p stack, if it was started. + pub fn libp2p_listen_port(&self) -> Option { + self.libp2p_network.as_ref().map(|n| n.listen_port()) } } -impl Drop for Client { +impl Drop for Client { fn drop(&mut self) { - // Save the beacon chain to it's store before dropping. - let _result = self.beacon_chain.persist(); + if let Some(beacon_chain) = &self.beacon_chain { + let _result = beacon_chain.persist(); + } } } - -fn log_new_slot(chain: &Arc>, log: &slog::Logger) { - let best_slot = chain.head().beacon_block.slot; - let latest_block_root = chain.head().beacon_block_root; - - if let Ok(current_slot) = chain.slot() { - info!( - log, - "Slot start"; - "best_slot" => best_slot, - "slot" => current_slot, - ); - debug!( - log, - "Slot info"; - "skip_slots" => current_slot.saturating_sub(best_slot), - "best_block_root" => format!("{}", latest_block_root), - "slot" => current_slot, - ); - } else { - error!( - log, - "Beacon chain running whilst slot clock is unavailable." - ); - }; -} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs deleted file mode 100644 index 20da963ec3..0000000000 --- a/beacon_node/client/src/notifier.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::Client; -use exit_future::Exit; -use futures::{Future, Stream}; -use slog::{debug, o, warn}; -use std::time::{Duration, Instant}; -use store::Store; -use tokio::runtime::TaskExecutor; -use tokio::timer::Interval; -use types::EthSpec; - -/// The interval between heartbeat events. -pub const HEARTBEAT_INTERVAL_SECONDS: u64 = 15; - -/// Create a warning log whenever the peer count is at or below this value. -pub const WARN_PEER_COUNT: usize = 1; - -/// Spawns a thread that can be used to run code periodically, on `HEARTBEAT_INTERVAL_SECONDS` -/// durations. -/// -/// Presently unused, but remains for future use. -pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) -where - S: Store + Clone + 'static, - E: EthSpec, -{ - // notification heartbeat - let interval = Interval::new( - Instant::now(), - Duration::from_secs(HEARTBEAT_INTERVAL_SECONDS), - ); - - let log = client.log.new(o!("Service" => "Notifier")); - - let libp2p = client.network.libp2p_service(); - - let heartbeat = move |_| { - // Number of libp2p (not discv5) peers connected. - // - // Panics if libp2p is poisoned. - let connected_peer_count = libp2p.lock().swarm.connected_peers(); - - debug!(log, "Connected peer status"; "peer_count" => connected_peer_count); - - if connected_peer_count <= WARN_PEER_COUNT { - warn!(log, "Low peer count"; "peer_count" => connected_peer_count); - } - - Ok(()) - }; - - // map error and spawn - let err_log = client.log.clone(); - let heartbeat_interval = interval - .map_err(move |e| debug!(err_log, "Timer error {}", e)) - .for_each(heartbeat); - - executor.spawn(exit.until(heartbeat_interval).map(|_| ())); -} diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml new file mode 100644 index 0000000000..bdd9ded4d4 --- /dev/null +++ b/beacon_node/eth1/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "eth1" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dev-dependencies] +eth1_test_rig = { path = "../../tests/eth1_test_rig" } +environment = { path = "../../lighthouse/environment" } +toml = "^0.5" +web3 = "0.8.0" + +[dependencies] +reqwest = "0.9" +futures = "0.1.25" +serde_json = "1.0" +serde = { version = "1.0", features = ["derive"] } +hex = "0.4" +types = { path = "../../eth2/types"} +merkle_proof = { path = "../../eth2/utils/merkle_proof"} +eth2_ssz = { path = "../../eth2/utils/ssz"} +tree_hash = { path = "../../eth2/utils/tree_hash"} +eth2_hashing = { path = "../../eth2/utils/eth2_hashing"} +parking_lot = "0.7" +slog = "^2.2.3" +tokio = "0.1.17" +state_processing = { path = "../../eth2/state_processing" } +exit-future = "0.1.4" +libflate = "0.1" diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs new file mode 100644 index 0000000000..1a6464ca76 --- /dev/null +++ b/beacon_node/eth1/src/block_cache.rs @@ -0,0 +1,271 @@ +use std::ops::RangeInclusive; +use types::{Eth1Data, Hash256}; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + /// The timestamp of each block equal to or later than the block prior to it. + InconsistentTimestamp { parent: u64, child: u64 }, + /// Some `Eth1Block` was provided with the same block number but different data. The source + /// of eth1 data is inconsistent. + Conflicting(u64), + /// The given block was not one block number higher than the higest known block number. + NonConsecutive { given: u64, expected: u64 }, + /// Some invariant was violated, there is a likely bug in the code. + Internal(String), +} + +/// A block of the eth1 chain. +/// +/// Contains all information required to add a `BlockCache` entry. +#[derive(Debug, PartialEq, Clone, Eq, Hash)] +pub struct Eth1Block { + pub hash: Hash256, + pub timestamp: u64, + pub number: u64, + pub deposit_root: Option, + pub deposit_count: Option, +} + +impl Eth1Block { + pub fn eth1_data(self) -> Option { + Some(Eth1Data { + deposit_root: self.deposit_root?, + deposit_count: self.deposit_count?, + block_hash: self.hash, + }) + } +} + +/// Stores block and deposit contract information and provides queries based upon the block +/// timestamp. +#[derive(Debug, PartialEq, Clone, Default)] +pub struct BlockCache { + blocks: Vec, +} + +impl BlockCache { + /// Returns the number of blocks stored in `self`. + pub fn len(&self) -> usize { + self.blocks.len() + } + + /// True if the cache does not store any blocks. + pub fn is_empty(&self) -> bool { + self.blocks.is_empty() + } + + /// Returns the highest block number stored. + pub fn highest_block_number(&self) -> Option { + self.blocks.last().map(|block| block.number) + } + + /// Returns an iterator over all blocks. + /// + /// Blocks a guaranteed to be returned with; + /// + /// - Monotonically increasing block numbers. + /// - Non-uniformly increasing block timestamps. + pub fn iter(&self) -> impl DoubleEndedIterator + Clone { + self.blocks.iter() + } + + /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the + /// rest. + /// + /// If `len` is greater than the vector's current length, this has no effect. + pub fn truncate(&mut self, len: usize) { + if len < self.blocks.len() { + self.blocks = self.blocks.split_off(self.blocks.len() - len); + } + } + + /// Returns the range of block numbers stored in the block cache. All blocks in this range can + /// be accessed. + fn available_block_numbers(&self) -> Option> { + Some(self.blocks.first()?.number..=self.blocks.last()?.number) + } + + /// Returns a block with the corresponding number, if any. + pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { + self.blocks.get( + self.blocks + .as_slice() + .binary_search_by(|block| block.number.cmp(&block_number)) + .ok()?, + ) + } + + /// Insert an `Eth1Snapshot` into `self`, allowing future queries. + /// + /// Allows inserting either: + /// + /// - The root block (i.e., any block if there are no existing blocks), or, + /// - An immediate child of the most recent (highest block number) block. + /// + /// ## Errors + /// + /// - If the cache is not empty and `item.block.block_number - 1` is not already in `self`. + /// - If `item.block.block_number` is in `self`, but is not identical to the supplied + /// `Eth1Snapshot`. + /// - If `item.block.timestamp` is prior to the parent. + pub fn insert_root_or_child(&mut self, block: Eth1Block) -> Result<(), Error> { + let expected_block_number = self + .highest_block_number() + .map(|n| n + 1) + .unwrap_or_else(|| block.number); + + // If there are already some cached blocks, check to see if the new block number is one of + // them. + // + // If the block is already known, check to see the given block is identical to it. If not, + // raise an inconsistency error. This is mostly likely caused by some fork on the eth1 + // chain. + if let Some(local) = self.available_block_numbers() { + if local.contains(&block.number) { + let known_block = self.block_by_number(block.number).ok_or_else(|| { + Error::Internal("An expected block was not present".to_string()) + })?; + + if known_block == &block { + return Ok(()); + } else { + return Err(Error::Conflicting(block.number)); + }; + } + } + + // Only permit blocks when it's either: + // + // - The first block inserted. + // - Exactly one block number higher than the highest known block number. + if block.number != expected_block_number { + return Err(Error::NonConsecutive { + given: block.number, + expected: expected_block_number, + }); + } + + // If the block is not the first block inserted, ensure that its timestamp is not higher + // than its parents. + if let Some(previous_block) = self.blocks.last() { + if previous_block.timestamp > block.timestamp { + return Err(Error::InconsistentTimestamp { + parent: previous_block.timestamp, + child: block.timestamp, + }); + } + } + + self.blocks.push(block); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn get_block(i: u64, interval_secs: u64) -> Eth1Block { + Eth1Block { + hash: Hash256::from_low_u64_be(i), + timestamp: i * interval_secs, + number: i, + deposit_root: Some(Hash256::from_low_u64_be(i << 32)), + deposit_count: Some(i), + } + } + + fn get_blocks(n: usize, interval_secs: u64) -> Vec { + (0..n as u64) + .into_iter() + .map(|i| get_block(i, interval_secs)) + .collect() + } + + fn insert(cache: &mut BlockCache, s: Eth1Block) -> Result<(), Error> { + cache.insert_root_or_child(s) + } + + #[test] + fn truncate() { + let n = 16; + let blocks = get_blocks(n, 10); + + let mut cache = BlockCache::default(); + + for block in blocks { + insert(&mut cache, block.clone()).expect("should add consecutive blocks"); + } + + for len in vec![0, 1, 2, 3, 4, 8, 15, 16] { + let mut cache = cache.clone(); + + cache.truncate(len); + + assert_eq!( + cache.blocks.len(), + len, + "should truncate to length: {}", + len + ); + } + + let mut cache_2 = cache.clone(); + cache_2.truncate(17); + assert_eq!( + cache_2.blocks.len(), + n, + "truncate to larger than n should be a no-op" + ); + } + + #[test] + fn inserts() { + let n = 16; + let blocks = get_blocks(n, 10); + + let mut cache = BlockCache::default(); + + for block in blocks { + insert(&mut cache, block.clone()).expect("should add consecutive blocks"); + } + + // No error for re-adding a block identical to one that exists. + assert!(insert(&mut cache, get_block(n as u64 - 1, 10)).is_ok()); + + // Error for re-adding a block that is different to the one that exists. + assert!(insert(&mut cache, get_block(n as u64 - 1, 11)).is_err()); + + // Error for adding non-consecutive blocks. + assert!(insert(&mut cache, get_block(n as u64 + 1, 10)).is_err()); + assert!(insert(&mut cache, get_block(n as u64 + 2, 10)).is_err()); + + // Error for adding timestamp prior to previous. + assert!(insert(&mut cache, get_block(n as u64, 1)).is_err()); + // Double check to make sure previous test was only affected by timestamp. + assert!(insert(&mut cache, get_block(n as u64, 10)).is_ok()); + } + + #[test] + fn duplicate_timestamp() { + let mut blocks = get_blocks(7, 10); + + blocks[0].timestamp = 0; + blocks[1].timestamp = 10; + blocks[2].timestamp = 10; + blocks[3].timestamp = 20; + blocks[4].timestamp = 30; + blocks[5].timestamp = 40; + blocks[6].timestamp = 40; + + let mut cache = BlockCache::default(); + + for block in &blocks { + insert(&mut cache, block.clone()) + .expect("should add consecutive blocks with duplicate timestamps"); + } + + assert_eq!(cache.blocks, blocks, "should have added all blocks"); + } +} diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs new file mode 100644 index 0000000000..f2b43f55ea --- /dev/null +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -0,0 +1,371 @@ +use crate::DepositLog; +use eth2_hashing::hash; +use std::ops::Range; +use tree_hash::TreeHash; +use types::{Deposit, Hash256}; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + /// A deposit log was added when a prior deposit was not already in the cache. + /// + /// Logs have to be added with monotonically-increasing block numbers. + NonConsecutive { log_index: u64, expected: usize }, + /// The eth1 event log data was unable to be parsed. + LogParseError(String), + /// There are insufficient deposits in the cache to fulfil the request. + InsufficientDeposits { + known_deposits: usize, + requested: u64, + }, + /// A log with the given index is already present in the cache and it does not match the one + /// provided. + DuplicateDistinctLog(u64), + /// The deposit count must always be large enough to account for the requested deposit range. + /// + /// E.g., you cannot request deposit 10 when the deposit count is 9. + DepositCountInvalid { deposit_count: u64, range_end: u64 }, + /// An unexpected condition was encountered. + InternalError(String), +} + +/// Emulates the eth1 deposit contract merkle tree. +pub struct DepositDataTree { + tree: merkle_proof::MerkleTree, + mix_in_length: usize, + depth: usize, +} + +impl DepositDataTree { + /// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth. + pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self { + Self { + tree: merkle_proof::MerkleTree::create(leaves, depth), + mix_in_length, + depth, + } + } + + /// Returns 32 bytes representing the "mix in length" for the merkle root of this tree. + fn length_bytes(&self) -> Vec { + int_to_bytes32(self.mix_in_length) + } + + /// Retrieve the root hash of this Merkle tree with the length mixed in. + pub fn root(&self) -> Hash256 { + let mut preimage = [0; 64]; + preimage[0..32].copy_from_slice(&self.tree.hash()[..]); + preimage[32..64].copy_from_slice(&self.length_bytes()); + Hash256::from_slice(&hash(&preimage)) + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth + 1`. + pub fn generate_proof(&self, index: usize) -> (Hash256, Vec) { + let (root, mut proof) = self.tree.generate_proof(index, self.depth); + proof.push(Hash256::from_slice(&self.length_bytes())); + (root, proof) + } +} + +/// Mirrors the merkle tree of deposits in the eth1 deposit contract. +/// +/// Provides `Deposit` objects with merkle proofs included. +#[derive(Default)] +pub struct DepositCache { + logs: Vec, + roots: Vec, +} + +impl DepositCache { + /// Returns the number of deposits available in the cache. + pub fn len(&self) -> usize { + self.logs.len() + } + + /// True if the cache does not store any blocks. + pub fn is_empty(&self) -> bool { + self.logs.is_empty() + } + + /// Returns the block number for the most recent deposit in the cache. + pub fn latest_block_number(&self) -> Option { + self.logs.last().map(|log| log.block_number) + } + + /// Returns an iterator over all the logs in `self`. + pub fn iter(&self) -> impl Iterator { + self.logs.iter() + } + + /// Returns the i'th deposit log. + pub fn get(&self, i: usize) -> Option<&DepositLog> { + self.logs.get(i) + } + + /// Adds `log` to self. + /// + /// This function enforces that `logs` are imported one-by-one with no gaps between + /// `log.index`, starting at `log.index == 0`. + /// + /// ## Errors + /// + /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). + /// - If a log with `log.index` is already known, but the given `log` is distinct to it. + pub fn insert_log(&mut self, log: DepositLog) -> Result<(), Error> { + if log.index == self.logs.len() as u64 { + self.roots + .push(Hash256::from_slice(&log.deposit_data.tree_hash_root())); + self.logs.push(log); + + Ok(()) + } else if log.index < self.logs.len() as u64 { + if self.logs[log.index as usize] == log { + Ok(()) + } else { + Err(Error::DuplicateDistinctLog(log.index)) + } + } else { + Err(Error::NonConsecutive { + log_index: log.index, + expected: self.logs.len(), + }) + } + } + + /// Returns a list of `Deposit` objects, within the given deposit index `range`. + /// + /// The `deposit_count` is used to generate the proofs for the `Deposits`. For example, if we + /// have 100 proofs, but the eth2 chain only acknowledges 50 of them, we must produce our + /// proofs with respect to a tree size of 50. + /// + /// + /// ## Errors + /// + /// - If `deposit_count` is larger than `range.end`. + /// - There are not sufficient deposits in the tree to generate the proof. + pub fn get_deposits( + &self, + range: Range, + deposit_count: u64, + tree_depth: usize, + ) -> Result<(Hash256, Vec), Error> { + if deposit_count < range.end { + // It's invalid to ask for more deposits than should exist. + Err(Error::DepositCountInvalid { + deposit_count, + range_end: range.end, + }) + } else if range.end > self.logs.len() as u64 { + // The range of requested deposits exceeds the deposits stored locally. + Err(Error::InsufficientDeposits { + requested: range.end, + known_deposits: self.logs.len(), + }) + } else if deposit_count > self.roots.len() as u64 { + // There are not `deposit_count` known deposit roots, so we can't build the merkle tree + // to prove into. + Err(Error::InsufficientDeposits { + requested: deposit_count, + known_deposits: self.logs.len(), + }) + } else { + let roots = self + .roots + .get(0..deposit_count as usize) + .ok_or_else(|| Error::InternalError("Unable to get known root".into()))?; + + // Note: there is likely a more optimal solution than recreating the `DepositDataTree` + // each time this function is called. + // + // Perhaps a base merkle tree could be maintained that contains all deposits up to the + // last finalized eth1 deposit count. Then, that tree could be cloned and extended for + // each of these calls. + + let tree = DepositDataTree::create(roots, deposit_count as usize, tree_depth); + + let deposits = self + .logs + .get(range.start as usize..range.end as usize) + .ok_or_else(|| Error::InternalError("Unable to get known log".into()))? + .iter() + .map(|deposit_log| { + let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize); + + Deposit { + proof: proof.into(), + data: deposit_log.deposit_data.clone(), + } + }) + .collect(); + + Ok((tree.root(), deposits)) + } + } +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: usize) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::deposit_log::tests::EXAMPLE_LOG; + use crate::http::Log; + + pub const TREE_DEPTH: usize = 32; + + fn example_log() -> DepositLog { + let log = Log { + block_number: 42, + data: EXAMPLE_LOG.to_vec(), + }; + DepositLog::from_log(&log).expect("should decode log") + } + + #[test] + fn insert_log_valid() { + let mut tree = DepositCache::default(); + + for i in 0..16 { + let mut log = example_log(); + log.index = i; + tree.insert_log(log).expect("should add consecutive logs") + } + } + + #[test] + fn insert_log_invalid() { + let mut tree = DepositCache::default(); + + for i in 0..4 { + let mut log = example_log(); + log.index = i; + tree.insert_log(log).expect("should add consecutive logs") + } + + // Add duplicate, when given is the same as the one known. + let mut log = example_log(); + log.index = 3; + assert!(tree.insert_log(log).is_ok()); + + // Add duplicate, when given is different to the one known. + let mut log = example_log(); + log.index = 3; + log.block_number = 99; + assert!(tree.insert_log(log).is_err()); + + // Skip inserting a log. + let mut log = example_log(); + log.index = 5; + assert!(tree.insert_log(log).is_err()); + } + + #[test] + fn get_deposit_valid() { + let n = 1_024; + let mut tree = DepositCache::default(); + + for i in 0..n { + let mut log = example_log(); + log.index = i; + log.block_number = i; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); + tree.insert_log(log).expect("should add consecutive logs") + } + + // Get 0 deposits, with max deposit count. + let (_, deposits) = tree + .get_deposits(0..0, n, TREE_DEPTH) + .expect("should get the full tree"); + assert_eq!(deposits.len(), 0, "should return no deposits"); + + // Get 0 deposits, with 0 deposit count. + let (_, deposits) = tree + .get_deposits(0..0, 0, TREE_DEPTH) + .expect("should get the full tree"); + assert_eq!(deposits.len(), 0, "should return no deposits"); + + // Get 0 deposits, with 0 deposit count, tree depth 0. + let (_, deposits) = tree + .get_deposits(0..0, 0, 0) + .expect("should get the full tree"); + assert_eq!(deposits.len(), 0, "should return no deposits"); + + // Get all deposits, with max deposit count. + let (full_root, deposits) = tree + .get_deposits(0..n, n, TREE_DEPTH) + .expect("should get the full tree"); + assert_eq!(deposits.len(), n as usize, "should return all deposits"); + + // Get 4 deposits, with max deposit count. + let (root, deposits) = tree + .get_deposits(0..4, n, TREE_DEPTH) + .expect("should get the four from the full tree"); + assert_eq!( + deposits.len(), + 4 as usize, + "should get 4 deposits from full tree" + ); + assert_eq!( + root, full_root, + "should still return full root when getting deposit subset" + ); + + // Get half of the deposits, with half deposit count. + let (half_root, deposits) = tree + .get_deposits(0..n / 2, n / 2, TREE_DEPTH) + .expect("should get the half tree"); + assert_eq!( + deposits.len(), + n as usize / 2, + "should return half deposits" + ); + + // Get 4 deposits, with half deposit count. + let (root, deposits) = tree + .get_deposits(0..4, n / 2, TREE_DEPTH) + .expect("should get the half tree"); + assert_eq!( + deposits.len(), + 4 as usize, + "should get 4 deposits from half tree" + ); + assert_eq!( + root, half_root, + "should still return half root when getting deposit subset" + ); + assert_ne!( + full_root, half_root, + "should get different root when pinning deposit count" + ); + } + + #[test] + fn get_deposit_invalid() { + let n = 16; + let mut tree = DepositCache::default(); + + for i in 0..n { + let mut log = example_log(); + log.index = i; + log.block_number = i; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); + tree.insert_log(log).expect("should add consecutive logs") + } + + // Range too high. + assert!(tree.get_deposits(0..n + 1, n, TREE_DEPTH).is_err()); + + // Count too high. + assert!(tree.get_deposits(0..n, n + 1, TREE_DEPTH).is_err()); + + // Range higher than count. + assert!(tree.get_deposits(0..4, 2, TREE_DEPTH).is_err()); + } +} diff --git a/beacon_node/eth1/src/deposit_log.rs b/beacon_node/eth1/src/deposit_log.rs new file mode 100644 index 0000000000..d42825c756 --- /dev/null +++ b/beacon_node/eth1/src/deposit_log.rs @@ -0,0 +1,107 @@ +use super::http::Log; +use ssz::Decode; +use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes}; + +/// The following constants define the layout of bytes in the deposit contract `DepositEvent`. The +/// event bytes are formatted according to the Ethereum ABI. +const PUBKEY_START: usize = 192; +const PUBKEY_LEN: usize = 48; +const CREDS_START: usize = PUBKEY_START + 64 + 32; +const CREDS_LEN: usize = 32; +const AMOUNT_START: usize = CREDS_START + 32 + 32; +const AMOUNT_LEN: usize = 8; +const SIG_START: usize = AMOUNT_START + 32 + 32; +const SIG_LEN: usize = 96; +const INDEX_START: usize = SIG_START + 96 + 32; +const INDEX_LEN: usize = 8; + +/// A fully parsed eth1 deposit contract log. +#[derive(Debug, PartialEq, Clone)] +pub struct DepositLog { + pub deposit_data: DepositData, + /// The block number of the log that included this `DepositData`. + pub block_number: u64, + /// The index included with the deposit log. + pub index: u64, +} + +impl DepositLog { + /// Attempts to parse a raw `Log` from the deposit contract into a `DepositLog`. + pub fn from_log(log: &Log) -> Result { + let bytes = &log.data; + + let pubkey = bytes + .get(PUBKEY_START..PUBKEY_START + PUBKEY_LEN) + .ok_or_else(|| "Insufficient bytes for pubkey".to_string())?; + let withdrawal_credentials = bytes + .get(CREDS_START..CREDS_START + CREDS_LEN) + .ok_or_else(|| "Insufficient bytes for withdrawal credential".to_string())?; + let amount = bytes + .get(AMOUNT_START..AMOUNT_START + AMOUNT_LEN) + .ok_or_else(|| "Insufficient bytes for amount".to_string())?; + let signature = bytes + .get(SIG_START..SIG_START + SIG_LEN) + .ok_or_else(|| "Insufficient bytes for signature".to_string())?; + let index = bytes + .get(INDEX_START..INDEX_START + INDEX_LEN) + .ok_or_else(|| "Insufficient bytes for index".to_string())?; + + let deposit_data = DepositData { + pubkey: PublicKeyBytes::from_ssz_bytes(pubkey) + .map_err(|e| format!("Invalid pubkey ssz: {:?}", e))?, + withdrawal_credentials: Hash256::from_ssz_bytes(withdrawal_credentials) + .map_err(|e| format!("Invalid withdrawal_credentials ssz: {:?}", e))?, + amount: u64::from_ssz_bytes(amount) + .map_err(|e| format!("Invalid amount ssz: {:?}", e))?, + signature: SignatureBytes::from_ssz_bytes(signature) + .map_err(|e| format!("Invalid signature ssz: {:?}", e))?, + }; + + Ok(DepositLog { + deposit_data, + block_number: log.block_number, + index: u64::from_ssz_bytes(index).map_err(|e| format!("Invalid index ssz: {:?}", e))?, + }) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::http::Log; + + /// The data from a deposit event, using the v0.8.3 version of the deposit contract. + pub const EXAMPLE_LOG: &[u8] = &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, + 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, + 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, + 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, + 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 44, 51, 77, 40, 10, 72, 246, 34, 193, + 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, + 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, + 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + #[test] + fn can_parse_example_log() { + let log = Log { + block_number: 42, + data: EXAMPLE_LOG.to_vec(), + }; + DepositLog::from_log(&log).expect("should decode log"); + } +} diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs new file mode 100644 index 0000000000..404e357d19 --- /dev/null +++ b/beacon_node/eth1/src/http.rs @@ -0,0 +1,405 @@ +//! Provides a very minimal set of functions for interfacing with the eth2 deposit contract via an +//! eth1 HTTP JSON-RPC endpoint. +//! +//! All remote functions return a future (i.e., are async). +//! +//! Does not use a web3 library, instead it uses `reqwest` (`hyper`) to call the remote endpoint +//! and `serde` to decode the response. +//! +//! ## Note +//! +//! There is no ABI parsing here, all function signatures and topics are hard-coded as constants. + +use futures::{Future, Stream}; +use libflate::gzip::Decoder; +use reqwest::{header::CONTENT_TYPE, r#async::ClientBuilder, StatusCode}; +use serde_json::{json, Value}; +use std::io::prelude::*; +use std::ops::Range; +use std::time::Duration; +use types::Hash256; + +/// `keccak("DepositEvent(bytes,bytes,bytes,bytes,bytes)")` +pub const DEPOSIT_EVENT_TOPIC: &str = + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"; +/// `keccak("get_deposit_root()")[0..4]` +pub const DEPOSIT_ROOT_FN_SIGNATURE: &str = "0x863a311b"; +/// `keccak("get_deposit_count()")[0..4]` +pub const DEPOSIT_COUNT_FN_SIGNATURE: &str = "0x621fd130"; + +/// Number of bytes in deposit contract deposit root response. +pub const DEPOSIT_COUNT_RESPONSE_BYTES: usize = 96; +/// Number of bytes in deposit contract deposit root (value only). +pub const DEPOSIT_ROOT_BYTES: usize = 32; + +#[derive(Debug, PartialEq, Clone)] +pub struct Block { + pub hash: Hash256, + pub timestamp: u64, + pub number: u64, +} + +/// Returns the current block number. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +pub fn get_block_number( + endpoint: &str, + timeout: Duration, +) -> impl Future { + send_rpc_request(endpoint, "eth_blockNumber", json!([]), timeout) + .and_then(|response_body| { + hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for block number".to_string())? + .as_str() + .ok_or_else(|| "Data was not string")?, + ) + }) + .map_err(|e| format!("Failed to get block number: {}", e)) +} + +/// Gets a block hash by block number. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +pub fn get_block( + endpoint: &str, + block_number: u64, + timeout: Duration, +) -> impl Future { + let params = json!([ + format!("0x{:x}", block_number), + false // do not return full tx objects. + ]); + + send_rpc_request(endpoint, "eth_getBlockByNumber", params, timeout) + .and_then(|response_body| { + let hash = hex_to_bytes( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for block".to_string())? + .get("hash") + .ok_or_else(|| "No hash for block")? + .as_str() + .ok_or_else(|| "Block hash was not string")?, + )?; + let hash = if hash.len() == 32 { + Ok(Hash256::from_slice(&hash)) + } else { + Err(format!("Block has was not 32 bytes: {:?}", hash)) + }?; + + let timestamp = hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for timestamp".to_string())? + .get("timestamp") + .ok_or_else(|| "No timestamp for block")? + .as_str() + .ok_or_else(|| "Block timestamp was not string")?, + )?; + + let number = hex_to_u64_be( + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for number".to_string())? + .get("number") + .ok_or_else(|| "No number for block")? + .as_str() + .ok_or_else(|| "Block number was not string")?, + )?; + + if number <= usize::max_value() as u64 { + Ok(Block { + hash, + timestamp, + number, + }) + } else { + Err(format!("Block number {} is larger than a usize", number)) + } + }) + .map_err(|e| format!("Failed to get block number: {}", e)) +} + +/// Returns the value of the `get_deposit_count()` call at the given `address` for the given +/// `block_number`. +/// +/// Assumes that the `address` has the same ABI as the eth2 deposit contract. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +pub fn get_deposit_count( + endpoint: &str, + address: &str, + block_number: u64, + timeout: Duration, +) -> impl Future, Error = String> { + call( + endpoint, + address, + DEPOSIT_COUNT_FN_SIGNATURE, + block_number, + timeout, + ) + .and_then(|result| result.ok_or_else(|| "No response to deposit count".to_string())) + .and_then(|bytes| { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_COUNT_RESPONSE_BYTES { + let mut array = [0; 8]; + array.copy_from_slice(&bytes[32 + 32..32 + 32 + 8]); + Ok(Some(u64::from_le_bytes(array))) + } else { + Err(format!( + "Deposit count response was not {} bytes: {:?}", + DEPOSIT_COUNT_RESPONSE_BYTES, bytes + )) + } + }) +} + +/// Returns the value of the `get_hash_tree_root()` call at the given `block_number`. +/// +/// Assumes that the `address` has the same ABI as the eth2 deposit contract. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +pub fn get_deposit_root( + endpoint: &str, + address: &str, + block_number: u64, + timeout: Duration, +) -> impl Future, Error = String> { + call( + endpoint, + address, + DEPOSIT_ROOT_FN_SIGNATURE, + block_number, + timeout, + ) + .and_then(|result| result.ok_or_else(|| "No response to deposit root".to_string())) + .and_then(|bytes| { + if bytes.is_empty() { + Ok(None) + } else if bytes.len() == DEPOSIT_ROOT_BYTES { + Ok(Some(Hash256::from_slice(&bytes))) + } else { + Err(format!( + "Deposit root response was not {} bytes: {:?}", + DEPOSIT_ROOT_BYTES, bytes + )) + } + }) +} + +/// Performs a instant, no-transaction call to the contract `address` with the given `0x`-prefixed +/// `hex_data`. +/// +/// Returns bytes, if any. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +fn call( + endpoint: &str, + address: &str, + hex_data: &str, + block_number: u64, + timeout: Duration, +) -> impl Future>, Error = String> { + let params = json! ([ + { + "to": address, + "data": hex_data, + }, + format!("0x{:x}", block_number) + ]); + + send_rpc_request(endpoint, "eth_call", params, timeout).and_then(|response_body| { + match response_result(&response_body)? { + None => Ok(None), + Some(result) => { + let hex = result + .as_str() + .map(|s| s.to_string()) + .ok_or_else(|| "'result' value was not a string".to_string())?; + + Ok(Some(hex_to_bytes(&hex)?)) + } + } + }) +} + +/// A reduced set of fields from an Eth1 contract log. +#[derive(Debug, PartialEq, Clone)] +pub struct Log { + pub(crate) block_number: u64, + pub(crate) data: Vec, +} + +/// Returns logs for the `DEPOSIT_EVENT_TOPIC`, for the given `address` in the given +/// `block_height_range`. +/// +/// It's not clear from the Ethereum JSON-RPC docs if this range is inclusive or not. +/// +/// Uses HTTP JSON RPC at `endpoint`. E.g., `http://localhost:8545`. +pub fn get_deposit_logs_in_range( + endpoint: &str, + address: &str, + block_height_range: Range, + timeout: Duration, +) -> impl Future, Error = String> { + let params = json! ([{ + "address": address, + "topics": [DEPOSIT_EVENT_TOPIC], + "fromBlock": format!("0x{:x}", block_height_range.start), + "toBlock": format!("0x{:x}", block_height_range.end), + }]); + + send_rpc_request(endpoint, "eth_getLogs", params, timeout) + .and_then(|response_body| { + response_result(&response_body)? + .ok_or_else(|| "No result field was returned for deposit logs".to_string())? + .as_array() + .cloned() + .ok_or_else(|| "'result' value was not an array".to_string())? + .into_iter() + .map(|value| { + let block_number = value + .get("blockNumber") + .ok_or_else(|| "No block number field in log")? + .as_str() + .ok_or_else(|| "Block number was not string")?; + + let data = value + .get("data") + .ok_or_else(|| "No block number field in log")? + .as_str() + .ok_or_else(|| "Data was not string")?; + + Ok(Log { + block_number: hex_to_u64_be(&block_number)?, + data: hex_to_bytes(data)?, + }) + }) + .collect::, String>>() + }) + .map_err(|e| format!("Failed to get logs in range: {}", e)) +} + +/// Sends an RPC request to `endpoint`, using a POST with the given `body`. +/// +/// Tries to receive the response and parse the body as a `String`. +pub fn send_rpc_request( + endpoint: &str, + method: &str, + params: Value, + timeout: Duration, +) -> impl Future { + let body = json! ({ + "jsonrpc": "2.0", + "method": method, + "params": params, + "id": 1 + }) + .to_string(); + + // Note: it is not ideal to create a new client for each request. + // + // A better solution would be to create some struct that contains a built client and pass it + // around (similar to the `web3` crate's `Transport` structs). + ClientBuilder::new() + .timeout(timeout) + .build() + .expect("The builder should always build a client") + .post(endpoint) + .header(CONTENT_TYPE, "application/json") + .body(body) + .send() + .map_err(|e| format!("Request failed: {:?}", e)) + .and_then(|response| { + if response.status() != StatusCode::OK { + Err(format!( + "Response HTTP status was not 200 OK: {}.", + response.status() + )) + } else { + Ok(response) + } + }) + .and_then(|response| { + response + .headers() + .get(CONTENT_TYPE) + .ok_or_else(|| "No content-type header in response".to_string()) + .and_then(|encoding| { + encoding + .to_str() + .map(|s| s.to_string()) + .map_err(|e| format!("Failed to parse content-type header: {}", e)) + }) + .map(|encoding| (response, encoding)) + }) + .and_then(|(response, encoding)| { + response + .into_body() + .concat2() + .map(|chunk| chunk.iter().cloned().collect::>()) + .map_err(|e| format!("Failed to receive body: {:?}", e)) + .and_then(move |bytes| match encoding.as_str() { + "application/json" => Ok(bytes), + "application/json; charset=utf-8" => Ok(bytes), + // Note: gzip is not presently working because we always seem to get an empty + // response from the server. + // + // I expect this is some simple-to-solve issue for someone who is familiar with + // the eth1 JSON RPC. + // + // Some public-facing web3 servers use gzip to compress their traffic, it would + // be good to support this. + "application/x-gzip" => { + let mut decoder = Decoder::new(&bytes[..]) + .map_err(|e| format!("Failed to create gzip decoder: {}", e))?; + let mut decompressed = vec![]; + decoder + .read_to_end(&mut decompressed) + .map_err(|e| format!("Failed to decompress gzip data: {}", e))?; + + Ok(decompressed) + } + other => Err(format!("Unsupported encoding: {}", other)), + }) + .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) + .map_err(|e| format!("Failed to receive body: {:?}", e)) + }) +} + +/// Accepts an entire HTTP body (as a string) and returns the `result` field, as a serde `Value`. +fn response_result(response: &str) -> Result, String> { + Ok(serde_json::from_str::(&response) + .map_err(|e| format!("Failed to parse response: {:?}", e))? + .get("result") + .cloned() + .map(Some) + .unwrap_or_else(|| None)) +} + +/// Parses a `0x`-prefixed, **big-endian** hex string as a u64. +/// +/// Note: the JSON-RPC encodes integers as big-endian. The deposit contract uses little-endian. +/// Therefore, this function is only useful for numbers encoded by the JSON RPC. +/// +/// E.g., `0x01 == 1` +fn hex_to_u64_be(hex: &str) -> Result { + u64::from_str_radix(strip_prefix(hex)?, 16) + .map_err(|e| format!("Failed to parse hex as u64: {:?}", e)) +} + +/// Parses a `0x`-prefixed, big-endian hex string as bytes. +/// +/// E.g., `0x0102 == vec![1, 2]` +fn hex_to_bytes(hex: &str) -> Result, String> { + hex::decode(strip_prefix(hex)?).map_err(|e| format!("Failed to parse hex as bytes: {:?}", e)) +} + +/// Removes the `0x` prefix from some bytes. Returns an error if the prefix is not present. +fn strip_prefix(hex: &str) -> Result<&str, String> { + if hex.starts_with("0x") { + Ok(&hex[2..]) + } else { + Err("Hex string did not start with `0x`".to_string()) + } +} diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs new file mode 100644 index 0000000000..88e6981473 --- /dev/null +++ b/beacon_node/eth1/src/inner.rs @@ -0,0 +1,27 @@ +use crate::Config; +use crate::{block_cache::BlockCache, deposit_cache::DepositCache}; +use parking_lot::RwLock; + +#[derive(Default)] +pub struct DepositUpdater { + pub cache: DepositCache, + pub last_processed_block: Option, +} + +#[derive(Default)] +pub struct Inner { + pub block_cache: RwLock, + pub deposit_cache: RwLock, + pub config: RwLock, +} + +impl Inner { + /// Prunes the block cache to `self.target_block_cache_len`. + /// + /// Is a no-op if `self.target_block_cache_len` is `None`. + pub fn prune_blocks(&self) { + if let Some(block_cache_truncation) = self.config.read().block_cache_truncation { + self.block_cache.write().truncate(block_cache_truncation); + } + } +} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs new file mode 100644 index 0000000000..f1bc5b852f --- /dev/null +++ b/beacon_node/eth1/src/lib.rs @@ -0,0 +1,11 @@ +mod block_cache; +mod deposit_cache; +mod deposit_log; +pub mod http; +mod inner; +mod service; + +pub use block_cache::{BlockCache, Eth1Block}; +pub use deposit_cache::DepositCache; +pub use deposit_log::DepositLog; +pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs new file mode 100644 index 0000000000..5ec89d3bf2 --- /dev/null +++ b/beacon_node/eth1/src/service.rs @@ -0,0 +1,643 @@ +use crate::{ + block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, + deposit_cache::Error as DepositCacheError, + http::{ + get_block, get_block_number, get_deposit_count, get_deposit_logs_in_range, get_deposit_root, + }, + inner::{DepositUpdater, Inner}, + DepositLog, +}; +use exit_future::Exit; +use futures::{ + future::{loop_fn, Loop}, + stream, Future, Stream, +}; +use parking_lot::{RwLock, RwLockReadGuard}; +use serde::{Deserialize, Serialize}; +use slog::{debug, error, trace, Logger}; +use std::ops::{Range, RangeInclusive}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; + +const STANDARD_TIMEOUT_MILLIS: u64 = 15_000; + +/// Timeout when doing a eth_blockNumber call. +const BLOCK_NUMBER_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; +/// Timeout when doing an eth_getBlockByNumber call. +const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; +/// Timeout when doing an eth_call to read the deposit contract root. +const GET_DEPOSIT_ROOT_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; +/// Timeout when doing an eth_call to read the deposit contract deposit count. +const GET_DEPOSIT_COUNT_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; +/// Timeout when doing an eth_getLogs to read the deposit contract logs. +const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + /// The remote node is less synced that we expect, it is not useful until has done more + /// syncing. + RemoteNotSynced { + next_required_block: u64, + remote_highest_block: u64, + follow_distance: u64, + }, + /// Failed to download a block from the eth1 node. + BlockDownloadFailed(String), + /// Failed to get the current block number from the eth1 node. + GetBlockNumberFailed(String), + /// Failed to read the deposit contract root from the eth1 node. + GetDepositRootFailed(String), + /// Failed to read the deposit contract deposit count from the eth1 node. + GetDepositCountFailed(String), + /// Failed to read the deposit contract root from the eth1 node. + GetDepositLogsFailed(String), + /// There was an inconsistency when adding a block to the cache. + FailedToInsertEth1Block(BlockCacheError), + /// There was an inconsistency when adding a deposit to the cache. + FailedToInsertDeposit(DepositCacheError), + /// A log downloaded from the eth1 contract was not well formed. + FailedToParseDepositLog { + block_range: Range, + error: String, + }, + /// There was an unexpected internal error. + Internal(String), +} + +/// The success message for an Eth1Data cache update. +#[derive(Debug, PartialEq, Clone)] +pub enum BlockCacheUpdateOutcome { + /// The cache was sucessfully updated. + Success { + blocks_imported: usize, + head_block_number: Option, + }, +} + +/// The success message for an Eth1 deposit cache update. +#[derive(Debug, PartialEq, Clone)] +pub enum DepositCacheUpdateOutcome { + /// The cache was sucessfully updated. + Success { logs_imported: usize }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. + pub endpoint: String, + /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. + pub deposit_contract_address: String, + /// Defines the first block that the `DepositCache` will start searching for deposit logs. + /// + /// Setting too high can result in missed logs. Setting too low will result in unnecessary + /// calls to the Eth1 node's HTTP JSON RPC. + pub deposit_contract_deploy_block: u64, + /// Defines the lowest block number that should be downloaded and added to the `BlockCache`. + pub lowest_cached_block_number: u64, + /// Defines how far behind the Eth1 node's head we should follow. + /// + /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`. + pub follow_distance: u64, + /// Defines the number of blocks that should be retained each time the `BlockCache` calls truncate on + /// itself. + pub block_cache_truncation: Option, + /// The interval between updates when using the `auto_update` function. + pub auto_update_interval_millis: u64, + /// The span of blocks we should query for logs, per request. + pub blocks_per_log_query: usize, + /// The maximum number of log requests per update. + pub max_log_requests_per_update: Option, + /// The maximum number of log requests per update. + pub max_blocks_per_update: Option, +} + +impl Default for Config { + fn default() -> Self { + Self { + endpoint: "http://localhost:8545".into(), + deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), + deposit_contract_deploy_block: 0, + lowest_cached_block_number: 0, + follow_distance: 128, + block_cache_truncation: Some(4_096), + auto_update_interval_millis: 500, + blocks_per_log_query: 1_000, + max_log_requests_per_update: None, + max_blocks_per_update: None, + } + } +} + +/// Provides a set of Eth1 caches and async functions to update them. +/// +/// Stores the following caches: +/// +/// - Deposit cache: stores all deposit logs from the deposit contract. +/// - Block cache: stores some number of eth1 blocks. +#[derive(Clone)] +pub struct Service { + inner: Arc, + pub log: Logger, +} + +impl Service { + /// Creates a new service. Does not attempt to connect to the eth1 node. + pub fn new(config: Config, log: Logger) -> Self { + Self { + inner: Arc::new(Inner { + config: RwLock::new(config), + ..Inner::default() + }), + log, + } + } + + /// Provides access to the block cache. + pub fn blocks(&self) -> &RwLock { + &self.inner.block_cache + } + + /// Provides access to the deposit cache. + pub fn deposits(&self) -> &RwLock { + &self.inner.deposit_cache + } + + /// Returns the number of currently cached blocks. + pub fn block_cache_len(&self) -> usize { + self.blocks().read().len() + } + + /// Returns the number deposits available in the deposit cache. + pub fn deposit_cache_len(&self) -> usize { + self.deposits().read().cache.len() + } + + /// Read the service's configuration. + pub fn config(&self) -> RwLockReadGuard { + self.inner.config.read() + } + + /// Updates the configuration in `self to be `new_config`. + /// + /// Will truncate the block cache if the new configure specifies truncation. + pub fn update_config(&self, new_config: Config) -> Result<(), String> { + let mut old_config = self.inner.config.write(); + + if new_config.deposit_contract_deploy_block != old_config.deposit_contract_deploy_block { + // This may be possible, I just haven't looked into the details to ensure it's safe. + Err("Updating deposit_contract_deploy_block is not supported".to_string()) + } else { + *old_config = new_config; + + // Prevents a locking condition when calling prune_blocks. + drop(old_config); + + self.inner.prune_blocks(); + + Ok(()) + } + } + + /// Set the lowest block that the block cache will store. + /// + /// Note: this block may not always be present if truncating is enabled. + pub fn set_lowest_cached_block(&self, block_number: u64) { + self.inner.config.write().lowest_cached_block_number = block_number; + } + + /// Update the deposit and block cache, returning an error if either fail. + /// + /// ## Returns + /// + /// - Ok(_) if the update was successful (the cache may or may not have been modified). + /// - Err(_) if there is an error. + /// + /// Emits logs for debugging and errors. + pub fn update( + &self, + ) -> impl Future + { + let log_a = self.log.clone(); + let log_b = self.log.clone(); + + let deposit_future = self + .update_deposit_cache() + .map_err(|e| format!("Failed to update eth1 cache: {:?}", e)) + .then(move |result| { + match &result { + Ok(DepositCacheUpdateOutcome::Success { logs_imported }) => trace!( + log_a, + "Updated eth1 deposit cache"; + "logs_imported" => logs_imported, + ), + Err(e) => error!( + log_a, + "Failed to update eth1 deposit cache"; + "error" => e + ), + }; + + result + }); + + let block_future = self + .update_block_cache() + .map_err(|e| format!("Failed to update eth1 cache: {:?}", e)) + .then(move |result| { + match &result { + Ok(BlockCacheUpdateOutcome::Success { + blocks_imported, + head_block_number, + }) => trace!( + log_b, + "Updated eth1 block cache"; + "blocks_imported" => blocks_imported, + "head_block" => head_block_number, + ), + Err(e) => error!( + log_b, + "Failed to update eth1 block cache"; + "error" => e + ), + }; + + result + }); + + deposit_future.join(block_future) + } + + /// A looping future that updates the cache, then waits `config.auto_update_interval` before + /// updating it again. + /// + /// ## Returns + /// + /// - Ok(_) if the update was successful (the cache may or may not have been modified). + /// - Err(_) if there is an error. + /// + /// Emits logs for debugging and errors. + pub fn auto_update(&self, exit: Exit) -> impl Future { + let service = self.clone(); + let log = self.log.clone(); + let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); + + loop_fn((), move |()| { + let exit = exit.clone(); + let service = service.clone(); + let log_a = log.clone(); + let log_b = log.clone(); + + service + .update() + .then(move |update_result| { + match update_result { + Err(e) => error!( + log_a, + "Failed to update eth1 genesis cache"; + "retry_millis" => update_interval.as_millis(), + "error" => e, + ), + Ok((deposit, block)) => debug!( + log_a, + "Updated eth1 genesis cache"; + "retry_millis" => update_interval.as_millis(), + "blocks" => format!("{:?}", block), + "deposits" => format!("{:?}", deposit), + ), + }; + + // Do not break the loop if there is an update failure. + Ok(()) + }) + .and_then(move |_| Delay::new(Instant::now() + update_interval)) + .then(move |timer_result| { + if let Err(e) = timer_result { + error!( + log_b, + "Failed to trigger eth1 cache update delay"; + "error" => format!("{:?}", e), + ); + } + // Do not break the loop if there is an timer failure. + Ok(()) + }) + .map(move |_| { + if exit.is_live() { + Loop::Continue(()) + } else { + Loop::Break(()) + } + }) + }) + } + + /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured + /// follow-distance block. + /// + /// Will process no more than `BLOCKS_PER_LOG_QUERY * MAX_LOG_REQUESTS_PER_UPDATE` blocks in a + /// single update. + /// + /// ## Resolves with + /// + /// - Ok(_) if the update was successful (the cache may or may not have been modified). + /// - Err(_) if there is an error. + /// + /// Emits logs for debugging and errors. + pub fn update_deposit_cache( + &self, + ) -> impl Future { + let service_1 = self.clone(); + let service_2 = self.clone(); + let blocks_per_log_query = self.config().blocks_per_log_query; + let max_log_requests_per_update = self + .config() + .max_log_requests_per_update + .unwrap_or_else(usize::max_value); + + let next_required_block = self + .deposits() + .read() + .last_processed_block + .map(|n| n + 1) + .unwrap_or_else(|| self.config().deposit_contract_deploy_block); + + get_new_block_numbers( + &self.config().endpoint, + next_required_block, + self.config().follow_distance, + ) + .map(move |range| { + range + .map(|range| { + range + .collect::>() + .chunks(blocks_per_log_query) + .take(max_log_requests_per_update) + .map(|vec| { + let first = vec.first().cloned().unwrap_or_else(|| 0); + let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0); + (first..last) + }) + .collect::>>() + }) + .unwrap_or_else(|| vec![]) + }) + .and_then(move |block_number_chunks| { + stream::unfold( + block_number_chunks.into_iter(), + move |mut chunks| match chunks.next() { + Some(chunk) => { + let chunk_1 = chunk.clone(); + Some( + get_deposit_logs_in_range( + &service_1.config().endpoint, + &service_1.config().deposit_contract_address, + chunk, + Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), + ) + .map_err(Error::GetDepositLogsFailed) + .map(|logs| (chunk_1, logs)) + .map(|logs| (logs, chunks)), + ) + } + None => None, + }, + ) + .fold(0, move |mut sum, (block_range, log_chunk)| { + let mut cache = service_2.deposits().write(); + + log_chunk + .into_iter() + .map(|raw_log| { + DepositLog::from_log(&raw_log).map_err(|error| { + Error::FailedToParseDepositLog { + block_range: block_range.clone(), + error, + } + }) + }) + // Return early if any of the logs cannot be parsed. + // + // This costs an additional `collect`, however it enforces that no logs are + // imported if any one of them cannot be parsed. + .collect::, _>>()? + .into_iter() + .map(|deposit_log| { + cache + .cache + .insert_log(deposit_log) + .map_err(Error::FailedToInsertDeposit)?; + + sum += 1; + + Ok(()) + }) + // Returns if a deposit is unable to be added to the cache. + // + // If this error occurs, the cache will no longer be guaranteed to hold either + // none or all of the logs for each block (i.e., they may exist _some_ logs for + // a block, but not _all_ logs for that block). This scenario can cause the + // node to choose an invalid genesis state or propose an invalid block. + .collect::>()?; + + cache.last_processed_block = Some(block_range.end.saturating_sub(1)); + + Ok(sum) + }) + .map(|logs_imported| DepositCacheUpdateOutcome::Success { logs_imported }) + }) + } + + /// Contacts the remote eth1 node and attempts to import all blocks up to the configured + /// follow-distance block. + /// + /// If configured, prunes the block cache after importing new blocks. + /// + /// ## Resolves with + /// + /// - Ok(_) if the update was successful (the cache may or may not have been modified). + /// - Err(_) if there is an error. + /// + /// Emits logs for debugging and errors. + pub fn update_block_cache(&self) -> impl Future { + let cache_1 = self.inner.clone(); + let cache_2 = self.inner.clone(); + let cache_3 = self.inner.clone(); + let cache_4 = self.inner.clone(); + let cache_5 = self.inner.clone(); + + let block_cache_truncation = self.config().block_cache_truncation; + let max_blocks_per_update = self + .config() + .max_blocks_per_update + .unwrap_or_else(usize::max_value); + + let next_required_block = cache_1 + .block_cache + .read() + .highest_block_number() + .map(|n| n + 1) + .unwrap_or_else(|| self.config().lowest_cached_block_number); + + get_new_block_numbers( + &self.config().endpoint, + next_required_block, + self.config().follow_distance, + ) + // Map the range of required blocks into a Vec. + // + // If the required range is larger than the size of the cache, drop the exiting cache + // because it's exipred and just download enough blocks to fill the cache. + .and_then(move |range| { + range + .map(|range| { + if range.start() > range.end() { + // Note: this check is not strictly necessary, however it remains to safe + // guard against any regression which may cause an underflow in a following + // subtraction operation. + Err(Error::Internal("Range was not increasing".into())) + } else { + let range_size = range.end() - range.start(); + let max_size = block_cache_truncation + .map(|n| n as u64) + .unwrap_or_else(u64::max_value); + + if range_size > max_size { + // If the range of required blocks is larger than `max_size`, drop all + // existing blocks and download `max_size` count of blocks. + let first_block = range.end() - max_size; + (*cache_5.block_cache.write()) = BlockCache::default(); + Ok((first_block..=*range.end()).collect::>()) + } else { + Ok(range.collect::>()) + } + } + }) + .unwrap_or_else(|| Ok(vec![])) + }) + // Download the range of blocks and sequentially import them into the cache. + .and_then(move |required_block_numbers| { + let required_block_numbers = required_block_numbers + .into_iter() + .take(max_blocks_per_update); + + // Produce a stream from the list of required block numbers and return a future that + // consumes the it. + stream::unfold( + required_block_numbers, + move |mut block_numbers| match block_numbers.next() { + Some(block_number) => Some( + download_eth1_block(cache_2.clone(), block_number) + .map(|v| (v, block_numbers)), + ), + None => None, + }, + ) + .fold(0, move |sum, eth1_block| { + cache_3 + .block_cache + .write() + .insert_root_or_child(eth1_block) + .map_err(Error::FailedToInsertEth1Block)?; + + Ok(sum + 1) + }) + }) + .and_then(move |blocks_imported| { + // Prune the block cache, preventing it from growing too large. + cache_4.prune_blocks(); + + Ok(BlockCacheUpdateOutcome::Success { + blocks_imported, + head_block_number: cache_4.clone().block_cache.read().highest_block_number(), + }) + }) + } +} + +/// Determine the range of blocks that need to be downloaded, given the remotes best block and +/// the locally stored best block. +fn get_new_block_numbers<'a>( + endpoint: &str, + next_required_block: u64, + follow_distance: u64, +) -> impl Future>, Error = Error> + 'a { + get_block_number(endpoint, Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) + .map_err(Error::GetBlockNumberFailed) + .and_then(move |remote_highest_block| { + let remote_follow_block = remote_highest_block.saturating_sub(follow_distance); + + if next_required_block <= remote_follow_block { + Ok(Some(next_required_block..=remote_follow_block)) + } else if next_required_block > remote_highest_block + 1 { + // If this is the case, the node must have gone "backwards" in terms of it's sync + // (i.e., it's head block is lower than it was before). + // + // We assume that the `follow_distance` should be sufficient to ensure this never + // happens, otherwise it is an error. + Err(Error::RemoteNotSynced { + next_required_block, + remote_highest_block, + follow_distance, + }) + } else { + // Return an empty range. + Ok(None) + } + }) +} + +/// Downloads the `(block, deposit_root, deposit_count)` tuple from an eth1 node for the given +/// `block_number`. +/// +/// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. +fn download_eth1_block<'a>( + cache: Arc, + block_number: u64, +) -> impl Future + 'a { + // Performs a `get_blockByNumber` call to an eth1 node. + get_block( + &cache.config.read().endpoint, + block_number, + Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), + ) + .map_err(Error::BlockDownloadFailed) + .join3( + // Perform 2x `eth_call` via an eth1 node to read the deposit contract root and count. + get_deposit_root( + &cache.config.read().endpoint, + &cache.config.read().deposit_contract_address, + block_number, + Duration::from_millis(GET_DEPOSIT_ROOT_TIMEOUT_MILLIS), + ) + .map_err(Error::GetDepositRootFailed), + get_deposit_count( + &cache.config.read().endpoint, + &cache.config.read().deposit_contract_address, + block_number, + Duration::from_millis(GET_DEPOSIT_COUNT_TIMEOUT_MILLIS), + ) + .map_err(Error::GetDepositCountFailed), + ) + .map(|(http_block, deposit_root, deposit_count)| Eth1Block { + hash: http_block.hash, + number: http_block.number, + timestamp: http_block.timestamp, + deposit_root, + deposit_count, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use toml; + + #[test] + fn serde_serialize() { + let serialized = + toml::to_string(&Config::default()).expect("Should serde encode default config"); + toml::from_str::(&serialized).expect("Should serde decode default config"); + } +} diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs new file mode 100644 index 0000000000..8f471815ae --- /dev/null +++ b/beacon_node/eth1/tests/test.rs @@ -0,0 +1,713 @@ +#![cfg(test)] +use environment::{Environment, EnvironmentBuilder}; +use eth1::http::{get_deposit_count, get_deposit_logs_in_range, get_deposit_root, Block, Log}; +use eth1::{Config, Service}; +use eth1::{DepositCache, DepositLog}; +use eth1_test_rig::GanacheEth1Instance; +use exit_future; +use futures::Future; +use merkle_proof::verify_merkle_proof; +use std::ops::Range; +use std::time::Duration; +use tokio::runtime::Runtime; +use tree_hash::TreeHash; +use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; +use web3::{transports::Http, Web3}; + +const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; + +pub fn new_env() -> Environment { + EnvironmentBuilder::minimal() + // Use a single thread, so that when all tests are run in parallel they don't have so many + // threads. + .single_thread_tokio_runtime() + .expect("should start tokio runtime") + .null_logger() + .expect("should start null logger") + .build() + .expect("should build env") +} + +fn timeout() -> Duration { + Duration::from_secs(1) +} + +fn random_deposit_data() -> DepositData { + let keypair = Keypair::random(); + + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 32_000_000_000, + signature: Signature::empty_signature().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec()); + + deposit +} + +/// Blocking operation to get the deposit logs from the `deposit_contract`. +fn blocking_deposit_logs( + runtime: &mut Runtime, + eth1: &GanacheEth1Instance, + range: Range, +) -> Vec { + runtime + .block_on(get_deposit_logs_in_range( + ð1.endpoint(), + ð1.deposit_contract.address(), + range, + timeout(), + )) + .expect("should get logs") +} + +/// Blocking operation to get the deposit root from the `deposit_contract`. +fn blocking_deposit_root( + runtime: &mut Runtime, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + runtime + .block_on(get_deposit_root( + ð1.endpoint(), + ð1.deposit_contract.address(), + block_number, + timeout(), + )) + .expect("should get deposit root") +} + +/// Blocking operation to get the deposit count from the `deposit_contract`. +fn blocking_deposit_count( + runtime: &mut Runtime, + eth1: &GanacheEth1Instance, + block_number: u64, +) -> Option { + runtime + .block_on(get_deposit_count( + ð1.endpoint(), + ð1.deposit_contract.address(), + block_number, + timeout(), + )) + .expect("should get deposit count") +} + +fn get_block_number(runtime: &mut Runtime, web3: &Web3) -> u64 { + runtime + .block_on(web3.eth().block_number().map(|v| v.as_u64())) + .expect("should get block number") +} + +mod auto_update { + use super::*; + + #[test] + fn can_auto_update() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let now = get_block_number(runtime, &web3); + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + ..Config::default() + }, + log, + ); + + // NOTE: this test is sensitive to the response speed of the external web3 server. If + // you're experiencing failures, try increasing the update_interval. + let update_interval = Duration::from_millis(2_000); + + assert_eq!( + service.block_cache_len(), + 0, + "should have imported no blocks" + ); + assert_eq!( + service.deposit_cache_len(), + 0, + "should have imported no deposits" + ); + + let (_exit, signal) = exit_future::signal(); + + runtime.executor().spawn(service.auto_update(signal)); + + let n = 4; + + for _ in 0..n { + deposit_contract + .deposit(runtime, random_deposit_data()) + .expect("should do first deposits"); + } + + std::thread::sleep(update_interval * 5); + + assert!( + service.deposit_cache_len() >= n, + "should have imported n deposits" + ); + + for _ in 0..n { + deposit_contract + .deposit(runtime, random_deposit_data()) + .expect("should do second deposits"); + } + + std::thread::sleep(update_interval * 4); + + assert!( + service.block_cache_len() >= n * 2, + "should have imported all blocks" + ); + assert!( + service.deposit_cache_len() >= n * 2, + "should have imported all deposits, not {}", + service.deposit_cache_len() + ); + } +} + +mod eth1_cache { + use super::*; + + #[test] + fn simple_scenario() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + for follow_distance in 0..2 { + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let initial_block_number = get_block_number(runtime, &web3); + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: initial_block_number, + follow_distance, + ..Config::default() + }, + log.clone(), + ); + + // Create some blocks and then consume them, performing the test `rounds` times. + for round in 0..2 { + let blocks = 4; + + let initial = if round == 0 { + initial_block_number + } else { + service + .blocks() + .read() + .highest_block_number() + .map(|n| n + follow_distance) + .expect("should have a latest block after the first round") + }; + + for _ in 0..blocks { + runtime + .block_on(eth1.ganache.evm_mine()) + .expect("should mine block"); + } + + runtime + .block_on(service.update_block_cache()) + .expect("should update cache"); + + runtime + .block_on(service.update_block_cache()) + .expect("should update cache when nothing has changed"); + + assert_eq!( + service + .blocks() + .read() + .highest_block_number() + .map(|n| n + follow_distance), + Some(initial + blocks), + "should update {} blocks in round {} (follow {})", + blocks, + round, + follow_distance, + ); + } + } + } + + /// Tests the case where we attempt to download more blocks than will fit in the cache. + #[test] + fn big_skip() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let cache_len = 4; + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: get_block_number(runtime, &web3), + follow_distance: 0, + block_cache_truncation: Some(cache_len), + ..Config::default() + }, + log, + ); + + let blocks = cache_len * 2; + + for _ in 0..blocks { + runtime + .block_on(eth1.ganache.evm_mine()) + .expect("should mine block") + } + + runtime + .block_on(service.update_block_cache()) + .expect("should update cache"); + + assert_eq!( + service.block_cache_len(), + cache_len, + "should not grow cache beyond target" + ); + } + + /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the + /// cache size. + #[test] + fn pruning() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let cache_len = 4; + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: get_block_number(runtime, &web3), + follow_distance: 0, + block_cache_truncation: Some(cache_len), + ..Config::default() + }, + log, + ); + + for _ in 0..4 { + for _ in 0..cache_len / 2 { + runtime + .block_on(eth1.ganache.evm_mine()) + .expect("should mine block") + } + runtime + .block_on(service.update_block_cache()) + .expect("should update cache"); + } + + assert_eq!( + service.block_cache_len(), + cache_len, + "should not grow cache beyond target" + ); + } + + #[test] + fn double_update() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let n = 16; + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + lowest_cached_block_number: get_block_number(runtime, &web3), + follow_distance: 0, + ..Config::default() + }, + log, + ); + + for _ in 0..n { + runtime + .block_on(eth1.ganache.evm_mine()) + .expect("should mine block") + } + + runtime + .block_on( + service + .update_block_cache() + .join(service.update_block_cache()), + ) + .expect("should perform two simultaneous updates"); + + assert!(service.block_cache_len() >= n, "should grow the cache"); + } +} + +mod deposit_tree { + use super::*; + + #[test] + fn updating() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let n = 4; + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let start_block = get_block_number(runtime, &web3); + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: start_block, + follow_distance: 0, + ..Config::default() + }, + log, + ); + + for round in 0..3 { + let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect(); + + for deposit in &deposits { + deposit_contract + .deposit(runtime, deposit.clone()) + .expect("should perform a deposit"); + } + + runtime + .block_on(service.update_deposit_cache()) + .expect("should perform update"); + + runtime + .block_on(service.update_deposit_cache()) + .expect("should perform update when nothing has changed"); + + let first = n * round; + let last = n * (round + 1); + + let (_root, local_deposits) = service + .deposits() + .read() + .cache + .get_deposits(first..last, last, 32) + .expect(&format!("should get deposits in round {}", round)); + + assert_eq!( + local_deposits.len(), + n as usize, + "should get the right number of deposits in round {}", + round + ); + + assert_eq!( + local_deposits + .iter() + .map(|d| d.data.clone()) + .collect::>(), + deposits.to_vec(), + "obtained deposits should match those submitted in round {}", + round + ); + } + } + + #[test] + fn double_update() { + let mut env = new_env(); + let log = env.core_context().log; + let runtime = env.runtime(); + + let n = 8; + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let start_block = get_block_number(runtime, &web3); + + let service = Service::new( + Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: start_block, + lowest_cached_block_number: start_block, + follow_distance: 0, + ..Config::default() + }, + log, + ); + + let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect(); + + for deposit in &deposits { + deposit_contract + .deposit(runtime, deposit.clone()) + .expect("should perform a deposit"); + } + + runtime + .block_on( + service + .update_deposit_cache() + .join(service.update_deposit_cache()), + ) + .expect("should perform two updates concurrently"); + + assert_eq!(service.deposit_cache_len(), n); + } + + #[test] + fn cache_consistency() { + let mut env = new_env(); + let runtime = env.runtime(); + + let n = 8; + + let deposits: Vec<_> = (0..n).into_iter().map(|_| random_deposit_data()).collect(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let mut deposit_roots = vec![]; + let mut deposit_counts = vec![]; + + // Perform deposits to the smart contract, recording it's state along the way. + for deposit in &deposits { + deposit_contract + .deposit(runtime, deposit.clone()) + .expect("should perform a deposit"); + let block_number = get_block_number(runtime, &web3); + deposit_roots.push( + blocking_deposit_root(runtime, ð1, block_number) + .expect("should get root if contract exists"), + ); + deposit_counts.push( + blocking_deposit_count(runtime, ð1, block_number) + .expect("should get count if contract exists"), + ); + } + + let mut tree = DepositCache::default(); + + // Pull all the deposit logs from the contract. + let block_number = get_block_number(runtime, &web3); + let logs: Vec<_> = blocking_deposit_logs(runtime, ð1, 0..block_number) + .iter() + .map(|raw| DepositLog::from_log(raw).expect("should parse deposit log")) + .inspect(|log| { + tree.insert_log(log.clone()) + .expect("should add consecutive logs") + }) + .collect(); + + // Check the logs for invariants. + for i in 0..logs.len() { + let log = &logs[i]; + assert_eq!( + log.deposit_data, deposits[i], + "log {} should have correct deposit data", + i + ); + assert_eq!(log.index, i as u64, "log {} should have correct index", i); + } + + // For each deposit test some more invariants + for i in 0..n { + // Ensure the deposit count from the smart contract was as expected. + assert_eq!( + deposit_counts[i], + i as u64 + 1, + "deposit count should be accurate" + ); + + // Ensure that the root from the deposit tree matches what the contract reported. + let (root, deposits) = tree + .get_deposits(0..i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH) + .expect("should get deposits"); + assert_eq!( + root, deposit_roots[i], + "tree deposit root {} should match the contract", + i + ); + + // Ensure that the deposits all prove into the root from the smart contract. + let deposit_root = deposit_roots[i]; + for (j, deposit) in deposits.iter().enumerate() { + assert!( + verify_merkle_proof( + Hash256::from_slice(&deposit.data.tree_hash_root()), + &deposit.proof, + DEPOSIT_CONTRACT_TREE_DEPTH + 1, + j, + deposit_root + ), + "deposit merkle proof should prove into deposit contract root" + ) + } + } + } +} + +/// Tests for the base HTTP requests and response handlers. +mod http { + use super::*; + + fn get_block(runtime: &mut Runtime, eth1: &GanacheEth1Instance, block_number: u64) -> Block { + runtime + .block_on(eth1::http::get_block( + ð1.endpoint(), + block_number, + timeout(), + )) + .expect("should get block number") + } + + #[test] + fn incrementing_deposits() { + let mut env = new_env(); + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let block_number = get_block_number(runtime, &web3); + let logs = blocking_deposit_logs(runtime, ð1, 0..block_number); + assert_eq!(logs.len(), 0); + + let mut old_root = blocking_deposit_root(runtime, ð1, block_number); + let mut old_block = get_block(runtime, ð1, block_number); + let mut old_block_number = block_number; + + assert_eq!( + blocking_deposit_count(runtime, ð1, block_number), + Some(0), + "should have deposit count zero" + ); + + for i in 1..=8 { + runtime + .block_on(eth1.ganache.increase_time(1)) + .expect("should be able to increase time on ganache"); + + deposit_contract + .deposit(runtime, random_deposit_data()) + .expect("should perform a deposit"); + + // Check the logs. + let block_number = get_block_number(runtime, &web3); + let logs = blocking_deposit_logs(runtime, ð1, 0..block_number); + assert_eq!(logs.len(), i, "the number of logs should be as expected"); + + // Check the deposit count. + assert_eq!( + blocking_deposit_count(runtime, ð1, block_number), + Some(i as u64), + "should have a correct deposit count" + ); + + // Check the deposit root. + let new_root = blocking_deposit_root(runtime, ð1, block_number); + assert_ne!( + new_root, old_root, + "deposit root should change with each deposit" + ); + old_root = new_root; + + // Check the block hash. + let new_block = get_block(runtime, ð1, block_number); + assert_ne!( + new_block.hash, old_block.hash, + "block hash should change with each deposit" + ); + + // Check to ensure the timestamp is increasing + assert!( + old_block.timestamp <= new_block.timestamp, + "block timestamp should increase" + ); + + old_block = new_block.clone(); + + // Check the block number. + assert!( + block_number > old_block_number, + "block number should increase" + ); + old_block_number = block_number; + + // Check to ensure the block root is changing + assert_ne!( + new_root, + Some(new_block.hash), + "the deposit root should be different to the block hash" + ); + } + } +} diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index aa11d586ff..d8301ad8be 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -69,7 +69,7 @@ impl Behaviour { ); Ok(Behaviour { - eth2_rpc: RPC::new(log), + eth2_rpc: RPC::new(log.clone()), gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()), discovery: Discovery::new(local_key, net_conf, log)?, ping: Ping::new(ping_config), diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 2076615a9c..2bbb6a05c8 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -69,8 +69,8 @@ pub struct RPC { } impl RPC { - pub fn new(log: &slog::Logger) -> Self { - let log = log.new(o!("Service" => "Libp2p-RPC")); + pub fn new(log: slog::Logger) -> Self { + let log = log.new(o!("service" => "libp2p_rpc")); RPC { events: Vec::new(), marker: PhantomData, diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml new file mode 100644 index 0000000000..60d7f3f4b4 --- /dev/null +++ b/beacon_node/genesis/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "genesis" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dev-dependencies] +eth1_test_rig = { path = "../../tests/eth1_test_rig" } +futures = "0.1.25" + +[dependencies] +futures = "0.1.25" +types = { path = "../../eth2/types"} +environment = { path = "../../lighthouse/environment"} +eth1 = { path = "../eth1"} +rayon = "1.0" +state_processing = { path = "../../eth2/state_processing" } +merkle_proof = { path = "../../eth2/utils/merkle_proof" } +eth2_ssz = "0.1" +eth2_hashing = { path = "../../eth2/utils/eth2_hashing" } +tree_hash = "0.1" +tokio = "0.1.17" +parking_lot = "0.7" +slog = "^2.2.3" +exit-future = "0.1.4" +serde = "1.0" +serde_derive = "1.0" +int_to_bytes = { path = "../../eth2/utils/int_to_bytes" } diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs new file mode 100644 index 0000000000..9353ad33ae --- /dev/null +++ b/beacon_node/genesis/src/common.rs @@ -0,0 +1,44 @@ +use int_to_bytes::int_to_bytes32; +use merkle_proof::MerkleTree; +use rayon::prelude::*; +use tree_hash::TreeHash; +use types::{ChainSpec, Deposit, DepositData, Hash256}; + +/// Accepts the genesis block validator `DepositData` list and produces a list of `Deposit`, with +/// proofs. +pub fn genesis_deposits( + deposit_data: Vec, + spec: &ChainSpec, +) -> Result, String> { + let deposit_root_leaves = deposit_data + .par_iter() + .map(|data| Hash256::from_slice(&data.tree_hash_root())) + .collect::>(); + + let mut proofs = vec![]; + let depth = spec.deposit_contract_tree_depth as usize; + let mut tree = MerkleTree::create(&[], depth); + for (i, deposit_leaf) in deposit_root_leaves.iter().enumerate() { + if let Err(_) = tree.push_leaf(*deposit_leaf, depth) { + return Err(String::from("Failed to push leaf")); + } + + let (_, mut proof) = tree.generate_proof(i, depth); + proof.push(Hash256::from_slice(&int_to_bytes32((i + 1) as u64))); + + assert_eq!( + proof.len(), + depth + 1, + "Deposit proof should be correct len" + ); + + proofs.push(proof); + } + + Ok(deposit_data + .into_iter() + .zip(proofs.into_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect()) +} diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs new file mode 100644 index 0000000000..71632b2613 --- /dev/null +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -0,0 +1,379 @@ +pub use crate::{common::genesis_deposits, interop::interop_genesis_state}; +pub use eth1::Config as Eth1Config; + +use eth1::{DepositLog, Eth1Block, Service}; +use futures::{ + future, + future::{loop_fn, Loop}, + Future, +}; +use parking_lot::Mutex; +use slog::{debug, error, info, Logger}; +use state_processing::{ + initialize_beacon_state_from_eth1, is_valid_genesis_state, + per_block_processing::process_deposit, process_activations, +}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; +use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256}; + +/// Provides a service that connects to some Eth1 HTTP JSON-RPC endpoint and maintains a cache of eth1 +/// blocks and deposits, listening for the eth1 block that triggers eth2 genesis and returning the +/// genesis `BeaconState`. +/// +/// Is a wrapper around the `Service` struct of the `eth1` crate. +#[derive(Clone)] +pub struct Eth1GenesisService { + /// The underlying service. Access to this object is only required for testing and diagnosis. + pub core: Service, + /// The highest block number we've processed and determined it does not trigger genesis. + highest_processed_block: Arc>>, + /// Enabled when the genesis service should start downloading blocks. + /// + /// It is disabled until there are enough deposit logs to start syncing. + sync_blocks: Arc>, +} + +impl Eth1GenesisService { + /// Creates a new service. Does not attempt to connect to the Eth1 node. + pub fn new(config: Eth1Config, log: Logger) -> Self { + Self { + core: Service::new(config, log), + highest_processed_block: Arc::new(Mutex::new(None)), + sync_blocks: Arc::new(Mutex::new(false)), + } + } + + fn first_viable_eth1_block(&self, min_genesis_active_validator_count: usize) -> Option { + if self.core.deposit_cache_len() < min_genesis_active_validator_count { + None + } else { + self.core + .deposits() + .read() + .cache + .get(min_genesis_active_validator_count.saturating_sub(1)) + .map(|log| log.block_number) + } + } + + /// Returns a future that will keep updating the cache and resolve once it has discovered the + /// first Eth1 block that triggers an Eth2 genesis. + /// + /// ## Returns + /// + /// - `Ok(state)` once the canonical eth2 genesis state has been discovered. + /// - `Err(e)` if there is some internal error during updates. + pub fn wait_for_genesis_state( + &self, + update_interval: Duration, + spec: ChainSpec, + ) -> impl Future, Error = String> { + let service = self.clone(); + + loop_fn::<(ChainSpec, Option>), _, _, _>( + (spec, None), + move |(spec, state)| { + let service_1 = service.clone(); + let service_2 = service.clone(); + let service_3 = service.clone(); + let service_4 = service.clone(); + let log = service.core.log.clone(); + let min_genesis_active_validator_count = spec.min_genesis_active_validator_count; + + Delay::new(Instant::now() + update_interval) + .map_err(|e| format!("Delay between genesis deposit checks failed: {:?}", e)) + .and_then(move |()| { + service_1 + .core + .update_deposit_cache() + .map_err(|e| format!("{:?}", e)) + }) + .then(move |update_result| { + if let Err(e) = update_result { + error!( + log, + "Failed to update eth1 deposit cache"; + "error" => e + ) + } + + // Do not exit the loop if there is an error whilst updating. + Ok(()) + }) + // Only enable the `sync_blocks` flag if there are enough deposits to feasibly + // trigger genesis. + // + // Note: genesis is triggered by the _active_ validator count, not just the + // deposit count, so it's possible that block downloads are started too early. + // This is just wasteful, not erroneous. + .and_then(move |()| { + let mut sync_blocks = service_2.sync_blocks.lock(); + + if !(*sync_blocks) { + if let Some(viable_eth1_block) = service_2.first_viable_eth1_block( + min_genesis_active_validator_count as usize, + ) { + info!( + service_2.core.log, + "Minimum genesis deposit count met"; + "deposit_count" => min_genesis_active_validator_count, + "block_number" => viable_eth1_block, + ); + service_2.core.set_lowest_cached_block(viable_eth1_block); + *sync_blocks = true + } + } + + Ok(*sync_blocks) + }) + .and_then(move |should_update_block_cache| { + let maybe_update_future: Box + Send> = + if should_update_block_cache { + Box::new(service_3.core.update_block_cache().then( + move |update_result| { + if let Err(e) = update_result { + error!( + service_3.core.log, + "Failed to update eth1 block cache"; + "error" => format!("{:?}", e) + ); + } + + // Do not exit the loop if there is an error whilst updating. + Ok(()) + }, + )) + } else { + Box::new(future::ok(())) + }; + + maybe_update_future + }) + .and_then(move |()| { + if let Some(genesis_state) = service_4 + .scan_new_blocks::(&spec) + .map_err(|e| format!("Failed to scan for new blocks: {}", e))? + { + Ok(Loop::Break((spec, genesis_state))) + } else { + debug!( + service_4.core.log, + "No eth1 genesis block found"; + "cached_blocks" => service_4.core.block_cache_len(), + "cached_deposits" => service_4.core.deposit_cache_len(), + "cache_head" => service_4.highest_known_block(), + ); + + Ok(Loop::Continue((spec, state))) + } + }) + }, + ) + .map(|(_spec, state)| state) + } + + /// Processes any new blocks that have appeared since this function was last run. + /// + /// A `highest_processed_block` value is stored in `self`. This function will find any blocks + /// in it's caches that have a higher block number than `highest_processed_block` and check to + /// see if they would trigger an Eth2 genesis. + /// + /// Blocks are always tested in increasing order, starting with the lowest unknown block + /// number in the cache. + /// + /// ## Returns + /// + /// - `Ok(Some(eth1_block))` if a previously-unprocessed block would trigger Eth2 genesis. + /// - `Ok(None)` if none of the new blocks would trigger genesis, or there were no new blocks. + /// - `Err(_)` if there was some internal error. + fn scan_new_blocks( + &self, + spec: &ChainSpec, + ) -> Result>, String> { + let genesis_trigger_eth1_block = self + .core + .blocks() + .read() + .iter() + // It's only worth scanning blocks that have timestamps _after_ genesis time. It's + // impossible for any other block to trigger genesis. + .filter(|block| block.timestamp >= spec.min_genesis_time) + // The block cache might be more recently updated than deposit cache. Restrict any + // block numbers that are not known by all caches. + .filter(|block| { + self.highest_known_block() + .map(|n| block.number <= n) + .unwrap_or_else(|| false) + }) + .find(|block| { + let mut highest_processed_block = self.highest_processed_block.lock(); + + let next_new_block_number = + highest_processed_block.map(|n| n + 1).unwrap_or_else(|| 0); + + if block.number < next_new_block_number { + return false; + } + + self.is_valid_genesis_eth1_block::(block, &spec) + .and_then(|val| { + *highest_processed_block = Some(block.number); + Ok(val) + }) + .unwrap_or_else(|_| { + error!( + self.core.log, + "Failed to detect if eth1 block triggers genesis"; + "eth1_block_number" => block.number, + "eth1_block_hash" => format!("{}", block.hash), + ); + false + }) + }) + .cloned(); + + if let Some(eth1_block) = genesis_trigger_eth1_block { + debug!( + self.core.log, + "All genesis conditions met"; + "eth1_block_height" => eth1_block.number, + ); + + let genesis_state = self + .genesis_from_eth1_block(eth1_block.clone(), &spec) + .map_err(|e| format!("Failed to generate valid genesis state : {}", e))?; + + info!( + self.core.log, + "Deposit contract genesis complete"; + "eth1_block_height" => eth1_block.number, + "validator_count" => genesis_state.validators.len(), + ); + + Ok(Some(genesis_state)) + } else { + Ok(None) + } + } + + /// Produces an eth2 genesis `BeaconState` from the given `eth1_block`. + /// + /// ## Returns + /// + /// - Ok(genesis_state) if all went well. + /// - Err(e) if the given `eth1_block` was not a viable block to trigger genesis or there was + /// an internal error. + fn genesis_from_eth1_block( + &self, + eth1_block: Eth1Block, + spec: &ChainSpec, + ) -> Result, String> { + let deposit_logs = self + .core + .deposits() + .read() + .cache + .iter() + .take_while(|log| log.block_number <= eth1_block.number) + .map(|log| log.deposit_data.clone()) + .collect::>(); + + let genesis_state = initialize_beacon_state_from_eth1( + eth1_block.hash, + eth1_block.timestamp, + genesis_deposits(deposit_logs, &spec)?, + &spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + if is_valid_genesis_state(&genesis_state, &spec) { + Ok(genesis_state) + } else { + Err("Generated state was not valid.".to_string()) + } + } + + /// A cheap (compared to using `initialize_beacon_state_from_eth1) method for determining if some + /// `target_block` will trigger genesis. + fn is_valid_genesis_eth1_block( + &self, + target_block: &Eth1Block, + spec: &ChainSpec, + ) -> Result { + if target_block.timestamp < spec.min_genesis_time { + Ok(false) + } else { + let mut local_state: BeaconState = BeaconState::new( + 0, + Eth1Data { + block_hash: Hash256::zero(), + deposit_root: Hash256::zero(), + deposit_count: 0, + }, + &spec, + ); + + local_state.genesis_time = target_block.timestamp; + + self.deposit_logs_at_block(target_block.number) + .iter() + // TODO: add the signature field back. + //.filter(|deposit_log| deposit_log.signature_is_valid) + .map(|deposit_log| Deposit { + proof: vec![Hash256::zero(); spec.deposit_contract_tree_depth as usize].into(), + data: deposit_log.deposit_data.clone(), + }) + .try_for_each(|deposit| { + // No need to verify proofs in order to test if some block will trigger genesis. + const PROOF_VERIFICATION: bool = false; + + // Note: presently all the signatures are verified each time this function is + // run. + // + // It would be more efficient to pre-verify signatures, filter out the invalid + // ones and disable verification for `process_deposit`. + // + // This is only more efficient in scenarios where `min_genesis_time` occurs + // _before_ `min_validator_count` is met. We're unlikely to see this scenario + // in testnets (`min_genesis_time` is usually `0`) and I'm not certain it will + // happen for the real, production deposit contract. + + process_deposit(&mut local_state, &deposit, spec, PROOF_VERIFICATION) + .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) + })?; + + process_activations(&mut local_state, spec); + + Ok(is_valid_genesis_state(&local_state, spec)) + } + } + + /// Returns the `block_number` of the highest (by block number) block in the cache. + /// + /// Takes the lower block number of the deposit and block caches to ensure this number is safe. + fn highest_known_block(&self) -> Option { + let block_cache = self.core.blocks().read().highest_block_number()?; + let deposit_cache = self.core.deposits().read().last_processed_block?; + + Some(std::cmp::min(block_cache, deposit_cache)) + } + + /// Returns all deposit logs included in `block_number` and all prior blocks. + fn deposit_logs_at_block(&self, block_number: u64) -> Vec { + self.core + .deposits() + .read() + .cache + .iter() + .take_while(|log| log.block_number <= block_number) + .cloned() + .collect() + } + + /// Returns the `Service` contained in `self`. + pub fn into_core_service(self) -> Service { + self.core + } +} diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs new file mode 100644 index 0000000000..49010ab0b0 --- /dev/null +++ b/beacon_node/genesis/src/interop.rs @@ -0,0 +1,142 @@ +use crate::common::genesis_deposits; +use eth2_hashing::hash; +use rayon::prelude::*; +use ssz::Encode; +use state_processing::initialize_beacon_state_from_eth1; +use std::time::SystemTime; +use tree_hash::SignedRoot; +use types::{ + BeaconState, ChainSpec, DepositData, Domain, EthSpec, Fork, Hash256, Keypair, PublicKey, + Signature, +}; + +/// Builds a genesis state as defined by the Eth2 interop procedure (see below). +/// +/// Reference: +/// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start +pub fn interop_genesis_state( + keypairs: &[Keypair], + genesis_time: u64, + spec: &ChainSpec, +) -> Result, String> { + let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); + let eth1_timestamp = 2_u64.pow(40); + let amount = spec.max_effective_balance; + + let withdrawal_credentials = |pubkey: &PublicKey| { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) + }; + + let datas = keypairs + .into_par_iter() + .map(|keypair| { + let mut data = DepositData { + withdrawal_credentials: withdrawal_credentials(&keypair.pk), + pubkey: keypair.pk.clone().into(), + amount, + signature: Signature::empty_signature().into(), + }; + + let domain = spec.get_domain( + spec.genesis_slot.epoch(T::slots_per_epoch()), + Domain::Deposit, + &Fork::default(), + ); + data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); + + data + }) + .collect::>(); + + let mut state = initialize_beacon_state_from_eth1( + eth1_block_hash, + eth1_timestamp, + genesis_deposits(datas, spec)?, + spec, + ) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + state.genesis_time = genesis_time; + + // Invalid all the caches after all the manual state surgery. + state.drop_all_caches(); + + Ok(state) +} + +/// Returns the system time, mod 30 minutes. +/// +/// Used for easily creating testnets. +pub fn recent_genesis_time(minutes: u64) -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); + now - secs_after_last_period +} + +#[cfg(test)] +mod test { + use super::*; + use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; + + type TestEthSpec = MinimalEthSpec; + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs new file mode 100644 index 0000000000..d6b3606f7f --- /dev/null +++ b/beacon_node/genesis/src/lib.rs @@ -0,0 +1,31 @@ +mod common; +mod eth1_genesis_service; +mod interop; + +pub use eth1::Config as Eth1Config; +pub use eth1_genesis_service::Eth1GenesisService; +pub use interop::{interop_genesis_state, recent_genesis_time}; +pub use types::test_utils::generate_deterministic_keypairs; + +use ssz::Decode; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use types::{BeaconState, EthSpec}; + +/// Load a `BeaconState` from the given `path`. The file should contain raw SSZ bytes (i.e., no +/// ASCII encoding or schema). +pub fn state_from_ssz_file(path: PathBuf) -> Result, String> { + File::open(path.clone()) + .map_err(move |e| format!("Unable to open SSZ genesis state file {:?}: {:?}", path, e)) + .and_then(|mut file| { + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; + Ok(bytes) + }) + .and_then(|bytes| { + BeaconState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e)) + }) +} diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs new file mode 100644 index 0000000000..d3030720ac --- /dev/null +++ b/beacon_node/genesis/tests/tests.rs @@ -0,0 +1,105 @@ +//! NOTE: These tests will not pass unless ganache-cli is running on `ENDPOINT` (see below). +//! +//! You can start a suitable instance using the `ganache_test_node.sh` script in the `scripts` +//! dir in the root of the `lighthouse` repo. +#![cfg(test)] +use environment::{Environment, EnvironmentBuilder}; +use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; +use futures::Future; +use genesis::{Eth1Config, Eth1GenesisService}; +use state_processing::is_valid_genesis_state; +use std::time::Duration; +use types::{test_utils::generate_deterministic_keypair, Hash256, MinimalEthSpec}; + +pub fn new_env() -> Environment { + EnvironmentBuilder::minimal() + .single_thread_tokio_runtime() + .expect("should start tokio runtime") + .null_logger() + .expect("should start null logger") + .build() + .expect("should build env") +} + +#[test] +fn basic() { + let mut env = new_env(); + let log = env.core_context().log; + let mut spec = env.eth2_config().spec.clone(); + let runtime = env.runtime(); + + let eth1 = runtime + .block_on(GanacheEth1Instance::new()) + .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; + let web3 = eth1.web3(); + + let now = runtime + .block_on(web3.eth().block_number().map(|v| v.as_u64())) + .expect("should get block number"); + + let service = Eth1GenesisService::new( + Eth1Config { + endpoint: eth1.endpoint(), + deposit_contract_address: deposit_contract.address(), + deposit_contract_deploy_block: now, + lowest_cached_block_number: now, + follow_distance: 0, + block_cache_truncation: None, + ..Eth1Config::default() + }, + log, + ); + + // NOTE: this test is sensitive to the response speed of the external web3 server. If + // you're experiencing failures, try increasing the update_interval. + let update_interval = Duration::from_millis(500); + + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 8; + + let deposits = (0..spec.min_genesis_active_validator_count + 2) + .into_iter() + .map(|i| { + deposit_contract.deposit_helper::( + generate_deterministic_keypair(i as usize), + Hash256::from_low_u64_le(i), + 32_000_000_000, + ) + }) + .map(|deposit| DelayThenDeposit { + delay: Duration::from_secs(0), + deposit, + }) + .collect::>(); + + let deposit_future = deposit_contract.deposit_multiple(deposits.clone()); + + let wait_future = + service.wait_for_genesis_state::(update_interval, spec.clone()); + + let state = runtime + .block_on(deposit_future.join(wait_future)) + .map(|(_, state)| state) + .expect("should finish waiting for genesis"); + + // Note: using ganache these deposits are 1-per-block, therefore we know there should only be + // the minimum number of validators. + assert_eq!( + state.validators.len(), + spec.min_genesis_active_validator_count as usize, + "should have expected validator count" + ); + + assert!(state.genesis_time > 0, "should have some genesis time"); + + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); + + assert!( + is_valid_genesis_state(&state, &spec), + "should be valid genesis state" + ); +} diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 898304272e..35bdbb7d08 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -51,7 +51,7 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - let message_handler_log = log.new(o!("Service"=> "Message Handler")); + let message_handler_log = log.new(o!("service"=> "msg_handler")); trace!(message_handler_log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 1357b54951..0e86a61cc7 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -10,7 +10,7 @@ use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; use parking_lot::Mutex; -use slog::{debug, info, o, trace}; +use slog::{debug, info, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; @@ -29,15 +29,18 @@ impl Service { beacon_chain: Arc>, config: &NetworkConfig, executor: &TaskExecutor, - log: slog::Logger, + network_log: slog::Logger, ) -> error::Result<(Arc, mpsc::UnboundedSender)> { // build the network channel let (network_send, network_recv) = mpsc::unbounded_channel::(); // launch message handler thread - let message_handler_send = - MessageHandler::spawn(beacon_chain, network_send.clone(), executor, log.clone())?; + let message_handler_send = MessageHandler::spawn( + beacon_chain, + network_send.clone(), + executor, + network_log.clone(), + )?; - let network_log = log.new(o!("Service" => "Network")); // launch libp2p service let libp2p_service = Arc::new(Mutex::new(LibP2PService::new( config.clone(), diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 83aa7ebd29..049c2a6738 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -75,7 +75,7 @@ impl MessageProcessor { network_send: mpsc::UnboundedSender, log: &slog::Logger, ) -> Self { - let sync_logger = log.new(o!("Service"=> "Sync")); + let sync_logger = log.new(o!("service"=> "sync")); let sync_network_context = NetworkContext::new(network_send.clone(), sync_logger.clone()); // spawn the sync thread diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 4f9bcc6171..f8fbdcab78 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -26,7 +26,8 @@ use hyper::rt::Future; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; use parking_lot::RwLock; -use slog::{info, o, warn}; +use slog::{info, warn}; +use std::net::SocketAddr; use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; @@ -35,7 +36,7 @@ use tokio::sync::mpsc; use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; -pub use config::Config as ApiConfig; +pub use config::Config; type BoxFut = Box, Error = ApiError> + Send>; @@ -196,16 +197,14 @@ impl Service for ApiService { } pub fn start_server( - config: &ApiConfig, + config: &Config, executor: &TaskExecutor, beacon_chain: Arc>, network_info: NetworkInfo, db_path: PathBuf, eth2_config: Eth2Config, - log: &slog::Logger, -) -> Result { - let log = log.new(o!("Service" => "Api")); - + log: slog::Logger, +) -> Result<(exit_future::Signal, SocketAddr), hyper::Error> { // build a channel to kill the HTTP server let (exit_signal, exit) = exit_future::signal(); @@ -237,8 +236,11 @@ pub fn start_server( }; let log_clone = log.clone(); - let server = Server::bind(&bind_addr) - .serve(service) + let server = Server::bind(&bind_addr).serve(service); + + let actual_listen_addr = server.local_addr(); + + let server_future = server .with_graceful_shutdown(server_exit) .map_err(move |e| { warn!( @@ -248,15 +250,15 @@ pub fn start_server( }); info!( - log, - "REST API started"; - "address" => format!("{}", config.listen_address), - "port" => config.port, + log, + "REST API started"; + "address" => format!("{}", actual_listen_addr.ip()), + "port" => actual_listen_addr.port(), ); - executor.spawn(server); + executor.spawn(server_future); - Ok(exit_signal) + Ok((exit_signal, actual_listen_addr)) } #[derive(Clone)] diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 7c737cd115..2621cb7727 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -16,13 +16,23 @@ use std::sync::Arc; use tokio::sync::mpsc; use types::{Attestation, Slot}; -#[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, pub network_chan: mpsc::UnboundedSender, pub log: slog::Logger, } +// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually. +impl Clone for AttestationServiceInstance { + fn clone(&self) -> Self { + Self { + chain: self.chain.clone(), + network_chan: self.network_chan.clone(), + log: self.log.clone(), + } + } +} + impl AttestationService for AttestationServiceInstance { /// Produce the `AttestationData` for signing by a validator. fn produce_attestation_data( diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index ab7c0aef58..0834a4387d 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -16,13 +16,23 @@ use std::sync::Arc; use tokio::sync::mpsc; use types::{BeaconBlock, Signature, Slot}; -#[derive(Clone)] pub struct BeaconBlockServiceInstance { pub chain: Arc>, pub network_chan: mpsc::UnboundedSender, pub log: Logger, } +// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually. +impl Clone for BeaconBlockServiceInstance { + fn clone(&self) -> Self { + Self { + chain: self.chain.clone(), + network_chan: self.network_chan.clone(), + log: self.log.clone(), + } + } +} + impl BeaconBlockService for BeaconBlockServiceInstance { /// Produce a `BeaconBlock` for signing by a validator. fn produce_beacon_block( diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index 5d635c9d1b..e9057707fb 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -6,12 +6,21 @@ use protos::services_grpc::BeaconNodeService; use slog::{trace, warn}; use std::sync::Arc; -#[derive(Clone)] pub struct BeaconNodeServiceInstance { pub chain: Arc>, pub log: slog::Logger, } +// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually. +impl Clone for BeaconNodeServiceInstance { + fn clone(&self) -> Self { + Self { + chain: self.chain.clone(), + log: self.log.clone(), + } + } +} + impl BeaconNodeService for BeaconNodeServiceInstance { /// Provides basic node information. fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink) { diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index 59902ff43f..3425eeeac2 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -9,7 +9,7 @@ use self::beacon_block::BeaconBlockServiceInstance; use self::beacon_node::BeaconNodeServiceInstance; use self::validator::ValidatorServiceInstance; use beacon_chain::{BeaconChain, BeaconChainTypes}; -pub use config::Config as RPCConfig; +pub use config::Config; use futures::Future; use grpcio::{Environment, ServerBuilder}; use network::NetworkMessage; @@ -17,19 +17,18 @@ use protos::services_grpc::{ create_attestation_service, create_beacon_block_service, create_beacon_node_service, create_validator_service, }; -use slog::{info, o, warn}; +use slog::{info, warn}; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::mpsc; -pub fn start_server( - config: &RPCConfig, +pub fn start_server( + config: &Config, executor: &TaskExecutor, network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, - log: &slog::Logger, + log: slog::Logger, ) -> exit_future::Signal { - let log = log.new(o!("Service"=>"RPC")); let env = Arc::new(Environment::new(1)); // build a channel to kill the rpc server diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 0533e25586..42ca025ee8 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -9,12 +9,21 @@ use ssz::Decode; use std::sync::Arc; use types::{Epoch, EthSpec, RelativeEpoch}; -#[derive(Clone)] pub struct ValidatorServiceInstance { pub chain: Arc>, pub log: slog::Logger, } +// NOTE: Deriving Clone puts bogus bounds on T, so we implement it manually. +impl Clone for ValidatorServiceInstance { + fn clone(&self) -> Self { + Self { + chain: self.chain.clone(), + log: self.log.clone(), + } + } +} + impl ValidatorService for ValidatorServiceInstance { /// For a list of validator public keys, this function returns the slot at which each /// validator must propose a block, attest to a shard, their shard committee and the shard they diff --git a/beacon_node/src/main.rs b/beacon_node/src/cli.rs similarity index 74% rename from beacon_node/src/main.rs rename to beacon_node/src/cli.rs index 7bc7e8abe6..b22b99862f 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/cli.rs @@ -1,22 +1,9 @@ -mod config; -mod run; - use clap::{App, Arg, SubCommand}; -use config::get_configs; -use env_logger::{Builder, Env}; -use slog::{crit, o, warn, Drain, Level}; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; -pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; -pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; -pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; - -fn main() { - // debugging output for libp2p and external crates - Builder::from_env(Env::default()).init(); - - let matches = App::new("Lighthouse") - .version(version::version().as_str()) +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("Beacon Node") + .visible_aliases(&["b", "bn", "beacon", "beacon_node"]) + .version(crate_version!()) .author("Sigma Prime ") .about("Eth 2.0 Client") /* @@ -30,13 +17,6 @@ fn main() { .takes_value(true) .global(true) ) - .arg( - Arg::with_name("logfile") - .long("logfile") - .value_name("FILE") - .help("File path where output will be written.") - .takes_value(true), - ) .arg( Arg::with_name("network-dir") .long("network-dir") @@ -197,35 +177,44 @@ fn main() { * Eth1 Integration */ .arg( - Arg::with_name("eth1-server") - .long("eth1-server") - .value_name("SERVER") + Arg::with_name("dummy-eth1") + .long("dummy-eth1") + .help("If present, uses an eth1 backend that generates static dummy data.\ + Identical to the method used at the 2019 Canada interop.") + ) + .arg( + Arg::with_name("eth1-endpoint") + .long("eth1-endpoint") + .value_name("HTTP-ENDPOINT") .help("Specifies the server for a web3 connection to the Eth1 chain.") .takes_value(true) + .default_value("http://localhost:8545") ) - /* - * Database parameters. - */ .arg( - Arg::with_name("db") - .long("db") - .value_name("DB") - .help("Type of database to use.") + Arg::with_name("eth1-follow") + .long("eth1-follow") + .value_name("BLOCK_COUNT") + .help("Specifies how many blocks we should cache behind the eth1 head. A larger number means a smaller cache.") .takes_value(true) - .possible_values(&["disk", "memory"]) - .default_value("disk"), + // TODO: set this higher once we're not using testnets all the time. + .default_value("0") ) - /* - * Logging. - */ .arg( - Arg::with_name("debug-level") - .long("debug-level") - .value_name("LEVEL") - .help("The title of the spec constants for chain config.") + Arg::with_name("deposit-contract") + .long("deposit-contract") + .short("e") + .value_name("DEPOSIT-CONTRACT") + .help("Specifies the deposit contract address on the Eth1 chain.") .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), + ) + .arg( + Arg::with_name("deposit-contract-deploy") + .long("deposit-contract-deploy") + .value_name("BLOCK_NUMBER") + .help("Specifies the block number that the deposit contract was deployed at.") + .takes_value(true) + // TODO: set this higher once we're not using testnets all the time. + .default_value("0") ) /* * The "testnet" sub-command. @@ -234,17 +223,6 @@ fn main() { */ .subcommand(SubCommand::with_name("testnet") .about("Create a new Lighthouse datadir using a testnet strategy.") - .arg( - Arg::with_name("spec") - .short("s") - .long("spec") - .value_name("TITLE") - .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") - .takes_value(true) - .required(true) - .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal") - ) .arg( Arg::with_name("eth2-config") .long("eth2-config") @@ -347,68 +325,25 @@ fn main() { * Start a new node, using a genesis state loaded from a YAML file */ .subcommand(SubCommand::with_name("file") - .about("Creates a new datadir where the genesis state is read from YAML. May fail to parse \ + .about("Creates a new datadir where the genesis state is read from file. May fail to parse \ a file that was generated to a different spec than that specified by --spec.") .arg(Arg::with_name("format") .value_name("FORMAT") .required(true) - .possible_values(&["yaml", "ssz", "json"]) + .possible_values(&["ssz"]) .help("The encoding of the state in the file.")) .arg(Arg::with_name("file") - .value_name("YAML_FILE") + .value_name("FILE") .required(true) - .help("A YAML file from which to read the state")) + .help("A file from which to read the state")) + ) + /* + * `prysm` + * + * Connect to the Prysmatic Labs testnet. + */ + .subcommand(SubCommand::with_name("prysm") + .about("Connect to the Prysmatic Labs testnet on Goerli.") ) ) - .get_matches(); - - // build the initial logger - let decorator = slog_term::TermDecorator::new().build(); - let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build(); - - let drain = match matches.value_of("debug-level") { - Some("info") => drain.filter_level(Level::Info), - Some("debug") => drain.filter_level(Level::Debug), - Some("trace") => drain.filter_level(Level::Trace), - Some("warn") => drain.filter_level(Level::Warning), - Some("error") => drain.filter_level(Level::Error), - Some("crit") => drain.filter_level(Level::Critical), - _ => unreachable!("guarded by clap"), - }; - - let log = slog::Logger::root(drain.fuse(), o!()); - - if std::mem::size_of::() != 8 { - crit!( - log, - "Lighthouse only supports 64bit CPUs"; - "detected" => format!("{}bit", std::mem::size_of::() * 8) - ); - } - - warn!( - log, - "Ethereum 2.0 is pre-release. This software is experimental." - ); - - let log_clone = log.clone(); - - // Load the process-wide configuration. - // - // May load this from disk or create a new configuration, depending on the CLI flags supplied. - let (client_config, eth2_config, log) = match get_configs(&matches, log) { - Ok(configs) => configs, - Err(e) => { - crit!(log_clone, "Failed to load configuration. Exiting"; "error" => e); - return; - } - }; - - // Start the node using a `tokio` executor. - match run::run_beacon_node(client_config, eth2_config, &log) { - Ok(_) => {} - Err(e) => crit!(log, "Beacon node failed to start"; "reason" => format!("{:}", e)), - } } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 58de096d12..e6d56737d4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,12 +1,14 @@ use clap::ArgMatches; -use client::{BeaconChainStartMethod, ClientConfig, Eth1BackendMethod, Eth2Config}; +use client::{ClientConfig, ClientGenesis, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; +use genesis::recent_genesis_time; use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; use std::net::Ipv4Addr; use std::path::{Path, PathBuf}; +use types::{Address, Epoch, Fork}; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; @@ -27,12 +29,33 @@ pub fn get_configs(cli_args: &ArgMatches, core_log: Logger) -> Result { let mut builder = ConfigBuilder::new(cli_args, core_log)?; - if let Some(server) = cli_args.value_of("eth1-server") { - builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { - server: server.into(), - }) - } else { - builder.set_eth1_backend_method(Eth1BackendMethod::Interop) + if cli_args.is_present("dummy-eth1") { + builder.client_config.dummy_eth1_backend = true; + } + + if let Some(val) = cli_args.value_of("eth1-endpoint") { + builder.set_eth1_endpoint(val) + } + + if let Some(val) = cli_args.value_of("deposit-contract") { + builder.set_deposit_contract( + val.parse::
() + .map_err(|e| format!("Unable to parse deposit-contract address: {:?}", e))?, + ) + } + + if let Some(val) = cli_args.value_of("deposit-contract-deploy") { + builder.set_deposit_contract_deploy_block( + val.parse::() + .map_err(|e| format!("Unable to parse deposit-contract-deploy: {:?}", e))?, + ) + } + + if let Some(val) = cli_args.value_of("eth1-follow") { + builder.set_eth1_follow( + val.parse::() + .map_err(|e| format!("Unable to parse follow distance: {:?}", e))?, + ) } match cli_args.subcommand() { @@ -49,7 +72,7 @@ pub fn get_configs(cli_args: &ArgMatches, core_log: Logger) -> Result { // If no primary subcommand was given, start the beacon chain from an existing // database. - builder.set_beacon_chain_start_method(BeaconChainStartMethod::Resume); + builder.set_genesis(ClientGenesis::Resume); // Whilst there is no large testnet or mainnet force the user to specify how they want // to start a new chain (e.g., from a genesis YAML file, another node, etc). @@ -142,7 +165,7 @@ fn process_testnet_subcommand( builder.import_bootstrap_enr_address(server)?; builder.import_bootstrap_eth2_config(server)?; - builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { + builder.set_genesis(ClientGenesis::RemoteNode { server: server.to_string(), port, }) @@ -160,9 +183,11 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; - builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { + builder.client_config.dummy_eth1_backend = true; + + builder.set_genesis(ClientGenesis::Interop { validator_count, - minutes, + genesis_time: recent_genesis_time(minutes), }) } ("quick", Some(cli_args)) => { @@ -178,13 +203,15 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; - builder.set_beacon_chain_start_method(BeaconChainStartMethod::Generated { + builder.client_config.dummy_eth1_backend = true; + + builder.set_genesis(ClientGenesis::Interop { validator_count, genesis_time, }) } ("file", Some(cli_args)) => { - let file = cli_args + let path = cli_args .value_of("file") .ok_or_else(|| "No filename specified")? .parse::() @@ -195,13 +222,34 @@ fn process_testnet_subcommand( .ok_or_else(|| "No file format specified")?; let start_method = match format { - "yaml" => BeaconChainStartMethod::Yaml { file }, - "ssz" => BeaconChainStartMethod::Ssz { file }, - "json" => BeaconChainStartMethod::Json { file }, + "ssz" => ClientGenesis::SszFile { path }, other => return Err(format!("Unknown genesis file format: {}", other)), }; - builder.set_beacon_chain_start_method(start_method) + builder.set_genesis(start_method) + } + ("prysm", Some(_)) => { + let mut spec = &mut builder.eth2_config.spec; + let mut client_config = &mut builder.client_config; + + spec.min_deposit_amount = 100; + spec.max_effective_balance = 3_200_000_000; + spec.ejection_balance = 1_600_000_000; + spec.effective_balance_increment = 100_000_000; + spec.min_genesis_time = 0; + spec.genesis_fork = Fork { + previous_version: [0; 4], + current_version: [0, 0, 0, 2], + epoch: Epoch::new(0), + }; + + client_config.eth1.deposit_contract_address = + "0x802dF6aAaCe28B2EEb1656bb18dF430dDC42cc2e".to_string(); + client_config.eth1.deposit_contract_deploy_block = 1487270; + client_config.eth1.follow_distance = 16; + client_config.dummy_eth1_backend = false; + + builder.set_genesis(ClientGenesis::DepositContract) } (cmd, Some(_)) => { return Err(format!( @@ -220,8 +268,8 @@ fn process_testnet_subcommand( /// Allows for building a set of configurations based upon `clap` arguments. struct ConfigBuilder { log: Logger, - eth2_config: Eth2Config, - client_config: ClientConfig, + pub eth2_config: Eth2Config, + pub client_config: ClientConfig, } impl ConfigBuilder { @@ -294,14 +342,24 @@ impl ConfigBuilder { Ok(()) } - /// Sets the method for starting the beacon chain. - pub fn set_beacon_chain_start_method(&mut self, method: BeaconChainStartMethod) { - self.client_config.beacon_chain_start_method = method; + pub fn set_eth1_endpoint(&mut self, endpoint: &str) { + self.client_config.eth1.endpoint = endpoint.to_string(); } - /// Sets the method for starting the beacon chain. - pub fn set_eth1_backend_method(&mut self, method: Eth1BackendMethod) { - self.client_config.eth1_backend_method = method; + pub fn set_deposit_contract(&mut self, deposit_contract: Address) { + self.client_config.eth1.deposit_contract_address = format!("{:?}", deposit_contract); + } + + pub fn set_deposit_contract_deploy_block(&mut self, eth1_block_number: u64) { + self.client_config.eth1.deposit_contract_deploy_block = eth1_block_number; + } + + pub fn set_eth1_follow(&mut self, distance: u64) { + self.client_config.eth1.follow_distance = distance; + } + + pub fn set_genesis(&mut self, method: ClientGenesis) { + self.client_config.genesis = method; } /// Import the libp2p address for `server` into the list of libp2p nodes to connect with. @@ -540,7 +598,6 @@ impl ConfigBuilder { /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand /// cli_args). pub fn build(mut self, cli_args: &ArgMatches) -> Result { - self.eth2_config.apply_cli_args(cli_args)?; self.client_config.apply_cli_args(cli_args, &mut self.log)?; if let Some(bump) = cli_args.value_of("port-bump") { diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs new file mode 100644 index 0000000000..43e649a64b --- /dev/null +++ b/beacon_node/src/lib.rs @@ -0,0 +1,153 @@ +#[macro_use] +extern crate clap; + +mod cli; +mod config; + +pub use beacon_chain; +pub use cli::cli_app; +pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; +pub use eth2_config::Eth2Config; + +use beacon_chain::{ + builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, +}; +use clap::ArgMatches; +use config::get_configs; +use environment::RuntimeContext; +use futures::{Future, IntoFuture}; +use slog::{info, warn}; +use std::ops::{Deref, DerefMut}; +use store::DiskStore; +use types::EthSpec; + +/// A type-alias to the tighten the definition of a production-intended `Client`. +pub type ProductionClient = Client< + Witness< + DiskStore, + SystemTimeSlotClock, + ThreadSafeReducedTree, + CachingEth1Backend, + E, + WebSocketSender, + >, +>; + +/// The beacon node `Client` that will be used in production. +/// +/// Generic over some `EthSpec`. +/// +/// ## Notes: +/// +/// Despite being titled `Production...`, this code is not ready for production. The name +/// demonstrates an intention, not a promise. +pub struct ProductionBeaconNode(ProductionClient); + +impl ProductionBeaconNode { + /// Starts a new beacon node `Client` in the given `environment`. + /// + /// Identical to `start_from_client_config`, however the `client_config` is generated from the + /// given `matches` and potentially configuration files on the local filesystem or other + /// configurations hosted remotely. + pub fn new_from_cli<'a, 'b>( + mut context: RuntimeContext, + matches: &ArgMatches<'b>, + ) -> impl Future + 'a { + let log = context.log.clone(); + + // TODO: the eth2 config in the env is being completely ignored. + // + // See https://github.com/sigp/lighthouse/issues/602 + get_configs(&matches, log).into_future().and_then( + move |(client_config, eth2_config, _log)| { + context.eth2_config = eth2_config; + Self::new(context, client_config) + }, + ) + } + + /// Starts a new beacon node `Client` in the given `environment`. + /// + /// Client behaviour is defined by the given `client_config`. + pub fn new( + context: RuntimeContext, + client_config: ClientConfig, + ) -> impl Future { + let http_eth2_config = context.eth2_config().clone(); + let spec = context.eth2_config().spec.clone(); + let genesis_eth1_config = client_config.eth1.clone(); + let client_genesis = client_config.genesis.clone(); + let log = context.log.clone(); + + client_config + .db_path() + .ok_or_else(|| "Unable to access database path".to_string()) + .into_future() + .and_then(move |db_path| { + Ok(ClientBuilder::new(context.eth_spec_instance.clone()) + .runtime_context(context) + .disk_store(&db_path)? + .chain_spec(spec)) + }) + .and_then(move |builder| { + builder.beacon_chain_builder(client_genesis, genesis_eth1_config) + }) + .and_then(move |builder| { + let builder = if client_config.sync_eth1_chain && !client_config.dummy_eth1_backend + { + info!( + log, + "Block production enabled"; + "endpoint" => &client_config.eth1.endpoint, + "method" => "json rpc via http" + ); + builder.caching_eth1_backend(client_config.eth1.clone())? + } else if client_config.dummy_eth1_backend { + warn!( + log, + "Block production impaired"; + "reason" => "dummy eth1 backend is enabled" + ); + builder.dummy_eth1_backend()? + } else { + info!( + log, + "Block production disabled"; + "reason" => "no eth1 backend configured" + ); + builder.no_eth1_backend()? + }; + + let builder = builder + .system_time_slot_clock()? + .websocket_event_handler(client_config.websocket_server.clone())? + .build_beacon_chain()? + .libp2p_network(&client_config.network)? + .http_server(&client_config, &http_eth2_config)? + .grpc_server(&client_config.rpc)? + .peer_count_notifier()? + .slot_notifier()?; + + Ok(Self(builder.build())) + }) + } + + pub fn into_inner(self) -> ProductionClient { + self.0 + } +} + +impl Deref for ProductionBeaconNode { + type Target = ProductionClient; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for ProductionBeaconNode { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs deleted file mode 100644 index 3d6607552c..0000000000 --- a/beacon_node/src/run.rs +++ /dev/null @@ -1,138 +0,0 @@ -use client::{error, notifier, Client, ClientConfig, Eth1BackendMethod, Eth2Config}; -use futures::sync::oneshot; -use futures::Future; -use slog::{error, info}; -use std::cell::RefCell; -use std::path::Path; -use std::path::PathBuf; -use store::Store; -use store::{DiskStore, MemoryStore}; -use tokio::runtime::Builder; -use tokio::runtime::Runtime; -use tokio::runtime::TaskExecutor; -use tokio_timer::clock::Clock; -use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; - -/// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. -/// -/// Spawns an executor which performs syncing, networking, block production, etc. -/// -/// Blocks the current thread, returning after the `BeaconChain` has exited or a `Ctrl+C` -/// signal. -pub fn run_beacon_node( - client_config: ClientConfig, - eth2_config: Eth2Config, - log: &slog::Logger, -) -> error::Result<()> { - let runtime = Builder::new() - .name_prefix("main-") - .clock(Clock::system()) - .build() - .map_err(|e| format!("{:?}", e))?; - - let executor = runtime.executor(); - - let db_path: PathBuf = client_config - .db_path() - .ok_or_else::(|| "Unable to access database path".into())?; - let db_type = &client_config.db_type; - let spec_constants = eth2_config.spec_constants.clone(); - - let other_client_config = client_config.clone(); - - info!( - log, - "Starting beacon node"; - "p2p_listen_address" => format!("{}", &other_client_config.network.listen_address), - "db_type" => &other_client_config.db_type, - "spec_constants" => &spec_constants, - ); - - macro_rules! run_client { - ($store: ty, $eth_spec: ty) => { - run::<$store, $eth_spec>(&db_path, client_config, eth2_config, executor, runtime, log) - }; - } - - if let Eth1BackendMethod::Web3 { .. } = client_config.eth1_backend_method { - return Err("Starting from web3 backend is not supported for interop.".into()); - } - - match (db_type.as_str(), spec_constants.as_str()) { - ("disk", "minimal") => run_client!(DiskStore, MinimalEthSpec), - ("disk", "mainnet") => run_client!(DiskStore, MainnetEthSpec), - ("disk", "interop") => run_client!(DiskStore, InteropEthSpec), - ("memory", "minimal") => run_client!(MemoryStore, MinimalEthSpec), - ("memory", "mainnet") => run_client!(MemoryStore, MainnetEthSpec), - ("memory", "interop") => run_client!(MemoryStore, InteropEthSpec), - (db_type, spec) => { - error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); - Err("Unknown specification and/or db_type.".into()) - } - } -} - -/// Performs the type-generic parts of launching a `BeaconChain`. -fn run( - db_path: &Path, - client_config: ClientConfig, - eth2_config: Eth2Config, - executor: TaskExecutor, - mut runtime: Runtime, - log: &slog::Logger, -) -> error::Result<()> -where - S: Store + Clone + 'static + OpenDatabase, - E: EthSpec, -{ - let store = S::open_database(&db_path)?; - - let client: Client = - Client::new(client_config, eth2_config, store, log.clone(), &executor)?; - - // run service until ctrl-c - let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); - let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); - ctrlc::set_handler(move || { - if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { - ctrlc_send.send(()).expect("Error sending ctrl-c message"); - } - }) - .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; - - let (exit_signal, exit) = exit_future::signal(); - - notifier::run(&client, executor, exit); - - runtime - .block_on(ctrlc_oneshot) - .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e))?; - - // perform global shutdown operations. - info!(log, "Shutting down.."); - exit_signal.fire(); - // shutdown the client - // client.exit_signal.fire(); - drop(client); - runtime.shutdown_on_idle().wait().unwrap(); - Ok(()) -} - -/// A convenience trait, providing a method to open a database. -/// -/// Panics if unable to open the database. -pub trait OpenDatabase: Sized { - fn open_database(path: &Path) -> error::Result; -} - -impl OpenDatabase for MemoryStore { - fn open_database(_path: &Path) -> error::Result { - Ok(MemoryStore::open()) - } -} - -impl OpenDatabase for DiskStore { - fn open_database(path: &Path) -> error::Result { - DiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e).into()) - } -} diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs new file mode 100644 index 0000000000..4492c2f882 --- /dev/null +++ b/beacon_node/tests/test.rs @@ -0,0 +1,40 @@ +#![cfg(test)] + +use node_test_rig::{environment::EnvironmentBuilder, LocalBeaconNode}; +use types::{MinimalEthSpec, Slot}; + +fn env_builder() -> EnvironmentBuilder { + EnvironmentBuilder::minimal() +} + +#[test] +fn http_server_genesis_state() { + let mut env = env_builder() + .null_logger() + .expect("should build env logger") + .multi_threaded_tokio_runtime() + .expect("should start tokio runtime") + .build() + .expect("environment should build"); + + let node = LocalBeaconNode::production(env.core_context()); + let remote_node = node.remote_node().expect("should produce remote node"); + + let (api_state, _root) = env + .runtime() + .block_on(remote_node.http.beacon().state_at_slot(Slot::new(0))) + .expect("should fetch state from http api"); + + let mut db_state = node + .client + .beacon_chain() + .expect("client should have beacon chain") + .state_at_slot(Slot::new(0)) + .expect("should find state"); + db_state.drop_all_caches(); + + assert_eq!( + api_state, db_state, + "genesis state from api should match that from the DB" + ); +} diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 2922d5fa5d..9ea8d9b650 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -beacon_chain = { path = "../beacon_chain" } clap = "2.33.0" exit-future = "0.1.4" futures = "0.1.29" diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index c161224c7b..26736c573e 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -1,7 +1,7 @@ -use beacon_chain::events::{EventHandler, EventKind}; use futures::Future; use slog::{debug, error, info, warn, Logger}; use std::marker::PhantomData; +use std::net::SocketAddr; use std::thread; use tokio::runtime::TaskExecutor; use types::EthSpec; @@ -36,31 +36,30 @@ impl WebSocketSender { } } -impl EventHandler for WebSocketSender { - fn register(&self, kind: EventKind) -> Result<(), String> { - self.send_string( - serde_json::to_string(&kind) - .map_err(|e| format!("Unable to serialize event: {:?}", e))?, - ) - } -} - pub fn start_server( config: &Config, executor: &TaskExecutor, log: &Logger, -) -> Result<(WebSocketSender, exit_future::Signal), String> { +) -> Result<(WebSocketSender, exit_future::Signal, SocketAddr), String> { let server_string = format!("{}:{}", config.listen_address, config.port); - info!( - log, - "Websocket server starting"; - "listen_address" => &server_string - ); - // Create a server that simply ignores any incoming messages. let server = WebSocket::new(|_| |_| Ok(())) - .map_err(|e| format!("Failed to initialize websocket server: {:?}", e))?; + .map_err(|e| format!("Failed to initialize websocket server: {:?}", e))? + .bind(server_string.clone()) + .map_err(|e| { + format!( + "Failed to bind websocket server to {}: {:?}", + server_string, e + ) + })?; + + let actual_listen_addr = server.local_addr().map_err(|e| { + format!( + "Failed to read listening addr from websocket server: {:?}", + e + ) + })?; let broadcaster = server.broadcaster(); @@ -91,7 +90,7 @@ pub fn start_server( }; let log_inner = log.clone(); - let _handle = thread::spawn(move || match server.listen(server_string) { + let _handle = thread::spawn(move || match server.run() { Ok(_) => { debug!( log_inner, @@ -107,11 +106,19 @@ pub fn start_server( } }); + info!( + log, + "WebSocket server started"; + "address" => format!("{}", actual_listen_addr.ip()), + "port" => actual_listen_addr.port(), + ); + Ok(( WebSocketSender { sender: Some(broadcaster), _phantom: PhantomData, }, exit_signal, + actual_listen_addr, )) } diff --git a/book/src/cli.md b/book/src/cli.md index 47f85b9cba..0f82335c30 100644 --- a/book/src/cli.md +++ b/book/src/cli.md @@ -1,24 +1,32 @@ # Command-Line Interface (CLI) -Lighthouse a collection of CLI applications. The two primary binaries are: +The `lighthouse` binary provides all necessary Ethereum 2.0 functionality. It +has two primary sub-commands: -- `beacon_node`: the largest and most fundamental component which connects to +- `$ lighthouse beacon_node`: the largest and most fundamental component which connects to the p2p network, processes messages and tracks the head of the beacon chain. -- `validator_client`: a lightweight but important component which loads a validators private +- `$ lighthouse validator_client`: a lightweight but important component which loads a validators private key and signs messages using a `beacon_node` as a source-of-truth. -There are also some ancillary binaries: +There are also some ancillary binaries like `lcli` and `account_manager`, but +these are primarily for testing. -- `account_manager`: generates cryptographic keys. -- `lcli`: a general-purpose utility for troubleshooting Lighthouse state - transitions (developer tool). +> **Note:** documentation sometimes uses `$ lighthouse bn` and `$ lighthouse +> vc` instead of the long-form `beacon_node` and `validator_client`. These +> commands are valid on the CLI too. ## Installation -Presently, we recommend building Lighthouse using the `$ cargo build --release ---all` command and executing binaries from the -`/target/release` directory. +Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install +--path lighthouse` from the root of the repository. See ["Configuring the +`PATH` environment variable"](https://www.rust-lang.org/tools/install) for more +information. + +For develeopers, we recommend building Lighthouse using the `$ cargo build --release +--bin lighthouse` command and executing binaries from the +`/target/release` directory. This is more ergonomic when +modifying and rebuilding regularly. ## Documentation @@ -27,36 +35,29 @@ documentation. ```bash -$ ./beacon_node --help +$ lighthouse beacon_node --help ``` ```bash -$ ./validator_client --help -``` - -```bash -$ ./account_manager --help -``` - -```bash -$ ./lcli --help +$ lighthouse validator_client --help ``` ## Beacon Node -The `beacon_node` CLI has two primary tasks: +The `$ lighthouse beacon_node` (or `$ lighthouse bn`) command has two primary +tasks: -- **Resuming** an existing database with `$ ./beacon_node`. -- **Creating** a new testnet database using `$ ./beacon_node testnet`. +- **Resuming** an existing database with `$ lighthouse bn`. +- **Creating** a new testnet database using `$ lighthouse bn testnet`. ## Creating a new database -Use the `$./beacon_node testnet` command (see [testnets](./testnets.md) for more -information). +Use the `$ lighthouse bn testnet` command (see [testnets](./testnets.md) for +more information). ## Resuming from an existing database -Once a database has been created, it can be resumed by running `$ ./beacon_node`. +Once a database has been created, it can be resumed by running `$ lighthouse bn`. -Presently, this command will fail if no existing database is found. You must -use the `$ ./beacon_node testnet` command to create a new database. +Presently, you are not allowed to call `$ lighthouse bn` unless you have first +created a database using `$ lighthouse bn testnet`. diff --git a/book/src/setup.md b/book/src/setup.md index 22671477c2..5293947d55 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -19,6 +19,18 @@ > `target/release` directory. > - First-time compilation may take several minutes. +### Installing to `PATH` + +Use `cargo install --path lighthouse` from the root of the repository to +install the compiled binary to `CARGO_HOME` or `$HOME/.cargo`. If this +directory is on your `PATH`, you can run `$ lighthouse ..` from anywhere. + + See ["Configuring the `PATH` environment + variable" (rust-lang.org)](https://www.rust-lang.org/tools/install) for more information. + + > If you _don't_ install `lighthouse` to the path, you'll need to run the + > binaries directly from the `target` directory or using `cargo run ...`. + ### Windows Perl may also be required to build Lighthouse. You can install [Strawberry diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md index b3b24c1c51..9b062badeb 100644 --- a/book/src/simple-testnet.md +++ b/book/src/simple-testnet.md @@ -3,9 +3,9 @@ With a functional [development environment](./setup.md), starting a local multi-node testnet is easy: -1. Start the first node: `$ ./beacon_node testnet -f recent 8` -1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` -1. Start more nodes with `$ ./beacon_node -b 10 testnet -f bootstrap +1. Start the first node: `$ lighthouse bn testnet -f recent 8` +1. Start a validator client: `$ lighthouse bn testnet -b insecure 0 8` +1. Start more nodes with `$ lighthouse bn -b 10 testnet -f bootstrap http://localhost:5052` - Increment the `-b` value by `10` for each additional node. @@ -16,10 +16,10 @@ First, setup a Lighthouse development environment and navigate to the ## Starting a beacon node -Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: +Start a new node (creating a fresh database and configuration in `$HOME/.lighthouse`), using: ```bash -$ ./beacon_node testnet -f recent 8 +$ lighthouse bn testnet -f recent 8 ``` > Notes: @@ -27,7 +27,7 @@ $ ./beacon_node testnet -f recent 8 > - The `-f` flag ignores any existing database or configuration, backing them > up before re-initializing. > - `8` is number of validators with deposits in the genesis state. -> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> - See `$ lighthouse bn testnet recent --help` for more configuration options, > including `minimal`/`mainnet` specification. ## Starting a validator client @@ -35,7 +35,7 @@ $ ./beacon_node testnet -f recent 8 In a new terminal window, start the validator client with: ```bash -$ ./validator_client testnet -b insecure 0 8 +$ lighthouse bn testnet -b insecure 0 8 ``` > Notes: @@ -58,7 +58,7 @@ In a new terminal window, run: ```bash -$ ./beacon_node -b 10 testnet -r bootstrap +$ lighthouse bn -b 10 testnet -r bootstrap ``` > Notes: @@ -70,4 +70,4 @@ $ ./beacon_node -b 10 testnet -r bootstrap > (avoids data directory collisions between nodes). > - The default bootstrap HTTP address is `http://localhost:5052`. The new node > will download configuration via HTTP before starting sync via libp2p. -> - See `$ ./beacon_node testnet bootstrap --help` for more configuration. +> - See `$ lighthouse bn testnet bootstrap --help` for more configuration. diff --git a/book/src/testnets.md b/book/src/testnets.md index 211d235c11..60cd0b3ac9 100644 --- a/book/src/testnets.md +++ b/book/src/testnets.md @@ -1,16 +1,16 @@ # Testnets -The Lighthouse CLI has a `testnet` sub-command to allow creating or connecting -to Eth2 beacon chain testnets. +The `beacon_node` and `validator` commands have a `testnet` sub-command to +allow creating or connecting to Eth2 beacon chain testnets. For detailed documentation, use the `--help` flag on the CLI: ```bash -$ ./beacon_node testnet --help +$ lighthouse bn testnet --help ``` ```bash -$ ./validator_client testnet --help +$ lighthouse vc testnet --help ``` ## Examples @@ -25,7 +25,7 @@ commands are based in the `target/release` directory (this is the build dir for To start a brand-new beacon node (with no history) use: ```bash -$ ./beacon_node testnet -f quick 8 +$ lighthouse bn testnet -f quick 8 ``` Where `GENESIS_TIME` is in [unix time](https://duckduckgo.com/?q=unix+time&t=ffab&ia=answer). @@ -38,7 +38,7 @@ method in the `ethereum/eth2.0-pm` repository. > - The `-f` flag ignores any existing database or configuration, backing them > up before re-initializing. > - `8` is the validator count and `1567222226` is the genesis time. -> - See `$ ./beacon_node testnet quick --help` for more configuration options. +> - See `$ lighthouse bn testnet quick --help` for more configuration options. ### Start a beacon node given a genesis state file @@ -52,14 +52,14 @@ There are three supported formats: Start a new node using `/tmp/genesis.ssz` as the genesis state: ```bash -$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz +$ lighthouse bn testnet --spec minimal -f file ssz /tmp/genesis.ssz ``` > Notes: > > - The `-f` flag ignores any existing database or configuration, backing them > up before re-initializing. -> - See `$ ./beacon_node testnet file --help` for more configuration options. +> - See `$ lighthouse bn testnet file --help` for more configuration options. > - The `--spec` flag is required to allow SSZ parsing of fixed-length lists. > Here the `minimal` eth2 specification is chosen, allowing for lower > validator counts. See @@ -71,7 +71,7 @@ $ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz To start a brand-new validator client (with no history) use: ```bash -$ ./validator_client testnet -b insecure 0 8 +$ lighthouse vc testnet -b insecure 0 8 ``` > Notes: @@ -113,7 +113,7 @@ the `--libp2p-addresses` command. #### Example: ```bash -$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 +$ lighthouse bn --libp2p-addresses /ip4/192.168.0.1/tcp/9000 ``` ### Specify a boot node by ENR (Ethereum Name Record) @@ -124,7 +124,7 @@ the `--boot-nodes` command. #### Example: ```bash -$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 +$ lighthouse bn --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 ``` ### Avoid port clashes when starting nodes @@ -138,7 +138,7 @@ ports by some `n`. Increase all ports by `10` (using multiples of `10` is recommended). ```bash -$ ./beacon_node -b 10 +$ lighthouse bn -b 10 ``` ### Start a testnet with a custom slot time @@ -151,7 +151,7 @@ Lighthouse can run at quite low slot times when there are few validators (e.g., The `-t` (`--slot-time`) flag specifies the milliseconds per slot. ```bash -$ ./beacon_node testnet -t 500 recent 8 +$ lighthouse bn testnet -t 500 recent 8 ``` > Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 49e9ff738a..a8752e2b44 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -5,7 +5,7 @@ extern crate lazy_static; use beacon_chain::test_utils::{ generate_deterministic_keypairs, AttestationStrategy, - BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, + BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, HarnessType, }; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; @@ -21,7 +21,7 @@ pub const VALIDATOR_COUNT: usize = 3 * 8; type TestEthSpec = MinimalEthSpec; type ThreadSafeReducedTree = BaseThreadSafeReducedTree; -type BeaconChainHarness = BaseBeaconChainHarness; +type BeaconChainHarness = BaseBeaconChainHarness>; type RootAndSlot = (Hash256, Slot); lazy_static! { @@ -52,7 +52,10 @@ struct ForkedHarness { impl ForkedHarness { /// A new standard instance of with constant parameters. pub fn new() -> Self { - let harness = BeaconChainHarness::new(generate_deterministic_keypairs(VALIDATOR_COUNT)); + let harness = BeaconChainHarness::new( + MinimalEthSpec, + generate_deterministic_keypairs(VALIDATOR_COUNT), + ); // Move past the zero slot. harness.advance_slot(); diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 618c9d870d..02a8535d29 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -598,7 +598,7 @@ mod tests { let mut state = BeaconState::random_for_test(rng); - state.fork = Fork::genesis(MainnetEthSpec::genesis_epoch()); + state.fork = Fork::default(); (spec, state) } diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index e36261ca3e..84bdebd97c 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -35,18 +35,7 @@ pub fn initialize_beacon_state_from_eth1( process_deposit(&mut state, &deposit, spec, true)?; } - // Process activations - for (index, validator) in state.validators.iter_mut().enumerate() { - let balance = state.balances[index]; - validator.effective_balance = std::cmp::min( - balance - balance % spec.effective_balance_increment, - spec.max_effective_balance, - ); - if validator.effective_balance == spec.max_effective_balance { - validator.activation_eligibility_epoch = T::genesis_epoch(); - validator.activation_epoch = T::genesis_epoch(); - } - } + process_activations(&mut state, spec); // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; @@ -71,3 +60,20 @@ pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSp && state.get_active_validator_indices(T::genesis_epoch()).len() as u64 >= spec.min_genesis_active_validator_count } + +/// Activate genesis validators, if their balance is acceptable. +/// +/// Spec v0.8.0 +pub fn process_activations(state: &mut BeaconState, spec: &ChainSpec) { + for (index, validator) in state.validators.iter_mut().enumerate() { + let balance = state.balances[index]; + validator.effective_balance = std::cmp::min( + balance - balance % spec.effective_balance_increment, + spec.max_effective_balance, + ); + if validator.effective_balance == spec.max_effective_balance { + validator.activation_eligibility_epoch = T::genesis_epoch(); + validator.activation_epoch = T::genesis_epoch(); + } + } +} diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index d94d47734d..0c82527e01 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -8,7 +8,7 @@ pub mod per_epoch_processing; pub mod per_slot_processing; pub mod test_utils; -pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state}; +pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations}; pub use per_block_processing::{ errors::BlockProcessingError, per_block_processing, BlockSignatureStrategy, VerifySignatures, }; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index fec16c5b95..ada25d5fe3 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -444,7 +444,7 @@ pub fn process_deposit( } else { // The signature should be checked for new validators. Return early for a bad // signature. - if verify_deposit_signature(state, deposit, spec).is_err() { + if verify_deposit_signature(&deposit.data, spec).is_err() { return Ok(()); } diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index 4f1a066708..35f47331d6 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -7,7 +7,7 @@ use std::convert::TryInto; use tree_hash::{SignedRoot, TreeHash}; use types::{ AggregateSignature, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, - BeaconBlockHeader, BeaconState, BeaconStateError, ChainSpec, Deposit, Domain, EthSpec, Fork, + BeaconBlockHeader, BeaconState, BeaconStateError, ChainSpec, DepositData, Domain, EthSpec, Hash256, IndexedAttestation, ProposerSlashing, PublicKey, RelativeEpoch, Signature, Transfer, VoluntaryExit, }; @@ -194,18 +194,17 @@ pub fn attester_slashing_signature_sets<'a, T: EthSpec>( /// /// This method is separate to `deposit_signature_set` to satisfy lifetime requirements. pub fn deposit_pubkey_signature_message( - deposit: &Deposit, + deposit_data: &DepositData, ) -> Option<(PublicKey, Signature, Vec)> { - let pubkey = (&deposit.data.pubkey).try_into().ok()?; - let signature = (&deposit.data.signature).try_into().ok()?; - let message = deposit.data.signed_root(); + let pubkey = (&deposit_data.pubkey).try_into().ok()?; + let signature = (&deposit_data.signature).try_into().ok()?; + let message = deposit_data.signed_root(); Some((pubkey, signature, message)) } /// Returns the signature set for some set of deposit signatures, made with /// `deposit_pubkey_signature_message`. -pub fn deposit_signature_set<'a, T: EthSpec>( - state: &'a BeaconState, +pub fn deposit_signature_set<'a>( pubkey_signature_message: &'a (PublicKey, Signature, Vec), spec: &'a ChainSpec, ) -> SignatureSet<'a> { @@ -213,9 +212,12 @@ pub fn deposit_signature_set<'a, T: EthSpec>( // Note: Deposits are valid across forks, thus the deposit domain is computed // with the fork zeroed. - let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default()); - - SignatureSet::single(signature, pubkey, message.clone(), domain) + SignatureSet::single( + signature, + pubkey, + message.clone(), + spec.get_deposit_domain(), + ) } /// Returns a signature set that is valid if the `VoluntaryExit` was signed by the indicated diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 644b283574..c854bb82aa 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -15,16 +15,12 @@ fn error(reason: DepositInvalid) -> BlockOperationError { /// Verify `Deposit.pubkey` signed `Deposit.signature`. /// /// Spec v0.8.0 -pub fn verify_deposit_signature( - state: &BeaconState, - deposit: &Deposit, - spec: &ChainSpec, -) -> Result<()> { - let deposit_signature_message = deposit_pubkey_signature_message(deposit) +pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> { + let deposit_signature_message = deposit_pubkey_signature_message(&deposit_data) .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?; verify!( - deposit_signature_set(state, &deposit_signature_message, spec).is_valid(), + deposit_signature_set(&deposit_signature_message, spec).is_valid(), DepositInvalid::BadSignature ); diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 2aa805808a..b96a53d74b 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -216,7 +216,7 @@ impl BeaconState { // Versioning genesis_time, slot: spec.genesis_slot, - fork: Fork::genesis(T::genesis_epoch()), + fork: spec.genesis_fork.clone(), // History latest_block_header: BeaconBlock::::empty(spec).temporary_block_header(), diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index d59e0db0ac..bef78d99f1 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -91,8 +91,15 @@ pub struct ChainSpec { domain_voluntary_exit: u32, domain_transfer: u32, + /* + * Eth1 + */ + pub eth1_follow_distance: u64, + pub boot_nodes: Vec, pub network_id: u8, + + pub genesis_fork: Fork, } impl ChainSpec { @@ -118,6 +125,22 @@ impl ChainSpec { u64::from_le_bytes(fork_and_domain) } + /// Get the domain for a deposit signature. + /// + /// Deposits are valid across forks, thus the deposit domain is computed + /// with the fork zeroed. + /// + /// Spec v0.8.1 + pub fn get_deposit_domain(&self) -> u64 { + let mut bytes: Vec = int_to_bytes4(self.domain_deposit); + bytes.append(&mut vec![0; 4]); + + let mut fork_and_domain = [0; 8]; + fork_and_domain.copy_from_slice(&bytes); + + u64::from_le_bytes(fork_and_domain) + } + /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// /// Spec v0.8.1 @@ -186,6 +209,20 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_transfer: 5, + /* + * Eth1 + */ + eth1_follow_distance: 1_024, + + /* + * Fork + */ + genesis_fork: Fork { + previous_version: [0; 4], + current_version: [0; 4], + epoch: Epoch::new(0), + }, + /* * Network specific */ @@ -210,6 +247,7 @@ impl ChainSpec { max_epochs_per_crosslink: 4, network_id: 2, // lighthouse testnet network id boot_nodes, + eth1_follow_distance: 16, ..ChainSpec::mainnet() } } @@ -248,7 +286,7 @@ mod tests { } fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { - let fork = Fork::genesis(Epoch::new(0)); + let fork = &spec.genesis_fork; let epoch = Epoch::new(0); let domain = spec.get_domain(epoch, domain_type, &fork); diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index bde26c4b46..0e68454e04 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -7,6 +7,8 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +pub const DEPOSIT_TREE_DEPTH: usize = 32; + /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.8.0 diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index ab3b2dcb9d..95588e25a8 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -36,15 +36,9 @@ impl DepositData { /// Generate the signature for a given DepositData details. /// /// Spec v0.8.1 - pub fn create_signature( - &self, - secret_key: &SecretKey, - epoch: Epoch, - fork: &Fork, - spec: &ChainSpec, - ) -> SignatureBytes { + pub fn create_signature(&self, secret_key: &SecretKey, spec: &ChainSpec) -> SignatureBytes { let msg = self.signed_root(); - let domain = spec.get_domain(epoch, Domain::Deposit, fork); + let domain = spec.get_deposit_domain(); SignatureBytes::from(Signature::new(msg.as_slice(), domain, secret_key)) } diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 674cdd10b9..d98e89cee8 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -10,7 +10,18 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.8.1 #[derive( - Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, + PartialEq, + Clone, + Default, + Eq, + Hash, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Eth1Data { pub deposit_root: Hash256, diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 23869d073d..2da3218d11 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -28,17 +28,6 @@ pub struct Fork { } impl Fork { - /// Initialize the `Fork` from the genesis parameters in the `spec`. - /// - /// Spec v0.8.1 - pub fn genesis(genesis_epoch: Epoch) -> Self { - Self { - previous_version: [0; 4], - current_version: [0; 4], - epoch: genesis_epoch, - } - } - /// Return the fork version of the given ``epoch``. /// /// Spec v0.8.1 @@ -56,24 +45,6 @@ mod tests { ssz_tests!(Fork); - fn test_genesis(epoch: Epoch) { - let fork = Fork::genesis(epoch); - - assert_eq!(fork.epoch, epoch, "epoch incorrect"); - assert_eq!( - fork.previous_version, fork.current_version, - "previous and current are not identical" - ); - } - - #[test] - fn genesis() { - test_genesis(Epoch::new(0)); - test_genesis(Epoch::new(11)); - test_genesis(Epoch::new(2_u64.pow(63))); - test_genesis(Epoch::max_value()); - } - #[test] fn get_fork_version() { let previous_version = [1; 4]; diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index fa23f9c1c1..d9a4f22353 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -58,7 +58,7 @@ pub use crate::checkpoint::Checkpoint; pub use crate::compact_committee::CompactCommittee; pub use crate::crosslink::Crosslink; pub use crate::crosslink_committee::{CrosslinkCommittee, OwnedCrosslinkCommittee}; -pub use crate::deposit::Deposit; +pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::eth1_data::Eth1Data; pub use crate::fork::Fork; diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index fa77254d94..3df8ed9e9d 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -294,13 +294,7 @@ impl TestingBeaconBlockBuilder { let keypair = Keypair::random(); let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); - builder.sign( - &test_task, - &keypair, - state.slot.epoch(T::slots_per_epoch()), - &state.fork, - spec, - ); + builder.sign(&test_task, &keypair, spec); datas.push(builder.build().data); } diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index dcde1a74f0..41cd194377 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -30,14 +30,7 @@ impl TestingDepositBuilder { /// - `pubkey` to the signing pubkey. /// - `withdrawal_credentials` to the signing pubkey. /// - `proof_of_possession` - pub fn sign( - &mut self, - test_task: &DepositTestTask, - keypair: &Keypair, - epoch: Epoch, - fork: &Fork, - spec: &ChainSpec, - ) { + pub fn sign(&mut self, test_task: &DepositTestTask, keypair: &Keypair, spec: &ChainSpec) { let new_key = Keypair::random(); let mut pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); let mut secret_key = keypair.sk.clone(); @@ -61,10 +54,7 @@ impl TestingDepositBuilder { // Building the data and signing it self.deposit.data.pubkey = pubkeybytes; self.deposit.data.withdrawal_credentials = withdrawal_credentials; - self.deposit.data.signature = - self.deposit - .data - .create_signature(&secret_key, epoch, fork, spec); + self.deposit.data.signature = self.deposit.data.create_signature(&secret_key, spec); } /// Builds the deposit, consuming the builder. diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml index 7459cfed69..f186a90a06 100644 --- a/eth2/utils/eth2_config/Cargo.toml +++ b/eth2/utils/eth2_config/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -clap = "2.33.0" serde = "1.0.102" serde_derive = "1.0.102" toml = "0.5.4" diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs index 794a27e4e6..95a85c5e05 100644 --- a/eth2/utils/eth2_config/src/lib.rs +++ b/eth2/utils/eth2_config/src/lib.rs @@ -1,9 +1,7 @@ -use clap::ArgMatches; use serde_derive::{Deserialize, Serialize}; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; -use std::time::SystemTime; use types::ChainSpec; /// The core configuration of a Lighthouse beacon node. @@ -46,33 +44,6 @@ impl Eth2Config { } } -impl Eth2Config { - /// Apply the following arguments to `self`, replacing values if they are specified in `args`. - /// - /// Returns an error if arguments are obviously invalid. May succeed even if some values are - /// invalid. - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("recent-genesis") { - self.spec.min_genesis_time = recent_genesis_time() - } - - Ok(()) - } -} - -/// Returns the system time, mod 30 minutes. -/// -/// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. - now - secs_after_last_period -} - /// Write a configuration to file. pub fn write_to_file(path: PathBuf, config: &T) -> Result<(), String> where @@ -111,3 +82,15 @@ where Ok(None) } } + +#[cfg(test)] +mod tests { + use super::*; + use toml; + + #[test] + fn serde_serialize() { + let _ = + toml::to_string(&Eth2Config::default()).expect("Should serde encode default config"); + } +} diff --git a/eth2/utils/remote_beacon_node/Cargo.toml b/eth2/utils/remote_beacon_node/Cargo.toml new file mode 100644 index 0000000000..48567de37f --- /dev/null +++ b/eth2/utils/remote_beacon_node/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "remote_beacon_node" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reqwest = "0.9" +url = "1.2" +serde = "1.0" +futures = "0.1.25" +types = { path = "../../../eth2/types" } diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs new file mode 100644 index 0000000000..a796e166a5 --- /dev/null +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -0,0 +1,141 @@ +//! Provides a `RemoteBeaconNode` which interacts with a HTTP API on another Lighthouse (or +//! compatible) instance. +//! +//! Presently, this is only used for testing but it _could_ become a user-facing library. + +use futures::{Future, IntoFuture}; +use reqwest::r#async::{Client, RequestBuilder}; +use serde::Deserialize; +use std::marker::PhantomData; +use std::net::SocketAddr; +use types::{BeaconBlock, BeaconState, EthSpec}; +use types::{Hash256, Slot}; +use url::Url; + +/// Connects to a remote Lighthouse (or compatible) node via HTTP. +pub struct RemoteBeaconNode { + pub http: HttpClient, +} + +impl RemoteBeaconNode { + pub fn new(http_endpoint: SocketAddr) -> Result { + Ok(Self { + http: HttpClient::new(format!("http://{}", http_endpoint.to_string())) + .map_err(|e| format!("Unable to create http client: {:?}", e))?, + }) + } +} + +#[derive(Debug)] +pub enum Error { + UrlParseError(url::ParseError), + ReqwestError(reqwest::Error), +} + +#[derive(Clone)] +pub struct HttpClient { + client: Client, + url: Url, + _phantom: PhantomData, +} + +impl HttpClient { + /// Creates a new instance (without connecting to the node). + pub fn new(server_url: String) -> Result { + Ok(Self { + client: Client::new(), + url: Url::parse(&server_url)?, + _phantom: PhantomData, + }) + } + + pub fn beacon(&self) -> Beacon { + Beacon(self.clone()) + } + + fn url(&self, path: &str) -> Result { + self.url.join(path).map_err(|e| e.into()) + } + + pub fn get(&self, path: &str) -> Result { + self.url(path) + .map(|url| Client::new().get(&url.to_string())) + } +} + +/// Provides the functions on the `/beacon` endpoint of the node. +#[derive(Clone)] +pub struct Beacon(HttpClient); + +impl Beacon { + fn url(&self, path: &str) -> Result { + self.0 + .url("beacon/") + .and_then(move |url| url.join(path).map_err(Error::from)) + .map_err(Into::into) + } + + /// Returns the block and block root at the given slot. + pub fn block_at_slot( + &self, + slot: Slot, + ) -> impl Future, Hash256), Error = Error> { + let client = self.0.clone(); + self.url("block") + .into_future() + .and_then(move |mut url| { + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + client.get(&url.to_string()) + }) + .and_then(|builder| builder.send().map_err(Error::from)) + .and_then(|response| response.error_for_status().map_err(Error::from)) + .and_then(|mut success| success.json::>().map_err(Error::from)) + .map(|response| (response.beacon_block, response.root)) + } + + /// Returns the state and state root at the given slot. + pub fn state_at_slot( + &self, + slot: Slot, + ) -> impl Future, Hash256), Error = Error> { + let client = self.0.clone(); + self.url("state") + .into_future() + .and_then(move |mut url| { + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + client.get(&url.to_string()) + }) + .and_then(|builder| builder.send().map_err(Error::from)) + .and_then(|response| response.error_for_status().map_err(Error::from)) + .and_then(|mut success| success.json::>().map_err(Error::from)) + .map(|response| (response.beacon_state, response.root)) + } +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub beacon_block: BeaconBlock, + pub root: Hash256, +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub beacon_state: BeaconState, + pub root: Hash256, +} + +impl From for Error { + fn from(e: reqwest::Error) -> Error { + Error::ReqwestError(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Error { + Error::UrlParseError(e) + } +} diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index d1dbdb221e..b8bd0e16f2 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -18,3 +18,7 @@ types = { path = "../eth2/types" } state_processing = { path = "../eth2/state_processing" } eth2_ssz = "0.1.2" regex = "1.3.1" +eth1_test_rig = { path = "../tests/eth1_test_rig" } +futures = "0.1.25" +environment = { path = "../lighthouse/environment" } +web3 = "0.8.0" diff --git a/lcli/src/deposit_contract.rs b/lcli/src/deposit_contract.rs new file mode 100644 index 0000000000..0c5596a07d --- /dev/null +++ b/lcli/src/deposit_contract.rs @@ -0,0 +1,78 @@ +use clap::ArgMatches; +use environment::Environment; +use eth1_test_rig::{DelayThenDeposit, DepositContract}; +use futures::Future; +use std::time::Duration; +use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256}; +use web3::{transports::Http, Web3}; + +pub fn run_deposit_contract( + mut env: Environment, + matches: &ArgMatches, +) -> Result<(), String> { + let count = matches + .value_of("count") + .ok_or_else(|| "Deposit count not specified")? + .parse::() + .map_err(|e| format!("Failed to parse deposit count: {}", e))?; + + let delay = matches + .value_of("delay") + .ok_or_else(|| "Deposit count not specified")? + .parse::() + .map(Duration::from_millis) + .map_err(|e| format!("Failed to parse deposit count: {}", e))?; + + let confirmations = matches + .value_of("confirmations") + .ok_or_else(|| "Confirmations not specified")? + .parse::() + .map_err(|e| format!("Failed to parse confirmations: {}", e))?; + + let endpoint = matches + .value_of("endpoint") + .ok_or_else(|| "Endpoint not specified")?; + + let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { + format!( + "Failed to start HTTP transport connected to ganache: {:?}", + e + ) + })?; + let web3 = Web3::new(transport); + + let deposit_contract = env + .runtime() + .block_on(DepositContract::deploy(web3, confirmations)) + .map_err(|e| format!("Failed to deploy contract: {}", e))?; + + info!( + "Deposit contract deployed. Address: {}", + deposit_contract.address() + ); + + env.runtime() + .block_on(do_deposits::(deposit_contract, count, delay)) + .map_err(|e| format!("Failed to submit deposits: {}", e))?; + + Ok(()) +} + +fn do_deposits( + deposit_contract: DepositContract, + count: usize, + delay: Duration, +) -> impl Future { + let deposits = (0..count) + .map(|i| DelayThenDeposit { + deposit: deposit_contract.deposit_helper::( + generate_deterministic_keypair(i), + Hash256::from_low_u64_le(i as u64), + 32_000_000_000, + ), + delay, + }) + .collect(); + + deposit_contract.deposit_multiple(deposits) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 87d670cb96..85af9f21e8 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,11 +1,15 @@ #[macro_use] extern crate log; +mod deposit_contract; mod parse_hex; mod pycli; mod transition_blocks; use clap::{App, Arg, SubCommand}; +use deposit_contract::run_deposit_contract; +use environment::EnvironmentBuilder; +use log::Level; use parse_hex::run_parse_hex; use pycli::run_pycli; use std::fs::File; @@ -17,7 +21,7 @@ use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, Mini type LocalEthSpec = MinimalEthSpec; fn main() { - simple_logger::init().expect("logger should initialize"); + simple_logger::init_with_level(Level::Info).expect("logger should initialize"); let matches = App::new("Lighthouse CLI Tool") .version("0.1.0") @@ -115,6 +119,45 @@ fn main() { .help("SSZ encoded as 0x-prefixed hex"), ), ) + .subcommand( + SubCommand::with_name("deposit-contract") + .about( + "Uses an eth1 test rpc (e.g., ganache-cli) to simulate the deposit contract.", + ) + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("count") + .short("c") + .value_name("INTEGER") + .takes_value(true) + .required(true) + .help("The number of deposits to be submitted."), + ) + .arg( + Arg::with_name("delay") + .short("d") + .value_name("MILLIS") + .takes_value(true) + .required(true) + .help("The delay (in milliseconds) between each deposit"), + ) + .arg( + Arg::with_name("endpoint") + .short("e") + .value_name("HTTP_SERVER") + .takes_value(true) + .default_value("http://localhost:8545") + .help("The URL to the eth1 JSON-RPC http API."), + ) + .arg( + Arg::with_name("confirmations") + .value_name("INTEGER") + .takes_value(true) + .default_value("3") + .help("The number of block confirmations before declaring the contract deployed."), + ) + ) .subcommand( SubCommand::with_name("pycli") .about("TODO") @@ -132,6 +175,14 @@ fn main() { ) .get_matches(); + let env = EnvironmentBuilder::minimal() + .multi_threaded_tokio_runtime() + .expect("should start tokio runtime") + .null_logger() + .expect("should start null logger") + .build() + .expect("should build env"); + match matches.subcommand() { ("genesis_yaml", Some(matches)) => { let num_validators = matches @@ -178,6 +229,8 @@ fn main() { } ("pycli", Some(matches)) => run_pycli::(matches) .unwrap_or_else(|e| error!("Failed to run pycli: {}", e)), + ("deposit-contract", Some(matches)) => run_deposit_contract::(env, matches) + .unwrap_or_else(|e| error!("Failed to run deposit contract sim: {}", e)), (other, _) => error!("Unknown subcommand {}. See --help.", other), } } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml new file mode 100644 index 0000000000..25a41ea6a9 --- /dev/null +++ b/lighthouse/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "lighthouse" +version = "0.1.0" +authors = ["Sigma Prime "] +edition = "2018" + +[dependencies] +beacon_node = { "path" = "../beacon_node" } +tokio = "0.1.15" +slog = { version = "^2.2.3" , features = ["max_level_trace"] } +sloggers = "0.3.4" +types = { "path" = "../eth2/types" } +clap = "2.32.0" +env_logger = "0.6.1" +logging = { path = "../eth2/utils/logging" } +slog-term = "^2.4.0" +slog-async = "^2.3.0" +environment = { path = "./environment" } +futures = "0.1.25" +validator_client = { "path" = "../validator_client" } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml new file mode 100644 index 0000000000..b5e21a4e88 --- /dev/null +++ b/lighthouse/environment/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "environment" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +tokio = "0.1.15" +slog = { version = "^2.2.3" , features = ["max_level_trace"] } +sloggers = "0.3.4" +types = { "path" = "../../eth2/types" } +eth2_config = { "path" = "../../eth2/utils/eth2_config" } +env_logger = "0.6.1" +logging = { path = "../../eth2/utils/logging" } +slog-term = "^2.4.0" +slog-async = "^2.3.0" +ctrlc = { version = "3.1.1", features = ["termination"] } +futures = "0.1.25" +parking_lot = "0.7" diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs new file mode 100644 index 0000000000..bde6c2dbb3 --- /dev/null +++ b/lighthouse/environment/src/lib.rs @@ -0,0 +1,241 @@ +//! This crate aims to provide a common set of tools that can be used to create a "environment" to +//! run Lighthouse services like the `beacon_node` or `validator_client`. This allows for the +//! unification of creating tokio runtimes, loggers and eth2 specifications in production and in +//! testing. +//! +//! The idea is that the main thread creates an `Environment`, which is then used to spawn a +//! `Context` which can be handed to any service that wishes to start async tasks or perform +//! logging. + +use eth2_config::Eth2Config; +use futures::{sync::oneshot, Future}; +use slog::{o, Drain, Level, Logger}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::cell::RefCell; +use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor}; +use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; + +/// Builds an `Environment`. +pub struct EnvironmentBuilder { + runtime: Option, + log: Option, + eth_spec_instance: E, + eth2_config: Eth2Config, +} + +impl EnvironmentBuilder { + /// Creates a new builder using the `minimal` eth2 specification. + pub fn minimal() -> Self { + Self { + runtime: None, + log: None, + eth_spec_instance: MinimalEthSpec, + eth2_config: Eth2Config::minimal(), + } + } +} + +impl EnvironmentBuilder { + /// Creates a new builder using the `mainnet` eth2 specification. + pub fn mainnet() -> Self { + Self { + runtime: None, + log: None, + eth_spec_instance: MainnetEthSpec, + eth2_config: Eth2Config::mainnet(), + } + } +} + +impl EnvironmentBuilder { + /// Creates a new builder using the `interop` eth2 specification. + pub fn interop() -> Self { + Self { + runtime: None, + log: None, + eth_spec_instance: InteropEthSpec, + eth2_config: Eth2Config::interop(), + } + } +} + +impl EnvironmentBuilder { + /// Specifies that a multi-threaded tokio runtime should be used. Ideal for production uses. + /// + /// The `Runtime` used is just the standard tokio runtime. + pub fn multi_threaded_tokio_runtime(mut self) -> Result { + self.runtime = + Some(Runtime::new().map_err(|e| format!("Failed to start runtime: {:?}", e))?); + Ok(self) + } + + /// Specifies that a single-threaded tokio runtime should be used. Ideal for testing purposes + /// where tests are already multi-threaded. + /// + /// This can solve problems if "too many open files" errors are thrown during tests. + pub fn single_thread_tokio_runtime(mut self) -> Result { + self.runtime = Some( + RuntimeBuilder::new() + .core_threads(1) + .build() + .map_err(|e| format!("Failed to start runtime: {:?}", e))?, + ); + Ok(self) + } + + /// Specifies that all logs should be sent to `null` (i.e., ignored). + pub fn null_logger(mut self) -> Result { + self.log = Some(null_logger()?); + Ok(self) + } + + /// Specifies that the `slog` asynchronous logger should be used. Ideal for production. + /// + /// The logger is "async" because it has a dedicated thread that accepts logs and then + /// asynchronously flushes them to stdout/files/etc. This means the thread that raised the log + /// does not have to wait for the logs to be flushed. + pub fn async_logger(mut self, debug_level: &str) -> Result { + // Build the initial logger. + let decorator = slog_term::TermDecorator::new().build(); + let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build(); + + let drain = match debug_level { + "info" => drain.filter_level(Level::Info), + "debug" => drain.filter_level(Level::Debug), + "trace" => drain.filter_level(Level::Trace), + "warn" => drain.filter_level(Level::Warning), + "error" => drain.filter_level(Level::Error), + "crit" => drain.filter_level(Level::Critical), + unknown => return Err(format!("Unknown debug-level: {}", unknown)), + }; + + self.log = Some(Logger::root(drain.fuse(), o!())); + Ok(self) + } + + /// Consumes the builder, returning an `Environment`. + pub fn build(self) -> Result, String> { + Ok(Environment { + runtime: self + .runtime + .ok_or_else(|| "Cannot build environment without runtime".to_string())?, + log: self + .log + .ok_or_else(|| "Cannot build environment without log".to_string())?, + eth_spec_instance: self.eth_spec_instance, + eth2_config: self.eth2_config, + }) + } +} + +/// An execution context that can be used by a service. +/// +/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a +/// `Runtime`, instead it only has access to a `TaskExecutor`. +#[derive(Clone)] +pub struct RuntimeContext { + pub executor: TaskExecutor, + pub log: Logger, + pub eth_spec_instance: E, + pub eth2_config: Eth2Config, +} + +impl RuntimeContext { + /// Returns a sub-context of this context. + /// + /// The generated service will have the `service_name` in all it's logs. + pub fn service_context(&self, service_name: &'static str) -> Self { + Self { + executor: self.executor.clone(), + log: self.log.new(o!("service" => service_name)), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + } + } + + /// Returns the `eth2_config` for this service. + pub fn eth2_config(&self) -> &Eth2Config { + &self.eth2_config + } +} + +/// An environment where Lighthouse services can run. Used to start a production beacon node or +/// validator client, or to run tests that involve logging and async task execution. +pub struct Environment { + runtime: Runtime, + log: Logger, + eth_spec_instance: E, + eth2_config: Eth2Config, +} + +impl Environment { + /// Returns a mutable reference to the `tokio` runtime. + /// + /// Useful in the rare scenarios where it's necessary to block the current thread until a task + /// is finished (e.g., during testing). + pub fn runtime(&mut self) -> &mut Runtime { + &mut self.runtime + } + + /// Returns a `Context` where no "service" has been added to the logger output. + pub fn core_context(&mut self) -> RuntimeContext { + RuntimeContext { + executor: self.runtime.executor(), + log: self.log.clone(), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + } + } + + /// Returns a `Context` where the `service_name` is added to the logger output. + pub fn service_context(&mut self, service_name: &'static str) -> RuntimeContext { + RuntimeContext { + executor: self.runtime.executor(), + log: self.log.new(o!("service" => service_name)), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + } + } + + /// Block the current thread until Ctrl+C is received. + pub fn block_until_ctrl_c(&mut self) -> Result<(), String> { + let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); + let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); + ctrlc::set_handler(move || { + if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { + ctrlc_send.send(()).expect("Error sending ctrl-c message"); + } + }) + .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; + + // Block this thread until Crtl+C is pressed. + self.runtime() + .block_on(ctrlc_oneshot) + .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e)) + } + + /// Shutdown the `tokio` runtime when all tasks are idle. + pub fn shutdown_on_idle(self) -> Result<(), String> { + self.runtime + .shutdown_on_idle() + .wait() + .map_err(|e| format!("Tokio runtime shutdown returned an error: {:?}", e)) + } + + pub fn eth_spec_instance(&self) -> &E { + &self.eth_spec_instance + } + + pub fn eth2_config(&self) -> &Eth2Config { + &self.eth2_config + } +} + +pub fn null_logger() -> Result { + let log_builder = NullLoggerBuilder; + log_builder + .build() + .map_err(|e| format!("Failed to start null logger: {:?}", e)) +} diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs new file mode 100644 index 0000000000..9125e9802a --- /dev/null +++ b/lighthouse/src/main.rs @@ -0,0 +1,165 @@ +#[macro_use] +extern crate clap; + +use beacon_node::ProductionBeaconNode; +use clap::{App, Arg, ArgMatches}; +use env_logger::{Builder, Env}; +use environment::EnvironmentBuilder; +use slog::{crit, info, warn}; +use std::process::exit; +use types::EthSpec; +use validator_client::ProductionValidatorClient; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; + +fn main() { + // Debugging output for libp2p and external crates. + Builder::from_env(Env::default()).init(); + + // Parse the CLI parameters. + let matches = App::new("Lighthouse") + .version(crate_version!()) + .author("Sigma Prime ") + .about("Eth 2.0 Client") + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") + .takes_value(true) + .required(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .global(true) + .default_value("minimal") + ) + .arg( + Arg::with_name("logfile") + .long("logfile") + .value_name("FILE") + .help("File path where output will be written.") + .takes_value(true), + ) + .arg( + Arg::with_name("debug-level") + .long("debug-level") + .value_name("LEVEL") + .help("The title of the spec constants for chain config.") + .takes_value(true) + .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .default_value("trace"), + ) + .subcommand(beacon_node::cli_app()) + .subcommand(validator_client::cli_app()) + .get_matches(); + + macro_rules! run_with_spec { + ($env_builder: expr) => { + match run($env_builder, &matches) { + Ok(()) => exit(0), + Err(e) => { + println!("Failed to start Lighthouse: {}", e); + exit(1) + } + } + }; + } + + match matches.value_of("spec") { + Some("minimal") => run_with_spec!(EnvironmentBuilder::minimal()), + Some("mainnet") => run_with_spec!(EnvironmentBuilder::mainnet()), + Some("interop") => run_with_spec!(EnvironmentBuilder::interop()), + spec => { + // This path should be unreachable due to slog having a `default_value` + unreachable!("Unknown spec configuration: {:?}", spec); + } + } +} + +fn run( + environment_builder: EnvironmentBuilder, + matches: &ArgMatches, +) -> Result<(), String> { + let mut environment = environment_builder + .async_logger( + matches + .value_of("debug-level") + .ok_or_else(|| "Expected --debug-level flag".to_string())?, + )? + .multi_threaded_tokio_runtime()? + .build()?; + + let log = environment.core_context().log; + + if std::mem::size_of::() != 8 { + crit!( + log, + "Lighthouse only supports 64bit CPUs"; + "detected" => format!("{}bit", std::mem::size_of::() * 8) + ); + return Err("Invalid CPU architecture".into()); + } + + warn!( + log, + "Ethereum 2.0 is pre-release. This software is experimental." + ); + + // Note: the current code technically allows for starting a beacon node _and_ a validator + // client at the same time. + // + // Whilst this is possible, the mutual-exclusivity of `clap` sub-commands prevents it from + // actually happening. + // + // Creating a command which can run both might be useful future works. + + let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("Beacon Node") { + let runtime_context = environment.core_context(); + + let beacon = environment + .runtime() + .block_on(ProductionBeaconNode::new_from_cli( + runtime_context, + sub_matches, + )) + .map_err(|e| format!("Failed to start beacon node: {}", e))?; + + Some(beacon) + } else { + None + }; + + let validator_client = if let Some(sub_matches) = matches.subcommand_matches("Validator Client") + { + let runtime_context = environment.core_context(); + + let validator = ProductionValidatorClient::new_from_cli(runtime_context, sub_matches) + .map_err(|e| format!("Failed to init validator client: {}", e))?; + + validator + .start_service() + .map_err(|e| format!("Failed to start validator client service: {}", e))?; + + Some(validator) + } else { + None + }; + + if beacon_node.is_none() && validator_client.is_none() { + crit!(log, "No subcommand supplied. See --help ."); + return Err("No subcommand supplied.".into()); + } + + // Block this thread until Crtl+C is pressed. + environment.block_until_ctrl_c()?; + + info!(log, "Shutting down.."); + + drop(beacon_node); + drop(validator_client); + + // Shutdown the environment once all tasks have completed. + environment.shutdown_on_idle() +} diff --git a/scripts/ganache_test_node.sh b/scripts/ganache_test_node.sh new file mode 100755 index 0000000000..2a538266a4 --- /dev/null +++ b/scripts/ganache_test_node.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +ganache-cli \ + --defaultBalanceEther 1000000000 \ + --gasLimit 1000000000 \ + --accounts 10 \ + --mnemonic "vast thought differ pull jewel broom cook wrist tribe word before omit" \ + --port 8545 \ diff --git a/scripts/whiteblock_start.sh b/scripts/whiteblock_start.sh index 74bdd8cfab..f9d1a90075 100755 --- a/scripts/whiteblock_start.sh +++ b/scripts/whiteblock_start.sh @@ -74,9 +74,10 @@ do shift done -./beacon_node \ - --p2p-priv-key $IDENTITY \ +./lighthouse \ --logfile $BEACON_LOG_FILE \ + beacon \ + --p2p-priv-key $IDENTITY \ --libp2p-addresses $PEERS \ --port $PORT \ testnet \ @@ -86,8 +87,9 @@ done $GEN_STATE \ & \ -./validator_client \ +./lighthouse \ --logfile $VALIDATOR_LOG_FILE \ + validator \ testnet \ --bootstrap \ interop-yaml \ diff --git a/tests/eth1_test_rig/.gitignore b/tests/eth1_test_rig/.gitignore new file mode 100644 index 0000000000..81b46ff033 --- /dev/null +++ b/tests/eth1_test_rig/.gitignore @@ -0,0 +1 @@ +contract/ diff --git a/tests/eth1_test_rig/Cargo.toml b/tests/eth1_test_rig/Cargo.toml new file mode 100644 index 0000000000..e2815db984 --- /dev/null +++ b/tests/eth1_test_rig/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "eth1_test_rig" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +build = "build.rs" + +[build-dependencies] +reqwest = "0.9.20" +serde_json = "1.0" + +[dependencies] +web3 = "0.8.0" +tokio = "0.1.17" +futures = "0.1.25" +types = { path = "../../eth2/types"} +eth2_ssz = { path = "../../eth2/utils/ssz"} +serde_json = "1.0" diff --git a/tests/eth1_test_rig/build.rs b/tests/eth1_test_rig/build.rs new file mode 100644 index 0000000000..5923788793 --- /dev/null +++ b/tests/eth1_test_rig/build.rs @@ -0,0 +1,95 @@ +//! Downloads the ABI and bytecode for the deposit contract from the ethereum spec repository and +//! stores them in a `contract/` directory in the crate root. +//! +//! These files are required for some `include_bytes` calls used in this crate. + +use reqwest::Response; +use serde_json::Value; +use std::env; +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; + +const GITHUB_RAW: &str = "https://raw.githubusercontent.com"; +const SPEC_REPO: &str = "ethereum/eth2.0-specs"; +const SPEC_TAG: &str = "v0.8.3"; +const ABI_FILE: &str = "validator_registration.json"; +const BYTECODE_FILE: &str = "validator_registration.bytecode"; + +fn main() { + match init_deposit_contract_abi() { + Ok(()) => (), + Err(e) => panic!(e), + } +} + +/// Attempts to download the deposit contract ABI from github if a local copy is not already +/// present. +pub fn init_deposit_contract_abi() -> Result<(), String> { + let abi_file = abi_dir().join(format!("{}_{}", SPEC_TAG, ABI_FILE)); + let bytecode_file = abi_dir().join(format!("{}_{}", SPEC_TAG, BYTECODE_FILE)); + + if abi_file.exists() { + // Nothing to do. + } else { + match download_abi() { + Ok(mut response) => { + let mut abi_file = File::create(abi_file) + .map_err(|e| format!("Failed to create local abi file: {:?}", e))?; + let mut bytecode_file = File::create(bytecode_file) + .map_err(|e| format!("Failed to create local bytecode file: {:?}", e))?; + + let contract: Value = response + .json() + .map_err(|e| format!("Respsonse is not a valid json {:?}", e))?; + + let abi = contract + .get("abi") + .ok_or(format!("Response does not contain key: abi"))? + .to_string(); + abi_file + .write(abi.as_bytes()) + .map_err(|e| format!("Failed to write http response to abi file: {:?}", e))?; + + let bytecode = contract + .get("bytecode") + .ok_or(format!("Response does not contain key: bytecode"))? + .to_string(); + bytecode_file.write(bytecode.as_bytes()).map_err(|e| { + format!("Failed to write http response to bytecode file: {:?}", e) + })?; + } + Err(e) => { + return Err(format!( + "No abi file found. Failed to download from github: {:?}", + e + )) + } + } + } + + Ok(()) +} + +/// Attempts to download the deposit contract file from the Ethereum github. +fn download_abi() -> Result { + reqwest::get(&format!( + "{}/{}/{}/deposit_contract/contracts/{}", + GITHUB_RAW, SPEC_REPO, SPEC_TAG, ABI_FILE + )) + .map_err(|e| format!("Failed to download deposit ABI from github: {:?}", e)) +} + +/// Returns the directory that will be used to store the deposit contract ABI. +fn abi_dir() -> PathBuf { + let base = env::var("CARGO_MANIFEST_DIR") + .expect("should know manifest dir") + .parse::() + .expect("should parse manifest dir as path") + .join("contract"); + + std::fs::create_dir_all(base.clone()) + .expect("should be able to create abi directory in manifest"); + + base +} diff --git a/tests/eth1_test_rig/src/ganache.rs b/tests/eth1_test_rig/src/ganache.rs new file mode 100644 index 0000000000..bd81919a08 --- /dev/null +++ b/tests/eth1_test_rig/src/ganache.rs @@ -0,0 +1,157 @@ +use futures::Future; +use serde_json::json; +use std::io::prelude::*; +use std::io::BufReader; +use std::net::TcpListener; +use std::process::{Child, Command, Stdio}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use web3::{ + transports::{EventLoopHandle, Http}, + Transport, Web3, +}; + +/// How long we will wait for ganache to indicate that it is ready. +const GANACHE_STARTUP_TIMEOUT_MILLIS: u64 = 10_000; + +/// Provides a dedicated `ganachi-cli` instance with a connected `Web3` instance. +/// +/// Requires that `ganachi-cli` is installed and available on `PATH`. +pub struct GanacheInstance { + pub port: u16, + child: Child, + _event_loop: Arc, + pub web3: Web3, +} + +impl GanacheInstance { + /// Start a new `ganache-cli` process, waiting until it indicates that it is ready to accept + /// RPC connections. + pub fn new() -> Result { + let port = unused_port()?; + + let mut child = Command::new("ganache-cli") + .stdout(Stdio::piped()) + .arg("--defaultBalanceEther") + .arg("1000000000") + .arg("--gasLimit") + .arg("1000000000") + .arg("--accounts") + .arg("10") + .arg("--port") + .arg(format!("{}", port)) + .arg("--mnemonic") + .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") + .spawn() + .map_err(|e| { + format!( + "Failed to start ganche-cli. \ + Is it ganache-cli installed and available on $PATH? Error: {:?}", + e + ) + })?; + + let stdout = child + .stdout + .ok_or_else(|| "Unable to get stdout for ganache child process")?; + + let start = Instant::now(); + let mut reader = BufReader::new(stdout); + loop { + if start + Duration::from_millis(GANACHE_STARTUP_TIMEOUT_MILLIS) <= Instant::now() { + break Err( + "Timed out waiting for ganache to start. Is ganache-cli installed?".to_string(), + ); + } + + let mut line = String::new(); + if let Err(e) = reader.read_line(&mut line) { + break Err(format!("Failed to read line from ganache process: {:?}", e)); + } else if line.starts_with("Listening on") { + break Ok(()); + } else { + continue; + } + }?; + + let (event_loop, transport) = Http::new(&endpoint(port)).map_err(|e| { + format!( + "Failed to start HTTP transport connected to ganache: {:?}", + e + ) + })?; + let web3 = Web3::new(transport); + + child.stdout = Some(reader.into_inner()); + + Ok(Self { + child, + port, + _event_loop: Arc::new(event_loop), + web3, + }) + } + + /// Returns the endpoint that this instance is listening on. + pub fn endpoint(&self) -> String { + endpoint(self.port) + } + + /// Increase the timestamp on future blocks by `increase_by` seconds. + pub fn increase_time(&self, increase_by: u64) -> impl Future { + self.web3 + .transport() + .execute("evm_increaseTime", vec![json!(increase_by)]) + .map(|_json_value| ()) + .map_err(|e| format!("Failed to increase time on EVM (is this ganache?): {:?}", e)) + } + + /// Returns the current block number, as u64 + pub fn block_number(&self) -> impl Future { + self.web3 + .eth() + .block_number() + .map(|v| v.as_u64()) + .map_err(|e| format!("Failed to get block number: {:?}", e)) + } + + /// Mines a single block. + pub fn evm_mine(&self) -> impl Future { + self.web3 + .transport() + .execute("evm_mine", vec![]) + .map(|_| ()) + .map_err(|_| { + "utils should mine new block with evm_mine (only works with ganache-cli!)" + .to_string() + }) + } +} + +fn endpoint(port: u16) -> String { + format!("http://localhost:{}", port) +} + +/// A bit of hack to find an unused TCP port. +/// +/// Does not guarantee that the given port is unused after the function exists, just that it was +/// unused before the function started (i.e., it does not reserve a port). +pub fn unused_port() -> Result { + let listener = TcpListener::bind("127.0.0.1:0") + .map_err(|e| format!("Failed to create TCP listener to find unused port: {:?}", e))?; + + let local_addr = listener.local_addr().map_err(|e| { + format!( + "Failed to read TCP listener local_addr to find unused port: {:?}", + e + ) + })?; + + Ok(local_addr.port()) +} + +impl Drop for GanacheInstance { + fn drop(&mut self) { + let _ = self.child.kill(); + } +} diff --git a/tests/eth1_test_rig/src/lib.rs b/tests/eth1_test_rig/src/lib.rs new file mode 100644 index 0000000000..f137468b40 --- /dev/null +++ b/tests/eth1_test_rig/src/lib.rs @@ -0,0 +1,240 @@ +//! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. +//! +//! Presently used with [`ganache-cli`](https://github.com/trufflesuite/ganache-cli) to simulate +//! the deposit contract for testing beacon node eth1 integration. +//! +//! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be +//! some initial issues. +mod ganache; + +use futures::{stream, Future, IntoFuture, Stream}; +use ganache::GanacheInstance; +use ssz::Encode; +use std::time::{Duration, Instant}; +use tokio::{runtime::Runtime, timer::Delay}; +use types::DepositData; +use types::{EthSpec, Hash256, Keypair, Signature}; +use web3::contract::{Contract, Options}; +use web3::transports::Http; +use web3::types::{Address, U256}; +use web3::{Transport, Web3}; + +pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; +pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; + +const CONTRACT_DEPLOY_GAS: usize = 4_000_000; +const DEPOSIT_GAS: usize = 4_000_000; + +// Deposit contract +pub const ABI: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.json"); +pub const BYTECODE: &[u8] = include_bytes!("../contract/v0.8.3_validator_registration.bytecode"); + +/// Provides a dedicated ganache-cli instance with the deposit contract already deployed. +pub struct GanacheEth1Instance { + pub ganache: GanacheInstance, + pub deposit_contract: DepositContract, +} + +impl GanacheEth1Instance { + pub fn new() -> impl Future { + GanacheInstance::new().into_future().and_then(|ganache| { + DepositContract::deploy(ganache.web3.clone(), 0).map(|deposit_contract| Self { + ganache, + deposit_contract, + }) + }) + } + + pub fn endpoint(&self) -> String { + self.ganache.endpoint() + } + + pub fn web3(&self) -> Web3 { + self.ganache.web3.clone() + } +} + +/// Deploys and provides functions for the eth2 deposit contract, deployed on the eth1 chain. +#[derive(Clone, Debug)] +pub struct DepositContract { + web3: Web3, + contract: Contract, +} + +impl DepositContract { + pub fn deploy( + web3: Web3, + confirmations: usize, + ) -> impl Future { + let web3_1 = web3.clone(); + + deploy_deposit_contract(web3.clone(), confirmations) + .map_err(|e| { + format!( + "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.", + e + ) + }) + .and_then(move |address| { + Contract::from_json(web3_1.eth(), address, ABI) + .map_err(|e| format!("Failed to init contract: {:?}", e)) + }) + .map(|contract| Self { contract, web3 }) + } + + /// The deposit contract's address in `0x00ab...` format. + pub fn address(&self) -> String { + format!("0x{:x}", self.contract.address()) + } + + /// A helper to return a fully-formed `DepositData`. Does not submit the deposit data to the + /// smart contact. + pub fn deposit_helper( + &self, + keypair: Keypair, + withdrawal_credentials: Hash256, + amount: u64, + ) -> DepositData { + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials, + amount, + signature: Signature::empty_signature().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); + + deposit + } + + /// Creates a random, valid deposit and submits it to the deposit contract. + /// + /// The keypairs are created randomly and destroyed. + pub fn deposit_random(&self, runtime: &mut Runtime) -> Result<(), String> { + let keypair = Keypair::random(); + + let mut deposit = DepositData { + pubkey: keypair.pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 32_000_000_000, + signature: Signature::empty_signature().into(), + }; + + deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); + + self.deposit(runtime, deposit) + } + + /// Perfoms a blocking deposit. + pub fn deposit(&self, runtime: &mut Runtime, deposit_data: DepositData) -> Result<(), String> { + runtime + .block_on(self.deposit_async(deposit_data)) + .map_err(|e| format!("Deposit failed: {:?}", e)) + } + + /// Performs a non-blocking deposit. + pub fn deposit_async( + &self, + deposit_data: DepositData, + ) -> impl Future { + let contract = self.contract.clone(); + + self.web3 + .eth() + .accounts() + .map_err(|e| format!("Failed to get accounts: {:?}", e)) + .and_then(|accounts| { + accounts + .get(DEPOSIT_ACCOUNTS_INDEX) + .cloned() + .ok_or_else(|| "Insufficient accounts for deposit".to_string()) + }) + .and_then(move |from_address| { + let params = ( + deposit_data.pubkey.as_ssz_bytes(), + deposit_data.withdrawal_credentials.as_ssz_bytes(), + deposit_data.signature.as_ssz_bytes(), + ); + let options = Options { + gas: Some(U256::from(DEPOSIT_GAS)), + value: Some(from_gwei(deposit_data.amount)), + ..Options::default() + }; + contract + .call("deposit", params, from_address, options) + .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) + }) + .map(|_| ()) + } + + /// Peforms many deposits, each preceded by a delay. + pub fn deposit_multiple( + &self, + deposits: Vec, + ) -> impl Future { + let s = self.clone(); + stream::unfold(deposits.into_iter(), move |mut deposit_iter| { + let s = s.clone(); + match deposit_iter.next() { + Some(deposit) => Some( + Delay::new(Instant::now() + deposit.delay) + .map_err(|e| format!("Failed to execute delay: {:?}", e)) + .and_then(move |_| s.deposit_async(deposit.deposit)) + .map(move |yielded| (yielded, deposit_iter)), + ), + None => None, + } + }) + .collect() + .map(|_| ()) + } +} + +/// Describes a deposit and a delay that should should precede it's submission to the deposit +/// contract. +#[derive(Clone)] +pub struct DelayThenDeposit { + /// Wait this duration ... + pub delay: Duration, + /// ... then submit this deposit. + pub deposit: DepositData, +} + +fn from_gwei(gwei: u64) -> U256 { + U256::from(gwei) * U256::exp10(9) +} + +/// Deploys the deposit contract to the given web3 instance using the account with index +/// `DEPLOYER_ACCOUNTS_INDEX`. +fn deploy_deposit_contract( + web3: Web3, + confirmations: usize, +) -> impl Future { + let bytecode = String::from_utf8_lossy(&BYTECODE); + + web3.eth() + .accounts() + .map_err(|e| format!("Failed to get accounts: {:?}", e)) + .and_then(|accounts| { + accounts + .get(DEPLOYER_ACCOUNTS_INDEX) + .cloned() + .ok_or_else(|| "Insufficient accounts for deployer".to_string()) + }) + .and_then(move |deploy_address| { + Contract::deploy(web3.eth(), &ABI) + .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? + .confirmations(confirmations) + .options(Options { + gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), + ..Options::default() + }) + .execute(bytecode, (), deploy_address) + .map_err(|e| format!("Failed to execute deployment: {:?}", e)) + }) + .and_then(|pending_contract| { + pending_contract + .map(|contract| contract.address()) + .map_err(|e| format!("Unable to resolve pending contract: {:?}", e)) + }) +} diff --git a/tests/node_test_rig/Cargo.toml b/tests/node_test_rig/Cargo.toml new file mode 100644 index 0000000000..7bb19db9c6 --- /dev/null +++ b/tests/node_test_rig/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "node_test_rig" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +environment = { path = "../../lighthouse/environment" } +beacon_node = { path = "../../beacon_node" } +types = { path = "../../eth2/types" } +eth2_config = { path = "../../eth2/utils/eth2_config" } +tempdir = "0.3" +reqwest = "0.9" +url = "1.2" +serde = "1.0" +futures = "0.1.25" +genesis = { path = "../../beacon_node/genesis" } +remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" } diff --git a/tests/node_test_rig/src/lib.rs b/tests/node_test_rig/src/lib.rs new file mode 100644 index 0000000000..5a0f21e097 --- /dev/null +++ b/tests/node_test_rig/src/lib.rs @@ -0,0 +1,67 @@ +use beacon_node::{ + beacon_chain::BeaconChainTypes, Client, ClientConfig, ClientGenesis, ProductionBeaconNode, + ProductionClient, +}; +use environment::RuntimeContext; +use futures::Future; +use remote_beacon_node::RemoteBeaconNode; +use tempdir::TempDir; +use types::EthSpec; + +pub use environment; + +/// Provides a beacon node that is running in the current process. Useful for testing purposes. +pub struct LocalBeaconNode { + pub client: T, + pub datadir: TempDir, +} + +impl LocalBeaconNode> { + /// Starts a new, production beacon node. + pub fn production(context: RuntimeContext) -> Self { + let (client_config, datadir) = testing_client_config(); + + let client = ProductionBeaconNode::new(context, client_config) + .wait() + .expect("should build production client") + .into_inner(); + + LocalBeaconNode { client, datadir } + } +} + +impl LocalBeaconNode> { + /// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if + /// it were external this process. + pub fn remote_node(&self) -> Result, String> { + Ok(RemoteBeaconNode::new( + self.client + .http_listen_addr() + .ok_or_else(|| "A remote beacon node must have a http server".to_string())?, + )?) + } +} + +fn testing_client_config() -> (ClientConfig, TempDir) { + // Creates a temporary directory that will be deleted once this `TempDir` is dropped. + let tempdir = TempDir::new("lighthouse_node_test_rig") + .expect("should create temp directory for client datadir"); + + let mut client_config = ClientConfig::default(); + + client_config.data_dir = tempdir.path().into(); + + // Setting ports to `0` means that the OS will choose some available port. + client_config.network.libp2p_port = 0; + client_config.network.discovery_port = 0; + client_config.rpc.port = 0; + client_config.rest_api.port = 0; + client_config.websocket_server.port = 0; + + client_config.genesis = ClientGenesis::Interop { + validator_count: 8, + genesis_time: 13_371_337, + }; + + (client_config, tempdir) +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 09cb52b76d..038bbd3c35 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -4,10 +4,6 @@ version = "0.1.0" authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] edition = "2018" -[[bin]] -name = "validator_client" -path = "src/main.rs" - [lib] name = "validator_client" path = "src/lib.rs" @@ -38,4 +34,7 @@ bincode = "1.2.0" futures = "0.1.29" dirs = "2.0.2" logging = { path = "../eth2/utils/logging" } +environment = { path = "../lighthouse/environment" } +parking_lot = "0.7" +exit-future = "0.1.4" libc = "0.2.65" diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs new file mode 100644 index 0000000000..623d1b349b --- /dev/null +++ b/validator_client/src/cli.rs @@ -0,0 +1,123 @@ +use crate::config::{DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, DEFAULT_SERVER_HTTP_PORT}; +use clap::{App, Arg, SubCommand}; + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("Validator Client") + .visible_aliases(&["v", "vc", "validator", "validator_client"]) + .version("0.0.1") + .author("Sigma Prime ") + .about("Eth 2.0 Validator Client") + .arg( + Arg::with_name("datadir") + .long("datadir") + .short("d") + .value_name("DIR") + .help("Data directory for keys and databases.") + .takes_value(true), + ) + .arg( + Arg::with_name("logfile") + .long("logfile") + .value_name("logfile") + .help("File path where output will be written.") + .takes_value(true), + ) + .arg( + Arg::with_name("spec") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type.") + .takes_value(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .conflicts_with("eth2-config") + .global(true) + ) + .arg( + Arg::with_name("eth2-config") + .long("eth2-config") + .short("e") + .value_name("TOML_FILE") + .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") + .takes_value(true), + ) + .arg( + Arg::with_name("server") + .long("server") + .value_name("NETWORK_ADDRESS") + .help("Address to connect to BeaconNode.") + .default_value(DEFAULT_SERVER) + .takes_value(true), + ) + .arg( + Arg::with_name("server-grpc-port") + .long("server-grpc-port") + .short("g") + .value_name("PORT") + .help("Port to use for gRPC API connection to the server.") + .default_value(DEFAULT_SERVER_GRPC_PORT) + .takes_value(true), + ) + .arg( + Arg::with_name("server-http-port") + .long("server-http-port") + .short("h") + .value_name("PORT") + .help("Port to use for HTTP API connection to the server.") + .default_value(DEFAULT_SERVER_HTTP_PORT) + .takes_value(true), + ) + .arg( + Arg::with_name("debug-level") + .long("debug-level") + .value_name("LEVEL") + .short("s") + .help("The title of the spec constants for chain config.") + .takes_value(true) + .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .default_value("trace"), + ) + /* + * The "testnet" sub-command. + * + * Used for starting testnet validator clients. + */ + .subcommand(SubCommand::with_name("testnet") + .about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \ + validator index. ONLY USE FOR TESTING PURPOSES!") + .arg( + Arg::with_name("bootstrap") + .short("b") + .long("bootstrap") + .help("Connect to the RPC server to download the eth2_config via the HTTP API.") + ) + .subcommand(SubCommand::with_name("insecure") + .about("Uses the standard, predicatable `interop` keygen method to produce a range \ + of predicatable private keys and starts performing their validator duties.") + .arg(Arg::with_name("first_validator") + .value_name("VALIDATOR_INDEX") + .required(true) + .help("The first validator public key to be generated for this client.")) + .arg(Arg::with_name("validator_count") + .value_name("COUNT") + .required(true) + .help("The number of validators.")) + ) + .subcommand(SubCommand::with_name("interop-yaml") + .about("Loads plain-text secret keys from YAML files. Expects the interop format defined + in the ethereum/eth2.0-pm repo.") + .arg(Arg::with_name("path") + .value_name("PATH") + .required(true) + .help("Path to a YAML file.")) + ) + ) + .subcommand(SubCommand::with_name("sign_block") + .about("Connects to the beacon server, requests a new block (after providing reveal),\ + and prints the signed block to standard out") + .arg(Arg::with_name("validator") + .value_name("VALIDATOR") + .required(true) + .help("The pubkey of the validator that should sign the block.") + ) + ) +} diff --git a/validator_client/src/duties/mod.rs b/validator_client/src/duties/mod.rs index f0269a41f6..bc20b853b9 100644 --- a/validator_client/src/duties/mod.rs +++ b/validator_client/src/duties/mod.rs @@ -10,10 +10,10 @@ use self::epoch_duties::{EpochDuties, EpochDutiesMapError}; pub use self::epoch_duties::{EpochDutiesMap, WorkInfo}; use super::signer::Signer; use futures::Async; +use parking_lot::RwLock; use slog::{debug, error, info}; use std::fmt::Display; use std::sync::Arc; -use std::sync::RwLock; use types::{Epoch, PublicKey, Slot}; #[derive(Debug, PartialEq, Clone)] @@ -55,20 +55,20 @@ impl DutiesManager { let duties = self.beacon_node.request_duties(epoch, &public_keys)?; { // If these duties were known, check to see if they're updates or identical. - if let Some(known_duties) = self.duties_map.read()?.get(&epoch) { + if let Some(known_duties) = self.duties_map.read().get(&epoch) { if *known_duties == duties { return Ok(UpdateOutcome::NoChange(epoch)); } } } - if !self.duties_map.read()?.contains_key(&epoch) { + if !self.duties_map.read().contains_key(&epoch) { //TODO: Remove clone by removing duties from outcome - self.duties_map.write()?.insert(epoch, duties.clone()); + self.duties_map.write().insert(epoch, duties.clone()); return Ok(UpdateOutcome::NewDuties(epoch, duties)); } // duties have changed //TODO: Duties could be large here. Remove from display and avoid the clone. - self.duties_map.write()?.insert(epoch, duties.clone()); + self.duties_map.write().insert(epoch, duties.clone()); Ok(UpdateOutcome::DutiesChanged(epoch, duties)) } @@ -97,7 +97,7 @@ impl DutiesManager { let mut current_work: Vec<(usize, WorkInfo)> = Vec::new(); // if the map is poisoned, return None - let duties = self.duties_map.read().ok()?; + let duties = self.duties_map.read(); for (index, validator_signer) in self.signers.iter().enumerate() { match duties.is_work_slot(slot, &validator_signer.to_public()) { diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index fc08d6a123..175ee4793e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -1,4 +1,258 @@ -extern crate libc; -pub mod config; +mod attestation_producer; +mod block_producer; +mod cli; +mod config; +mod duties; +mod error; +mod service; +mod signer; -pub use crate::config::Config; +pub use cli::cli_app; +pub use config::Config; + +use clap::ArgMatches; +use config::{Config as ClientConfig, KeySource}; +use environment::RuntimeContext; +use eth2_config::Eth2Config; +use exit_future::Signal; +use futures::Stream; +use lighthouse_bootstrap::Bootstrapper; +use parking_lot::RwLock; +use protos::services_grpc::ValidatorServiceClient; +use service::Service; +use slog::{error, info, warn, Logger}; +use slot_clock::SlotClock; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::timer::Interval; +use types::{EthSpec, Keypair}; + +/// A fixed amount of time after a slot to perform operations. This gives the node time to complete +/// per-slot processes. +const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); + +#[derive(Clone)] +pub struct ProductionValidatorClient { + context: RuntimeContext, + service: Arc>, + exit_signals: Arc>>, +} + +impl ProductionValidatorClient { + /// Instantiates the validator client, _without_ starting the timers to trigger block + /// and attestation production. + pub fn new_from_cli(context: RuntimeContext, matches: &ArgMatches) -> Result { + let mut log = context.log.clone(); + + let (client_config, eth2_config) = get_configs(&matches, &mut log) + .map_err(|e| format!("Unable to initialize config: {}", e))?; + + info!( + log, + "Starting validator client"; + "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(), + ); + + let service: Service = + Service::initialize_service(client_config, eth2_config, log.clone()) + .map_err(|e| e.to_string())?; + + Ok(Self { + context, + service: Arc::new(service), + exit_signals: Arc::new(RwLock::new(vec![])), + }) + } + + /// Starts the timers to trigger block and attestation production. + pub fn start_service(&self) -> Result<(), String> { + let service = self.clone().service; + let log = self.context.log.clone(); + + let duration_to_next_slot = service + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "Unable to determine duration to next slot. Exiting.".to_string())?; + + // set up the validator work interval - start at next slot and proceed every slot + let interval = { + // Set the interval to start at the next slot, and every slot after + let slot_duration = Duration::from_millis(service.spec.milliseconds_per_slot); + //TODO: Handle checked add correctly + Interval::new(Instant::now() + duration_to_next_slot, slot_duration) + }; + + if service.slot_clock.now().is_none() { + warn!( + log, + "Starting node prior to genesis"; + ); + } + + info!( + log, + "Waiting for next slot"; + "seconds_to_wait" => duration_to_next_slot.as_secs() + ); + + let (exit_signal, exit_fut) = exit_future::signal(); + + self.exit_signals.write().push(exit_signal); + + /* kick off the core service */ + self.context.executor.spawn( + interval + .map_err(move |e| { + error! { + log, + "Timer thread failed"; + "error" => format!("{}", e) + } + }) + .and_then(move |_| if exit_fut.is_live() { Ok(()) } else { Err(()) }) + .for_each(move |_| { + // wait for node to process + std::thread::sleep(TIME_DELAY_FROM_SLOT); + // if a non-fatal error occurs, proceed to the next slot. + let _ignore_error = service.per_slot_execution(); + // completed a slot process + Ok(()) + }), + ); + + Ok(()) + } +} + +/// Parses the CLI arguments and attempts to load the client and eth2 configuration. +/// +/// This is not a pure function, it reads from disk and may contact network servers. +fn get_configs( + cli_args: &ArgMatches, + mut log: &mut Logger, +) -> Result<(ClientConfig, Eth2Config), String> { + let mut client_config = ClientConfig::default(); + + client_config.apply_cli_args(&cli_args, &mut log)?; + + if let Some(server) = cli_args.value_of("server") { + client_config.server = server.to_string(); + } + + if let Some(port) = cli_args.value_of("server-http-port") { + client_config.server_http_port = port + .parse::() + .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?; + } + + if let Some(port) = cli_args.value_of("server-grpc-port") { + client_config.server_grpc_port = port + .parse::() + .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?; + } + + info!( + *log, + "Beacon node connection info"; + "grpc_port" => client_config.server_grpc_port, + "http_port" => client_config.server_http_port, + "server" => &client_config.server, + ); + + let (client_config, eth2_config) = match cli_args.subcommand() { + ("testnet", Some(sub_cli_args)) => { + if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { + return Err( + "Cannot specify --eth2-config and --bootstrap as it may result \ + in ambiguity." + .into(), + ); + } + process_testnet_subcommand(sub_cli_args, client_config, log) + } + _ => return Err("You must use the testnet command. See '--help'.".into()), + }?; + + Ok((client_config, eth2_config)) +} + +/// Parses the `testnet` CLI subcommand. +/// +/// This is not a pure function, it reads from disk and may contact network servers. +fn process_testnet_subcommand( + cli_args: &ArgMatches, + mut client_config: ClientConfig, + log: &Logger, +) -> Result<(ClientConfig, Eth2Config), String> { + let eth2_config = if cli_args.is_present("bootstrap") { + info!(log, "Connecting to bootstrap server"); + let bootstrapper = Bootstrapper::connect( + format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ), + &log, + )?; + + let eth2_config = bootstrapper.eth2_config()?; + + info!( + log, + "Bootstrapped eth2 config via HTTP"; + "slot_time_millis" => eth2_config.spec.milliseconds_per_slot, + "spec" => ð2_config.spec_constants, + ); + + eth2_config + } else { + match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("No --spec flag provided. See '--help'.".into()), + } + }; + + client_config.key_source = match cli_args.subcommand() { + ("insecure", Some(sub_cli_args)) => { + let first = sub_cli_args + .value_of("first_validator") + .ok_or_else(|| "No first validator supplied")? + .parse::() + .map_err(|e| format!("Unable to parse first validator: {:?}", e))?; + let count = sub_cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator count supplied")? + .parse::() + .map_err(|e| format!("Unable to parse validator count: {:?}", e))?; + + info!( + log, + "Generating unsafe testing keys"; + "first_validator" => first, + "count" => count + ); + + KeySource::TestingKeypairRange(first..first + count) + } + ("interop-yaml", Some(sub_cli_args)) => { + let path = sub_cli_args + .value_of("path") + .ok_or_else(|| "No yaml path supplied")? + .parse::() + .map_err(|e| format!("Unable to parse yaml path: {:?}", e))?; + + info!( + log, + "Loading keypairs from interop YAML format"; + "path" => format!("{:?}", path), + ); + + KeySource::YamlKeypairs(path) + } + _ => KeySource::Disk, + }; + + Ok((client_config, eth2_config)) +} diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs deleted file mode 100644 index 30ed95661c..0000000000 --- a/validator_client/src/main.rs +++ /dev/null @@ -1,354 +0,0 @@ -mod attestation_producer; -mod block_producer; -mod config; -mod duties; -pub mod error; -mod service; -mod signer; - -use crate::config::{ - Config as ClientConfig, KeySource, DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, - DEFAULT_SERVER_HTTP_PORT, -}; -use crate::service::Service as ValidatorService; -use clap::{App, Arg, ArgMatches, SubCommand}; -use eth2_config::Eth2Config; -use lighthouse_bootstrap::Bootstrapper; -use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, Drain, Level, Logger}; -use std::path::PathBuf; -use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; - -pub const DEFAULT_SPEC: &str = "minimal"; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; -pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml"; -pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; - -type Result = core::result::Result; - -fn main() { - // Logging - let decorator = slog_term::TermDecorator::new().build(); - let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - // CLI - let matches = App::new("Lighthouse Validator Client") - .version("0.0.1") - .author("Sigma Prime ") - .about("Eth 2.0 Validator Client") - .arg( - Arg::with_name("datadir") - .long("datadir") - .short("d") - .value_name("DIR") - .help("Data directory for keys and databases.") - .takes_value(true), - ) - .arg( - Arg::with_name("logfile") - .long("logfile") - .value_name("logfile") - .help("File path where output will be written.") - .takes_value(true), - ) - .arg( - Arg::with_name("spec") - .long("spec") - .value_name("TITLE") - .help("Specifies the default eth2 spec type.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - .conflicts_with("eth2-config") - .global(true) - ) - .arg( - Arg::with_name("eth2-config") - .long("eth2-config") - .short("e") - .value_name("TOML_FILE") - .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") - .takes_value(true), - ) - .arg( - Arg::with_name("server") - .long("server") - .value_name("NETWORK_ADDRESS") - .help("Address to connect to BeaconNode.") - .default_value(DEFAULT_SERVER) - .takes_value(true), - ) - .arg( - Arg::with_name("server-grpc-port") - .long("server-grpc-port") - .short("g") - .value_name("PORT") - .help("Port to use for gRPC API connection to the server.") - .default_value(DEFAULT_SERVER_GRPC_PORT) - .takes_value(true), - ) - .arg( - Arg::with_name("server-http-port") - .long("server-http-port") - .short("h") - .value_name("PORT") - .help("Port to use for HTTP API connection to the server.") - .default_value(DEFAULT_SERVER_HTTP_PORT) - .takes_value(true), - ) - .arg( - Arg::with_name("debug-level") - .long("debug-level") - .value_name("LEVEL") - .short("s") - .help("The title of the spec constants for chain config.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), - ) - /* - * The "testnet" sub-command. - * - * Used for starting testnet validator clients. - */ - .subcommand(SubCommand::with_name("testnet") - .about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \ - validator index. ONLY USE FOR TESTING PURPOSES!") - .arg( - Arg::with_name("bootstrap") - .short("b") - .long("bootstrap") - .help("Connect to the RPC server to download the eth2_config via the HTTP API.") - ) - .subcommand(SubCommand::with_name("insecure") - .about("Uses the standard, predicatable `interop` keygen method to produce a range \ - of predicatable private keys and starts performing their validator duties.") - .arg(Arg::with_name("first_validator") - .value_name("VALIDATOR_INDEX") - .required(true) - .help("The first validator public key to be generated for this client.")) - .arg(Arg::with_name("validator_count") - .value_name("COUNT") - .required(true) - .help("The number of validators.")) - ) - .subcommand(SubCommand::with_name("interop-yaml") - .about("Loads plain-text secret keys from YAML files. Expects the interop format defined - in the ethereum/eth2.0-pm repo.") - .arg(Arg::with_name("path") - .value_name("PATH") - .required(true) - .help("Path to a YAML file.")) - ) - ) - .subcommand(SubCommand::with_name("sign_block") - .about("Connects to the beacon server, requests a new block (after providing reveal),\ - and prints the signed block to standard out") - .arg(Arg::with_name("validator") - .value_name("VALIDATOR") - .required(true) - .help("The pubkey of the validator that should sign the block.") - ) - ) - .get_matches(); - - let drain = match matches.value_of("debug-level") { - Some("info") => drain.filter_level(Level::Info), - Some("debug") => drain.filter_level(Level::Debug), - Some("trace") => drain.filter_level(Level::Trace), - Some("warn") => drain.filter_level(Level::Warning), - Some("error") => drain.filter_level(Level::Error), - Some("crit") => drain.filter_level(Level::Critical), - _ => unreachable!("guarded by clap"), - }; - - let mut log = slog::Logger::root(drain.fuse(), o!()); - - if std::mem::size_of::() != 8 { - crit!( - log, - "Lighthouse only supports 64bit CPUs"; - "detected" => format!("{}bit", std::mem::size_of::() * 8) - ); - } - - let (client_config, eth2_config) = match get_configs(&matches, &mut log) { - Ok(tuple) => tuple, - Err(e) => { - crit!( - log, - "Unable to initialize configuration"; - "error" => e - ); - return; - } - }; - - info!( - log, - "Starting validator client"; - "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(), - ); - - let result = match eth2_config.spec_constants.as_str() { - "mainnet" => ValidatorService::::start( - client_config, - eth2_config, - log.clone(), - ), - "minimal" => ValidatorService::::start( - client_config, - eth2_config, - log.clone(), - ), - "interop" => ValidatorService::::start( - client_config, - eth2_config, - log.clone(), - ), - other => { - crit!(log, "Unknown spec constants"; "title" => other); - return; - } - }; - - // start the validator service. - // this specifies the GRPC and signer type to use as the duty manager beacon node. - match result { - Ok(_) => info!(log, "Validator client shutdown successfully."), - Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()), - } -} - -/// Parses the CLI arguments and attempts to load the client and eth2 configuration. -/// -/// This is not a pure function, it reads from disk and may contact network servers. -pub fn get_configs( - cli_args: &ArgMatches, - mut log: &mut Logger, -) -> Result<(ClientConfig, Eth2Config)> { - let mut client_config = ClientConfig::default(); - - client_config.apply_cli_args(&cli_args, &mut log)?; - - if let Some(server) = cli_args.value_of("server") { - client_config.server = server.to_string(); - } - - if let Some(port) = cli_args.value_of("server-http-port") { - client_config.server_http_port = port - .parse::() - .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?; - } - - if let Some(port) = cli_args.value_of("server-grpc-port") { - client_config.server_grpc_port = port - .parse::() - .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?; - } - - info!( - *log, - "Beacon node connection info"; - "grpc_port" => client_config.server_grpc_port, - "http_port" => client_config.server_http_port, - "server" => &client_config.server, - ); - - let (client_config, eth2_config) = match cli_args.subcommand() { - ("testnet", Some(sub_cli_args)) => { - if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { - return Err( - "Cannot specify --eth2-config and --bootstrap as it may result \ - in ambiguity." - .into(), - ); - } - process_testnet_subcommand(sub_cli_args, client_config, log) - } - _ => return Err("You must use the testnet command. See '--help'.".into()), - }?; - - Ok((client_config, eth2_config)) -} - -/// Parses the `testnet` CLI subcommand. -/// -/// This is not a pure function, it reads from disk and may contact network servers. -fn process_testnet_subcommand( - cli_args: &ArgMatches, - mut client_config: ClientConfig, - log: &Logger, -) -> Result<(ClientConfig, Eth2Config)> { - let eth2_config = if cli_args.is_present("bootstrap") { - info!(log, "Connecting to bootstrap server"); - let bootstrapper = Bootstrapper::connect( - format!( - "http://{}:{}", - client_config.server, client_config.server_http_port - ), - &log, - )?; - - let eth2_config = bootstrapper.eth2_config()?; - - info!( - log, - "Bootstrapped eth2 config via HTTP"; - "slot_time_millis" => eth2_config.spec.milliseconds_per_slot, - "spec" => ð2_config.spec_constants, - ); - - eth2_config - } else { - match cli_args.value_of("spec") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - Some("interop") => Eth2Config::interop(), - _ => return Err("No --spec flag provided. See '--help'.".into()), - } - }; - - client_config.key_source = match cli_args.subcommand() { - ("insecure", Some(sub_cli_args)) => { - let first = sub_cli_args - .value_of("first_validator") - .ok_or_else(|| "No first validator supplied")? - .parse::() - .map_err(|e| format!("Unable to parse first validator: {:?}", e))?; - let count = sub_cli_args - .value_of("validator_count") - .ok_or_else(|| "No validator count supplied")? - .parse::() - .map_err(|e| format!("Unable to parse validator count: {:?}", e))?; - - info!( - log, - "Generating unsafe testing keys"; - "first_validator" => first, - "count" => count - ); - - KeySource::TestingKeypairRange(first..first + count) - } - ("interop-yaml", Some(sub_cli_args)) => { - let path = sub_cli_args - .value_of("path") - .ok_or_else(|| "No yaml path supplied")? - .parse::() - .map_err(|e| format!("Unable to parse yaml path: {:?}", e))?; - - info!( - log, - "Loading keypairs from interop YAML format"; - "path" => format!("{:?}", path), - ); - - KeySource::YamlKeypairs(path) - } - _ => KeySource::Disk, - }; - - Ok((client_config, eth2_config)) -} diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index a7974594d4..b193941147 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -17,6 +17,7 @@ use crate::signer::Signer; use bls::Keypair; use eth2_config::Eth2Config; use grpcio::{ChannelBuilder, EnvBuilder}; +use parking_lot::RwLock; use protos::services::Empty; use protos::services_grpc::{ AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient, @@ -26,18 +27,9 @@ use slog::{crit, error, info, trace, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; -use std::sync::RwLock; -use std::time::{Duration, Instant}; -use tokio::prelude::*; -use tokio::runtime::Builder; -use tokio::timer::Interval; -use tokio_timer::clock::Clock; +use std::time::Duration; use types::{ChainSpec, Epoch, EthSpec, Fork, Slot}; -/// A fixed amount of time after a slot to perform operations. This gives the node time to complete -/// per-slot processes. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); - /// The validator service. This is the main thread that executes and maintains validator /// duties. //TODO: Generalize the BeaconNode types to use testing @@ -45,12 +37,12 @@ pub struct Service, + current_slot: RwLock>, slots_per_epoch: u64, /// The chain specification for this clients instance. - spec: Arc, + pub spec: Arc, /// The duties manager which maintains the state of when to perform actions. duties_manager: Arc>, // GRPC Clients @@ -63,12 +55,12 @@ pub struct Service, } -impl Service { +impl Service { /// Initial connection to the beacon node to determine its properties. /// /// This tries to connect to a beacon node. Once connected, it initialised the gRPC clients /// and returns an instance of the service. - fn initialize_service( + pub fn initialize_service( client_config: ValidatorConfig, eth2_config: Eth2Config, log: slog::Logger, @@ -195,7 +187,7 @@ impl Service Service error_chain::Result<()> { - // connect to the node and retrieve its properties and initialize the gRPC clients - let mut service = Service::::initialize_service( - client_config, - eth2_config, - log.clone(), - )?; - - // we have connected to a node and established its parameters. Spin up the core service - - // set up the validator service runtime - let mut runtime = Builder::new() - .clock(Clock::system()) - .name_prefix("validator-client-") - .build() - .map_err(|e| format!("Tokio runtime failed: {}", e))?; - - let duration_to_next_slot = service - .slot_clock - .duration_to_next_slot() - .ok_or_else::(|| { - "Unable to determine duration to next slot. Exiting.".into() - })?; - - // set up the validator work interval - start at next slot and proceed every slot - let interval = { - // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_millis(service.spec.milliseconds_per_slot); - //TODO: Handle checked add correctly - Interval::new(Instant::now() + duration_to_next_slot, slot_duration) - }; - - if service.slot_clock.now().is_none() { - warn!( - log, - "Starting node prior to genesis"; - ); - } - - info!( - log, - "Waiting for next slot"; - "seconds_to_wait" => duration_to_next_slot.as_secs() - ); - - /* kick off the core service */ - runtime.block_on( - interval - .for_each(move |_| { - // wait for node to process - std::thread::sleep(TIME_DELAY_FROM_SLOT); - // if a non-fatal error occurs, proceed to the next slot. - let _ignore_error = service.per_slot_execution(); - // completed a slot process - Ok(()) - }) - .map_err(|e| format!("Service thread failed: {:?}", e)), - )?; - // validator client exited - Ok(()) - } - +impl Service { /// The execution logic that runs every slot. // Errors are logged to output, and core execution continues unless fatal errors occur. - fn per_slot_execution(&mut self) -> error_chain::Result<()> { + pub fn per_slot_execution(&self) -> error_chain::Result<()> { /* get the new current slot and epoch */ self.update_current_slot()?; @@ -295,7 +221,7 @@ impl Service error_chain::Result<()> { + fn update_current_slot(&self) -> error_chain::Result<()> { let wall_clock_slot = self .slot_clock .now() @@ -304,11 +230,12 @@ impl Service Service wall_clock_slot.as_u64(), "epoch" => wall_clock_epoch.as_u64()); Ok(()) } /// For all known validator keypairs, update any known duties from the beacon node. - fn check_for_duties(&mut self) { + fn check_for_duties(&self) { let cloned_manager = self.duties_manager.clone(); let cloned_log = self.log.clone(); let current_epoch = self .current_slot + .read() .expect("The current slot must be updated before checking for duties") .epoch(self.slots_per_epoch); @@ -349,9 +277,10 @@ impl Service Service Service Date: Sat, 16 Nov 2019 06:49:43 +1100 Subject: [PATCH 17/21] Set the gitlab docker back to latest (#603) --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3b26fa79da..1636d51724 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,7 @@ #Adapted from https://users.rust-lang.org/t/my-gitlab-config-docs-tests/16396 default: - image: 'sigp/lighthouse:eth1' + image: 'sigp/lighthouse:latest' cache: paths: - tests/ef_tests/*-v0.8.3.tar.gz From e1de30bd648f77e48f84cb2077bed613b7af321e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 20 Nov 2019 11:25:16 +1100 Subject: [PATCH 18/21] Clean CLI arguments (#609) --- lighthouse/src/main.rs | 1 - validator_client/src/cli.rs | 35 ----------------------------------- 2 files changed, 36 deletions(-) diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 9125e9802a..5ad7a2bf8a 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -30,7 +30,6 @@ fn main() { .value_name("TITLE") .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") .takes_value(true) - .required(true) .possible_values(&["mainnet", "minimal", "interop"]) .global(true) .default_value("minimal") diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 623d1b349b..ef8c4ee181 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -15,31 +15,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Data directory for keys and databases.") .takes_value(true), ) - .arg( - Arg::with_name("logfile") - .long("logfile") - .value_name("logfile") - .help("File path where output will be written.") - .takes_value(true), - ) - .arg( - Arg::with_name("spec") - .long("spec") - .value_name("TITLE") - .help("Specifies the default eth2 spec type.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - .conflicts_with("eth2-config") - .global(true) - ) - .arg( - Arg::with_name("eth2-config") - .long("eth2-config") - .short("e") - .value_name("TOML_FILE") - .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") - .takes_value(true), - ) .arg( Arg::with_name("server") .long("server") @@ -66,16 +41,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value(DEFAULT_SERVER_HTTP_PORT) .takes_value(true), ) - .arg( - Arg::with_name("debug-level") - .long("debug-level") - .value_name("LEVEL") - .short("s") - .help("The title of the spec constants for chain config.") - .takes_value(true) - .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), - ) /* * The "testnet" sub-command. * From 62d66f1c10acefca87425464102bb5239d67f809 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Nov 2019 15:03:50 +1100 Subject: [PATCH 19/21] Enables ENR auto-update based on new listen address (#610) --- beacon_node/eth2-libp2p/Cargo.toml | 4 +-- beacon_node/eth2-libp2p/src/behaviour.rs | 5 +++ beacon_node/eth2-libp2p/src/discovery.rs | 18 +++++++++++ beacon_node/eth2-libp2p/src/service.rs | 40 +++++++++++++++++++++++- 4 files changed, 64 insertions(+), 3 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 8982e17669..51d596b1e1 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" clap = "2.33.0" hex = "0.3" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "cdd5251d29e21a01aa2ffed8cb577a37a0f9e2eb" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "cdd5251d29e21a01aa2ffed8cb577a37a0f9e2eb", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "1295ff592a94d19f23f176712d6d04af4db6e698" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "1295ff592a94d19f23f176712d6d04af4db6e698", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0.102" serde_derive = "1.0.102" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index d8301ad8be..6a97ac9e41 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -222,6 +222,11 @@ impl Behaviour { pub fn connected_peers(&self) -> usize { self.discovery.connected_peers() } + + /// Informs the discovery behaviour if a new IP/Port is set at the application layer + pub fn update_local_enr_socket(&mut self, socket: std::net::SocketAddr, is_tcp: bool) { + self.discovery.update_local_enr(socket, is_tcp); + } } /// The types of events than can be obtained from polling the behaviour. diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 380914af5b..8ad58300a7 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -103,6 +103,24 @@ impl Discovery { }) } + /// Allows the application layer to update the `ip` and `port` of the local ENR. The second + /// parameter defines whether the port is a TPC port. If false, this is interpreted as a UDP + /// port. + pub fn update_local_enr(&mut self, socket: std::net::SocketAddr, is_tcp: bool) { + // discv5 checks to see if an update is necessary before performing it, so we do not + // need to check here + if self.discovery.update_local_enr_socket(socket, is_tcp) { + let enr = self.discovery.local_enr(); + info!( + self.log, + "ENR Updated"; + "enr" => enr.to_base64(), + "seq" => enr.seq(), + "address" => format!("{:?}", socket)); + } + } + + /// Return the nodes local ENR. pub fn local_enr(&self) -> &Enr { self.discovery.local_enr() } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 2ffafb855e..b7ede9dd84 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -30,6 +30,8 @@ pub struct Service { pub swarm: Swarm, /// This node's PeerId. pub local_peer_id: PeerId, + /// Indicates if the listening address have been verified and compared to the expected ENR. + pub verified_listen_address: bool, /// The libp2p logger handle. pub log: slog::Logger, } @@ -151,6 +153,7 @@ impl Service { Ok(Service { local_peer_id, swarm, + verified_listen_address: false, log, }) } @@ -189,7 +192,18 @@ impl Stream for Service { } }, Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), - Ok(Async::NotReady) => break, + Ok(Async::NotReady) => { + // check to see if the address is different to the config. If so, update our ENR + if !self.verified_listen_address { + let multiaddr = Swarm::listeners(&self.swarm).next(); + if let Some(multiaddr) = multiaddr { + if let Some(socket_addr) = multiaddr_to_socket_addr(multiaddr) { + self.swarm.update_local_enr_socket(socket_addr, true); + } + } + } + break; + } _ => break, } } @@ -197,6 +211,30 @@ impl Stream for Service { } } +/// Converts a multiaddr to a `SocketAddr` if the multiaddr has the TCP/IP form. Libp2p currently +/// only supports TCP, so the UDP case is currently ignored. +fn multiaddr_to_socket_addr(multiaddr: &Multiaddr) -> Option { + let protocols = multiaddr.iter().collect::>(); + // assume the IP protocol + match protocols[0] { + Protocol::Ip4(address) => { + if let Protocol::Tcp(port) = protocols[1] { + Some(std::net::SocketAddr::new(address.into(), port)) + } else { + None + } + } + Protocol::Ip6(address) => { + if let Protocol::Tcp(port) = protocols[1] { + Some(std::net::SocketAddr::new(address.into(), port)) + } else { + None + } + } + _ => None, + } +} + /// The implementation supports TCP/IP, WebSockets over TCP/IP, secio as the encryption layer, and /// mplex or yamux as the multiplexing layer. fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> { From b7a0feb7253965b1d5e622b6247736ca29e1a254 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Nov 2019 16:32:38 +1100 Subject: [PATCH 20/21] Update the libp2p commit hash to target latest lighthouse branch (#611) --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 51d596b1e1..430d54ea65 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" clap = "2.33.0" hex = "0.3" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "1295ff592a94d19f23f176712d6d04af4db6e698" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "1295ff592a94d19f23f176712d6d04af4db6e698", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "0d4583e110b3ab9406ecd512655bba1a9906d470" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "0d4583e110b3ab9406ecd512655bba1a9906d470", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0.102" serde_derive = "1.0.102" From 24e941d175a7455209c140157bf8d5d588c87ba1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 21 Nov 2019 11:47:30 +1100 Subject: [PATCH 21/21] Update to spec v0.9.1 (#597) * Update to spec v0.9.0 * Update to v0.9.1 * Bump spec tags for v0.9.1 * Formatting, fix CI failures * Resolve accidental KeyPair merge conflict * Document new BeaconState functions * Fix incorrect cache drops in `advance_caches` * Update fork choice for v0.9.1 * Clean up some FIXMEs * Fix a few docs/logs --- .gitlab-ci.yml | 4 +- beacon_node/beacon_chain/src/beacon_chain.rs | 152 +++--- beacon_node/beacon_chain/src/builder.rs | 7 +- beacon_node/beacon_chain/src/errors.rs | 1 - beacon_node/beacon_chain/src/eth1_chain.rs | 2 +- beacon_node/beacon_chain/src/fork_choice.rs | 138 ++++-- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/metrics.rs | 3 - beacon_node/beacon_chain/src/test_utils.rs | 33 +- beacon_node/rest_api/src/validator.rs | 24 +- beacon_node/rpc/src/attestation.rs | 5 +- beacon_node/rpc/src/validator.rs | 8 +- book/src/simple-testnet.md | 2 +- eth2/lmd_ghost/Cargo.toml | 1 + eth2/lmd_ghost/src/lib.rs | 2 +- eth2/lmd_ghost/src/reduced_tree.rs | 181 ++++++-- eth2/lmd_ghost/tests/test.rs | 60 ++- eth2/operation_pool/Cargo.toml | 1 - eth2/operation_pool/src/attestation.rs | 13 +- eth2/operation_pool/src/attestation_id.rs | 2 +- eth2/operation_pool/src/lib.rs | 122 ++--- eth2/operation_pool/src/persistence.rs | 7 - .../src/common/get_attesting_indices.rs | 10 +- .../src/common/get_compact_committees_root.rs | 41 -- .../src/common/get_indexed_attestation.rs | 126 +---- .../src/common/initiate_validator_exit.rs | 2 +- eth2/state_processing/src/common/mod.rs | 2 - .../src/common/slash_validator.rs | 5 +- eth2/state_processing/src/genesis.rs | 17 +- .../src/per_block_processing.rs | 73 +-- .../block_processing_builder.rs | 34 +- .../block_signature_verifier.rs | 16 - .../src/per_block_processing/errors.rs | 102 +--- .../is_valid_indexed_attestation.rs | 31 +- .../per_block_processing/signature_sets.rs | 64 +-- .../src/per_block_processing/tests.rs | 352 ++++---------- .../verify_attestation.rs | 61 +-- .../verify_attester_slashing.rs | 10 +- .../per_block_processing/verify_deposit.rs | 4 +- .../src/per_block_processing/verify_exit.rs | 6 +- .../verify_proposer_slashing.rs | 9 +- .../per_block_processing/verify_transfer.rs | 208 --------- .../src/per_epoch_processing.rs | 76 +-- .../src/per_epoch_processing/apply_rewards.rs | 62 +-- .../per_epoch_processing/process_slashings.rs | 2 +- .../per_epoch_processing/registry_updates.rs | 2 +- .../validator_statuses.rs | 109 +---- .../src/per_epoch_processing/winning_root.rs | 130 ------ .../src/per_slot_processing.rs | 2 +- eth2/state_processing/src/test_utils.rs | 33 +- eth2/state_processing/tests/tests.rs | 2 - eth2/types/src/attestation.rs | 4 +- eth2/types/src/attestation_data.rs | 10 +- .../src/attestation_data_and_custody_bit.rs | 22 - eth2/types/src/attestation_duty.rs | 8 +- eth2/types/src/attester_slashing.rs | 2 +- eth2/types/src/beacon_block.rs | 11 +- eth2/types/src/beacon_block_body.rs | 3 +- eth2/types/src/beacon_block_header.rs | 6 +- ...slink_committee.rs => beacon_committee.rs} | 16 +- eth2/types/src/beacon_state.rs | 434 +++++++----------- .../types/src/beacon_state/committee_cache.rs | 213 +++------ .../src/beacon_state/committee_cache/tests.rs | 170 +------ eth2/types/src/beacon_state/tests.rs | 163 ++----- eth2/types/src/chain_spec.rs | 70 +-- eth2/types/src/checkpoint.rs | 2 +- eth2/types/src/compact_committee.rs | 24 - eth2/types/src/crosslink.rs | 40 -- eth2/types/src/deposit.rs | 2 +- eth2/types/src/deposit_data.rs | 4 +- eth2/types/src/eth1_data.rs | 2 +- eth2/types/src/eth_spec.rs | 69 +-- eth2/types/src/fork.rs | 4 +- eth2/types/src/historical_batch.rs | 2 +- eth2/types/src/indexed_attestation.rs | 9 +- eth2/types/src/lib.rs | 27 +- eth2/types/src/pending_attestation.rs | 2 +- eth2/types/src/proposer_slashing.rs | 2 +- eth2/types/src/relative_epoch.rs | 6 +- eth2/types/src/test_utils/builders.rs | 2 - .../builders/testing_attestation_builder.rs | 35 +- .../testing_attestation_data_builder.rs | 51 +- .../testing_attester_slashing_builder.rs | 50 +- .../builders/testing_beacon_block_builder.rs | 75 +-- .../builders/testing_beacon_state_builder.rs | 14 +- .../builders/testing_deposit_builder.rs | 2 +- .../testing_pending_attestation_builder.rs | 15 +- .../testing_proposer_slashing_builder.rs | 12 +- .../builders/testing_transfer_builder.rs | 45 -- eth2/types/src/transfer.rs | 45 -- eth2/types/src/validator.rs | 2 +- eth2/types/src/voluntary_exit.rs | 2 +- .../swap_or_not_shuffle/src/shuffle_list.rs | 12 +- tests/ef_tests/Makefile | 2 +- tests/ef_tests/src/cases/epoch_processing.rs | 15 +- tests/ef_tests/src/cases/operations.rs | 14 +- tests/ef_tests/src/lib.rs | 2 +- tests/ef_tests/src/type_name.rs | 4 - tests/ef_tests/tests/tests.rs | 19 +- validator_client/README.md | 4 +- .../beacon_node_attestation.rs | 4 +- .../src/attestation_producer/grpc.rs | 6 +- .../src/attestation_producer/mod.rs | 21 +- validator_client/src/duties/epoch_duties.rs | 6 +- validator_client/src/duties/grpc.rs | 4 +- 105 files changed, 1211 insertions(+), 2940 deletions(-) delete mode 100644 eth2/state_processing/src/common/get_compact_committees_root.rs delete mode 100644 eth2/state_processing/src/per_block_processing/verify_transfer.rs delete mode 100644 eth2/state_processing/src/per_epoch_processing/winning_root.rs delete mode 100644 eth2/types/src/attestation_data_and_custody_bit.rs rename eth2/types/src/{crosslink_committee.rs => beacon_committee.rs} (52%) delete mode 100644 eth2/types/src/compact_committee.rs delete mode 100644 eth2/types/src/crosslink.rs delete mode 100644 eth2/types/src/test_utils/builders/testing_transfer_builder.rs delete mode 100644 eth2/types/src/transfer.rs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1636d51724..7f68a0fd19 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,7 +4,7 @@ default: image: 'sigp/lighthouse:latest' cache: paths: - - tests/ef_tests/*-v0.8.3.tar.gz + - tests/ef_tests/*-v0.9.1.tar.gz stages: - test @@ -47,7 +47,7 @@ test-ef-fake-crypto: GIT_SUBMODULE_STRATEGY: normal script: - make make-ef-tests - - cargo test --manifest-path tests/ef_tests/Cargo.toml --release --features ef_tests fake_crypto + - cargo test --manifest-path tests/ef_tests/Cargo.toml --release --features ef_tests,fake_crypto documentation: stage: document diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5d7def3002..ad05b155c4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -16,7 +16,7 @@ use ssz::Encode; use state_processing::per_block_processing::{ errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, - ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + ExitValidationError, ProposerSlashingValidationError, }, verify_attestation_for_state, VerifySignatures, }; @@ -218,6 +218,44 @@ impl BeaconChain { ReverseBlockRootIterator::new((head.beacon_block_root, head.beacon_block.slot), iter) } + /// Traverse backwards from `block_root` to find the block roots of its ancestors. + /// + /// ## Notes + /// + /// `slot` always decreases by `1`. + /// - Skipped slots contain the root of the closest prior + /// non-skipped slot (identical to the way they are stored in `state.block_roots`) . + /// - Iterator returns `(Hash256, Slot)`. + /// - The provided `block_root` is included as the first item in the iterator. + pub fn rev_iter_block_roots_from( + &self, + block_root: Hash256, + ) -> Result, Error> { + let block = self + .get_block(&block_root)? + .ok_or_else(|| Error::MissingBeaconBlock(block_root))?; + let state = self + .get_state(&block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(block.state_root))?; + let iter = BlockRootsIterator::owned(self.store.clone(), state); + Ok(ReverseBlockRootIterator::new( + (block_root, block.slot), + iter, + )) + } + + /// Traverse backwards from `block_root` to find the root of the ancestor block at `slot`. + pub fn get_ancestor_block_root( + &self, + block_root: Hash256, + slot: Slot, + ) -> Result, Error> { + Ok(self + .rev_iter_block_roots_from(block_root)? + .find(|(_, ancestor_slot)| *ancestor_slot == slot) + .map(|(ancestor_block_root, _)| ancestor_block_root)) + } + /// Iterates across all `(state_root, slot)` pairs from the head of the chain (inclusive) to /// the earliest reachable ancestor (may or may not be genesis). /// @@ -248,6 +286,18 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } + /// Returns the state at the given root, if any. + /// + /// ## Errors + /// + /// May return a database error. + pub fn get_state( + &self, + state_root: &Hash256, + ) -> Result>, Error> { + Ok(self.store.get(state_root)?) + } + /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; /// the head of the canonical `BeaconChain`. /// @@ -385,15 +435,15 @@ impl BeaconChain { } state - .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) + .get_beacon_proposer_index(slot, &self.spec) .map_err(Into::into) } - /// Returns the attestation slot and shard for a given validator index. + /// Returns the attestation slot and committee index for a given validator index. /// /// Information is read from the current state, so only information from the present and prior /// epoch is available. - pub fn validator_attestation_slot_and_shard( + pub fn validator_attestation_slot_and_index( &self, validator_index: usize, epoch: Epoch, @@ -420,25 +470,25 @@ impl BeaconChain { if let Some(attestation_duty) = state.get_attestation_duties(validator_index, RelativeEpoch::Current)? { - Ok(Some((attestation_duty.slot, attestation_duty.shard))) + Ok(Some((attestation_duty.slot, attestation_duty.index))) } else { Ok(None) } } - /// Produce an `AttestationData` that is valid for the given `slot` `shard`. + /// Produce an `AttestationData` that is valid for the given `slot`, `index`. /// /// Always attests to the canonical chain. pub fn produce_attestation_data( &self, - shard: u64, slot: Slot, + index: CommitteeIndex, ) -> Result { let state = self.state_at_slot(slot)?; let head = self.head(); self.produce_attestation_data_for_block( - shard, + index, head.beacon_block_root, head.beacon_block.slot, &state, @@ -451,7 +501,7 @@ impl BeaconChain { /// function should be used as it attests to the canonical chain. pub fn produce_attestation_data_for_block( &self, - shard: u64, + index: CommitteeIndex, head_block_root: Hash256, head_block_slot: Slot, state: &BeaconState, @@ -492,18 +542,6 @@ impl BeaconChain { root: target_root, }; - let parent_crosslink = state.get_current_crosslink(shard)?; - let crosslink = Crosslink { - shard, - parent_root: Hash256::from_slice(&parent_crosslink.tree_hash_root()), - start_epoch: parent_crosslink.end_epoch, - end_epoch: std::cmp::min( - target.epoch, - parent_crosslink.end_epoch + self.spec.max_epochs_per_crosslink, - ), - data_root: Hash256::zero(), - }; - // Collect some metrics. metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES); metrics::stop_timer(timer); @@ -512,15 +550,16 @@ impl BeaconChain { self.log, "Produced beacon attestation data"; "beacon_block_root" => format!("{}", head_block_root), - "shard" => shard, - "slot" => state.slot + "slot" => state.slot, + "index" => index ); Ok(AttestationData { + slot: state.slot, + index, beacon_block_root: head_block_root, source: state.current_justified_checkpoint.clone(), target, - crosslink, }) } @@ -549,7 +588,7 @@ impl BeaconChain { self.log, "Beacon attestation imported"; "target_epoch" => attestation.data.target.epoch, - "shard" => attestation.data.crosslink.shard, + "index" => attestation.data.index, ); let _ = self .event_handler @@ -668,16 +707,14 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; - // Reject any attestation where the `state` loaded from `data.beacon_block_root` // has a higher slot than the attestation. // // Permitting this would allow for attesters to vote on _future_ slots. - if state.slot > attestation_slot { + if state.slot > attestation.data.slot { Ok(AttestationProcessingOutcome::AttestsToFutureState { state: state.slot, - attestation: attestation_slot, + attestation: attestation.data.slot, }) } else { self.process_attestation_for_state_and_block( @@ -776,20 +813,27 @@ impl BeaconChain { Ok(AttestationProcessingOutcome::Invalid(e)) } else { - // Provide the attestation to fork choice, updating the validator latest messages but - // _without_ finding and updating the head. - if let Err(e) = self - .fork_choice - .process_attestation(&state, &attestation, block) + // If the attestation is from the current or previous epoch, supply it to the fork + // choice. This is FMD GHOST. + let current_epoch = self.epoch()?; + if attestation.data.target.epoch == current_epoch + || attestation.data.target.epoch == current_epoch - 1 { - error!( - self.log, - "Add attestation to fork choice failed"; - "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), - "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), - "error" => format!("{:?}", e) - ); - return Err(e.into()); + // Provide the attestation to fork choice, updating the validator latest messages but + // _without_ finding and updating the head. + if let Err(e) = self + .fork_choice + .process_attestation(&state, &attestation, block) + { + error!( + self.log, + "Add attestation to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), + "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), + "error" => format!("{:?}", e) + ); + return Err(e.into()); + } } // Provide the valid attestation to op pool, which may choose to retain the @@ -829,22 +873,6 @@ impl BeaconChain { } } - /// Accept some transfer and queue it for inclusion in an appropriate block. - pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - match self.wall_clock_state() { - Ok(state) => self.op_pool.insert_transfer(transfer, &state, &self.spec), - Err(e) => { - error!( - &self.log, - "Unable to process transfer"; - "error" => format!("{:?}", e), - "reason" => "no state" - ); - Ok(()) - } - } - } - /// Accept some proposer slashing and queue it for inclusion in an appropriate block. pub fn process_proposer_slashing( &self, @@ -905,7 +933,7 @@ impl BeaconChain { self.log, "Beacon block imported"; "block_root" => format!("{:?}", block_root), - "block_slot" => format!("{:?}", block_root), + "block_slot" => format!("{:?}", block.slot.as_u64()), ); let _ = self.event_handler.register(EventKind::BeaconBlockImported { block_root: *block_root, @@ -1115,7 +1143,10 @@ impl BeaconChain { metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); // Register the new block with the fork choice service. - if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { + if let Err(e) = self + .fork_choice + .process_block(self, &state, &block, block_root) + { error!( self.log, "Add block to fork choice failed"; @@ -1230,7 +1261,6 @@ impl BeaconChain { .deposits_for_block_inclusion(&state, &self.spec)? .into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), - transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, }; diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index cef818359d..3032669915 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -231,7 +231,12 @@ where .genesis_block_root .ok_or_else(|| "fork_choice_backend requires a genesis_block_root")?; - self.fork_choice = Some(ForkChoice::new(store, backend, genesis_block_root)); + self.fork_choice = Some(ForkChoice::new( + store, + backend, + genesis_block_root, + self.spec.genesis_slot, + )); Ok(self) } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index f8046980fd..3e202ab192 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -18,7 +18,6 @@ macro_rules! easy_from_to { #[derive(Debug, PartialEq)] pub enum BeaconChainError { InsufficientValidators, - BadRecentBlockRoots, UnableToReadSlot, RevertedFinalizedEpoch { previous_epoch: Epoch, diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 78f96ef177..a9819eab1f 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -487,7 +487,7 @@ mod test { let keypair = generate_deterministic_keypair(i as usize); let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance); - builder.sign(&DepositTestTask::Valid, &keypair, spec); + builder.sign(DepositTestTask::Valid, &keypair, spec); let deposit_data = builder.build().data; DepositLog { diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 5645a925a4..04c521c332 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,10 +1,11 @@ -use crate::{metrics, BeaconChain, BeaconChainTypes}; +use crate::{errors::BeaconChainError, metrics, BeaconChain, BeaconChainTypes}; use lmd_ghost::LmdGhost; -use state_processing::common::get_attesting_indices; +use parking_lot::RwLock; +use state_processing::{common::get_attesting_indices, per_slot_processing}; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{ - Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot, + Attestation, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, EthSpec, Hash256, Slot, }; type Result = std::result::Result; @@ -16,6 +17,7 @@ pub enum Error { BackendError(String), BeaconStateError(BeaconStateError), StoreError(StoreError), + BeaconChainError(Box), } pub struct ForkChoice { @@ -26,6 +28,10 @@ pub struct ForkChoice { /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root /// whenever the struct was instantiated. genesis_block_root: Hash256, + /// The fork choice rule's current view of the justified checkpoint. + justified_checkpoint: RwLock, + /// The best justified checkpoint we've seen, which may be ahead of `justified_checkpoint`. + best_justified_checkpoint: RwLock, } impl ForkChoice { @@ -33,39 +39,91 @@ impl ForkChoice { /// /// "Genesis" does not necessarily need to be the absolute genesis, it can be some finalized /// block. - pub fn new(store: Arc, backend: T::LmdGhost, genesis_block_root: Hash256) -> Self { + pub fn new( + store: Arc, + backend: T::LmdGhost, + genesis_block_root: Hash256, + genesis_slot: Slot, + ) -> Self { + let justified_checkpoint = Checkpoint { + epoch: genesis_slot.epoch(T::EthSpec::slots_per_epoch()), + root: genesis_block_root, + }; Self { store: store.clone(), backend, genesis_block_root, + justified_checkpoint: RwLock::new(justified_checkpoint.clone()), + best_justified_checkpoint: RwLock::new(justified_checkpoint), } } + /// Determine whether the fork choice's view of the justified checkpoint should be updated. + /// + /// To prevent the bouncing attack, an update is allowed only in these conditions: + /// + /// * We're in the first SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots of the epoch, or + /// * The new justified checkpoint is a descendant of the current justified checkpoint + fn should_update_justified_checkpoint( + &self, + chain: &BeaconChain, + new_justified_checkpoint: &Checkpoint, + ) -> Result { + if Self::compute_slots_since_epoch_start(chain.slot()?) + < chain.spec.safe_slots_to_update_justified + { + return Ok(true); + } + + let justified_checkpoint = self.justified_checkpoint.read().clone(); + + let current_justified_block = chain + .get_block(&justified_checkpoint.root)? + .ok_or_else(|| Error::MissingBlock(justified_checkpoint.root))?; + + let new_justified_block = chain + .get_block(&new_justified_checkpoint.root)? + .ok_or_else(|| Error::MissingBlock(new_justified_checkpoint.root))?; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + + Ok( + new_justified_block.slot > justified_checkpoint.epoch.start_slot(slots_per_epoch) + && chain.get_ancestor_block_root( + new_justified_checkpoint.root, + current_justified_block.slot, + )? == Some(justified_checkpoint.root), + ) + } + + /// Calculate how far `slot` lies from the start of its epoch. + fn compute_slots_since_epoch_start(slot: Slot) -> u64 { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + (slot - slot.epoch(slots_per_epoch).start_slot(slots_per_epoch)).as_u64() + } + + /// Run the fork choice rule to determine the head. pub fn find_head(&self, chain: &BeaconChain) -> Result { let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES); - let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - - // From the specification: - // - // Let justified_head be the descendant of finalized_head with the highest epoch that has - // been justified for at least 1 epoch ... If no such descendant exists, - // set justified_head to finalized_head. let (start_state, start_block_root, start_block_slot) = { - let state = &chain.head().beacon_state; + // Check if we should update our view of the justified checkpoint. + // Doing this check here should be quasi-equivalent to the update in the `on_tick` + // function of the spec, so long as `find_head` is called at least once during the first + // SAFE_SLOTS_TO_UPDATE_JUSTIFIED slots. + let best_justified_checkpoint = self.best_justified_checkpoint.read(); + if self.should_update_justified_checkpoint(chain, &best_justified_checkpoint)? { + *self.justified_checkpoint.write() = best_justified_checkpoint.clone(); + } - let (block_root, block_slot) = - if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { - ( - state.current_justified_checkpoint.root, - start_slot(state.current_justified_checkpoint.epoch), - ) - } else { - ( - state.finalized_checkpoint.root, - start_slot(state.finalized_checkpoint.epoch), - ) - }; + let current_justified_checkpoint = self.justified_checkpoint.read().clone(); + + let (block_root, block_justified_slot) = ( + current_justified_checkpoint.root, + current_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + ); let block = chain .store @@ -79,12 +137,17 @@ impl ForkChoice { block_root }; - let state = chain - .store - .get::>(&block.state_root)? + let mut state = chain + .get_state(&block.state_root)? .ok_or_else(|| Error::MissingState(block.state_root))?; - (state, block_root, block_slot) + // Fast-forward the state to the start slot of the epoch where it was justified. + for _ in block.slot.as_u64()..block_justified_slot.as_u64() { + per_slot_processing(&mut state, &chain.spec) + .map_err(|e| BeaconChainError::SlotProcessingError(e))? + } + + (state, block_root, block_justified_slot) }; // A function that returns the weight for some validator index. @@ -107,10 +170,11 @@ impl ForkChoice { /// Process all attestations in the given `block`. /// - /// Assumes the block (and therefore it's attestations) are valid. It is a logic error to + /// Assumes the block (and therefore its attestations) are valid. It is a logic error to /// provide an invalid block. pub fn process_block( &self, + chain: &BeaconChain, state: &BeaconState, block: &BeaconBlock, block_root: Hash256, @@ -133,6 +197,16 @@ impl ForkChoice { } } + // Check if we should update our view of the justified checkpoint + if state.current_justified_checkpoint.epoch > self.justified_checkpoint.read().epoch { + *self.best_justified_checkpoint.write() = state.current_justified_checkpoint.clone(); + if self + .should_update_justified_checkpoint(chain, &state.current_justified_checkpoint)? + { + *self.justified_checkpoint.write() = state.current_justified_checkpoint.clone(); + } + } + // This does not apply a vote to the block, it just makes fork choice aware of the block so // it can still be identified as the head even if it doesn't have any votes. // @@ -224,6 +298,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: BeaconChainError) -> Error { + Error::BeaconChainError(Box::new(e)) + } +} + impl From for Error { fn from(e: StoreError) -> Error { Error::StoreError(e) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 375abe8753..141f768980 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -28,7 +28,7 @@ pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, - ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + ExitValidationError, ProposerSlashingValidationError, }; pub use store; pub use types; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 5fa5d4807a..bd1742b58b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -172,8 +172,6 @@ lazy_static! { try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); - pub static ref HEAD_STATE_SHARDS: Result = - try_create_int_gauge("beacon_head_state_shard_total", "Count of shards in the beacon chain"); pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain"); pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = @@ -226,7 +224,6 @@ fn scrape_head_state(state: &BeaconState, state &HEAD_STATE_FINALIZED_EPOCH, state.finalized_checkpoint.epoch, ); - set_gauge_by_usize(&HEAD_STATE_SHARDS, state.previous_crosslinks.len()); set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); set_gauge_by_u64(&HEAD_STATE_VALIDATOR_BALANCES, state.balances.iter().sum()); set_gauge_by_usize( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 01e50ee247..bf41018d26 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -15,9 +15,8 @@ use std::time::Duration; use store::MemoryStore; use tree_hash::{SignedRoot, TreeHash}; use types::{ - AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, - BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, - Slot, + AggregateSignature, Attestation, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec, + Hash256, Keypair, SecretKey, Signature, Slot, }; pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; @@ -203,7 +202,7 @@ impl BeaconChainHarness> { .block_proposer(slot) .expect("should get block proposer from chain"), _ => state - .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) + .get_beacon_proposer_index(slot, &self.spec) .expect("should get block proposer from state"), }; @@ -280,13 +279,13 @@ impl BeaconChainHarness> { let mut attestations = vec![]; state - .get_crosslink_committees_at_slot(state.slot) + .get_beacon_committees_at_slot(state.slot) .expect("should get committees") .iter() - .for_each(|cc| { - let committee_size = cc.committee.len(); + .for_each(|bc| { + let committee_size = bc.committee.len(); - let mut local_attestations: Vec> = cc + let mut local_attestations: Vec> = bc .committee .par_iter() .enumerate() @@ -297,7 +296,7 @@ impl BeaconChainHarness> { let data = self .chain .produce_attestation_data_for_block( - cc.shard, + bc.index, head_block_root, head_block_slot, state, @@ -309,18 +308,15 @@ impl BeaconChainHarness> { aggregation_bits .set(i, true) .expect("should be able to set aggregation bits"); - let custody_bits = BitList::with_capacity(committee_size) - .expect("should make custody bits"); let signature = { - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .tree_hash_root(); + let message = data.tree_hash_root(); - let domain = - spec.get_domain(data.target.epoch, Domain::Attestation, fork); + let domain = spec.get_domain( + data.target.epoch, + Domain::BeaconAttester, + fork, + ); let mut agg_sig = AggregateSignature::new(); agg_sig.add(&Signature::new( @@ -335,7 +331,6 @@ impl BeaconChainHarness> { let attestation = Attestation { aggregation_bits, data, - custody_bits, signature, }; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 082966637a..061a88cfd6 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use tokio; use tokio::sync::mpsc; use types::beacon_state::EthSpec; -use types::{Attestation, BeaconBlock, BitList, Epoch, RelativeEpoch, Shard, Slot}; +use types::{Attestation, BeaconBlock, BitList, CommitteeIndex, Epoch, RelativeEpoch, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -25,8 +25,8 @@ pub struct ValidatorDuty { pub validator_pubkey: String, /// The slot at which the validator must attest. pub attestation_slot: Option, - /// The shard in which the validator must attest. - pub attestation_shard: Option, + /// The index of the committee within `slot` of which the validator is a member. + pub attestation_committee_index: Option, /// The slot in which a validator must propose a block, or `null` if block production is not required. pub block_proposal_slot: Option, } @@ -36,7 +36,7 @@ impl ValidatorDuty { ValidatorDuty { validator_pubkey: "".to_string(), attestation_slot: None, - attestation_shard: None, + attestation_committee_index: None, block_proposal_slot: None, } } @@ -90,7 +90,7 @@ pub fn get_validator_duties(req: Request) - .slot_iter(T::EthSpec::slots_per_epoch()) .map(|slot| { head_state - .get_beacon_proposer_index(slot, relative_epoch, &beacon_chain.spec) + .get_beacon_proposer_index(slot, &beacon_chain.spec) .map_err(|e| { ApiError::ServerError(format!( "Unable to get proposer index for validator: {:?}", @@ -125,7 +125,7 @@ pub fn get_validator_duties(req: Request) - match head_state.get_attestation_duties(val_index, relative_epoch) { Ok(Some(d)) => { duty.attestation_slot = Some(d.slot); - duty.attestation_shard = Some(d.shard); + duty.attestation_committee_index = Some(d.index); } Ok(None) => {} Err(e) => { @@ -311,7 +311,7 @@ pub fn get_new_attestation(req: Request) -> let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len) .expect("An empty BitList should always be created, or we have bigger problems."); aggregation_bits - .set(val_duty.committee_index, poc_bit) + .set(val_duty.committee_position, poc_bit) .map_err(|e| { ApiError::ServerError(format!( "Unable to set aggregation bits for the attestation: {:?}", @@ -334,21 +334,19 @@ pub fn get_new_attestation(req: Request) -> return Err(ApiError::BadRequest(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); } - let shard = query - .first_of(&["shard"]) + let index = query + .first_of(&["index"]) .map(|(_key, value)| value)? .parse::() - .map_err(|e| ApiError::BadRequest(format!("Shard is not a valid u64 value: {:?}", e)))?; + .map_err(|e| ApiError::BadRequest(format!("Index is not a valid u64 value: {:?}", e)))?; let attestation_data = beacon_chain - .produce_attestation_data(shard, current_slot.into()) + .produce_attestation_data(current_slot.into(), index) .map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?; let attestation: Attestation = Attestation { aggregation_bits, data: attestation_data, - custody_bits: BitList::with_capacity(val_duty.committee_len) - .expect("Should be able to create an empty BitList for the custody bits."), signature: AggregateSignature::new(), }; diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 2621cb7727..d9396ae35a 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -48,11 +48,12 @@ impl AttestationService for AttestationServiceInstance { ); // Then get the AttestationData from the beacon chain + // NOTE(v0.9): shard is incorrectly named, all this should be deleted let shard = req.get_shard(); let slot_requested = req.get_slot(); let attestation_data = match self .chain - .produce_attestation_data(shard, Slot::from(slot_requested)) + .produce_attestation_data(Slot::from(slot_requested), shard) { Ok(v) => v, Err(e) => { @@ -115,7 +116,7 @@ impl AttestationService for AttestationServiceInstance { self.log, "Valid attestation from RPC"; "target_epoch" => attestation.data.target.epoch, - "shard" => attestation.data.crosslink.shard, + "index" => attestation.data.index, ); // valid attestation, propagate to the network diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 42ca025ee8..be789b3473 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -66,9 +66,7 @@ impl ValidatorService for ValidatorServiceInstance { let validator_proposers: Result, _> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| { - state.get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.chain.spec) - }) + .map(|slot| state.get_beacon_proposer_index(slot, &self.chain.spec)) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, @@ -170,9 +168,9 @@ impl ValidatorService for ValidatorServiceInstance { duty.set_none(false) } - duty.set_committee_index(attestation_duties.committee_index as u64); + duty.set_committee_index(attestation_duties.committee_position as u64); duty.set_attestation_slot(attestation_duties.slot.as_u64()); - duty.set_attestation_shard(attestation_duties.shard); + duty.set_attestation_shard(attestation_duties.index); duty.set_committee_len(attestation_duties.committee_len as u64); active_validator.set_duty(duty); diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md index 9b062badeb..40c7b1e90d 100644 --- a/book/src/simple-testnet.md +++ b/book/src/simple-testnet.md @@ -35,7 +35,7 @@ $ lighthouse bn testnet -f recent 8 In a new terminal window, start the validator client with: ```bash -$ lighthouse bn testnet -b insecure 0 8 +$ lighthouse vc testnet -b insecure 0 8 ``` > Notes: diff --git a/eth2/lmd_ghost/Cargo.toml b/eth2/lmd_ghost/Cargo.toml index e26b85626c..d78faaab39 100644 --- a/eth2/lmd_ghost/Cargo.toml +++ b/eth2/lmd_ghost/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" parking_lot = "0.9.0" store = { path = "../../beacon_node/store" } types = { path = "../types" } +itertools = "0.8.1" [dev-dependencies] criterion = "0.3.0" diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index 167cd36eaf..d58affe146 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -49,7 +49,7 @@ pub trait LmdGhost: Send + Sync { /// Runs an integrity verification function on fork choice algorithm. /// - /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, + /// Returns `Ok(())` if the underlying fork choice has maintained its integrity, /// `Err(description)` otherwise. fn verify_integrity(&self) -> Result<()>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index b786887f11..85540785db 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -4,6 +4,7 @@ //! //! This implementation is incomplete and has known bugs. Do not use in production. use super::{LmdGhost, Result as SuperResult}; +use itertools::Itertools; use parking_lot::RwLock; use std::collections::HashMap; use std::fmt; @@ -20,6 +21,7 @@ pub enum Error { MissingBlock(Hash256), MissingState(Hash256), MissingChild(Hash256), + MissingSuccessor(Hash256, Hash256), NotInTree(Hash256), NoCommonAncestor((Hash256, Hash256)), StoreError(StoreError), @@ -177,8 +179,8 @@ where if current_hash != subtree_hash { let children = self.get_node(current_hash)?.children.clone(); - for child_hash in children { - self.retain_subtree(child_hash, subtree_hash)?; + for child in children { + self.retain_subtree(child.hash, subtree_hash)?; } self.nodes.remove(¤t_hash); @@ -239,7 +241,7 @@ where let _root_weight = self.update_weight(start_block_root, weight_fn)?; let start_node = self.get_node(start_block_root)?; - let head_node = self.find_head_from(start_node)?; + let head_node = self.find_head_from(start_node, start_block_slot)?; Ok(head_node.block_hash) } @@ -251,31 +253,32 @@ where } } - fn find_head_from<'a>(&'a self, start_node: &'a Node) -> Result<&'a Node> { - if start_node.does_not_have_children() { + // Corresponds to the loop in `get_head` in the spec. + fn find_head_from<'a>( + &'a self, + start_node: &'a Node, + justified_slot: Slot, + ) -> Result<&'a Node> { + let children = start_node + .children + .iter() + // This check is primarily for the first iteration, where we must ensure that + // we only consider votes that were made after the last justified checkpoint. + .filter(|c| c.successor_slot > justified_slot) + .map(|c| self.get_node(c.hash)) + .collect::>>()?; + + if children.is_empty() { Ok(start_node) } else { - let children = start_node - .children - .iter() - .map(|hash| self.get_node(*hash)) - .collect::>>()?; - - // TODO: check if `max_by` is `O(n^2)`. let best_child = children .iter() - .max_by(|a, b| { - if a.weight != b.weight { - a.weight.cmp(&b.weight) - } else { - a.block_hash.cmp(&b.block_hash) - } - }) + .max_by_key(|child| (child.weight, child.block_hash)) // There can only be no maximum if there are no children. This code path is guarded // against that condition. .expect("There must be a maximally weighted node."); - self.find_head_from(best_child) + self.find_head_from(best_child, justified_slot) } } @@ -288,8 +291,8 @@ where let mut weight = 0; - for &child in &node.children { - weight += self.update_weight(child, weight_fn)?; + for child in &node.children { + weight += self.update_weight(child.hash, weight_fn)?; } for &voter in &node.voters { @@ -323,13 +326,13 @@ where // // Load the child of the node and set it's parent to be the parent of this // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0])?; + let child = self.get_mut_node(node.children[0].hash)?; child.parent_hash = node.parent_hash; // Graft the parent of this node to it's child. if let Some(parent_hash) = node.parent_hash { let parent = self.get_mut_node(parent_hash)?; - parent.replace_child(node.block_hash, node.children[0])?; + parent.replace_child_hash(node.block_hash, node.children[0].hash)?; } self.nodes.remove(&vote.hash); @@ -376,15 +379,16 @@ where let node = node.clone(); if let Some(parent_hash) = node.parent_hash { - if (node.children.len() == 1) && !node.has_votes() { - let child_hash = node.children[0]; + if node.children.len() == 1 && !node.has_votes() { + let child = &node.children[0]; // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); + self.get_mut_node(child.hash)?.parent_hash = Some(parent_hash); // Detach `node` from `parent`, replacing it with `child`. + // Preserve the parent's direct descendant slot. self.get_mut_node(parent_hash)? - .replace_child(hash, child_hash)?; + .replace_child_hash(hash, child.hash)?; true } else { @@ -442,6 +446,40 @@ where Ok(()) } + /// Find the direct successor block of `ancestor` if `descendant` is a descendant. + fn find_ancestor_successor_opt( + &self, + ancestor: Hash256, + descendant: Hash256, + ) -> Result> { + Ok(std::iter::once(descendant) + .chain( + self.iter_ancestors(descendant)? + .take_while(|(_, slot)| *slot >= self.root_slot()) + .map(|(block_hash, _)| block_hash), + ) + .tuple_windows() + .find_map(|(successor, block_hash)| { + if block_hash == ancestor { + Some(successor) + } else { + None + } + })) + } + + /// Same as `find_ancestor_successor_opt` but will return an error instead of an option. + fn find_ancestor_successor(&self, ancestor: Hash256, descendant: Hash256) -> Result { + self.find_ancestor_successor_opt(ancestor, descendant)? + .ok_or_else(|| Error::MissingSuccessor(ancestor, descendant)) + } + + /// Look up the successor of the given `ancestor`, returning the slot of that block. + fn find_ancestor_successor_slot(&self, ancestor: Hash256, descendant: Hash256) -> Result { + let successor_hash = self.find_ancestor_successor(ancestor, descendant)?; + Ok(self.get_block(successor_hash)?.slot) + } + /// Add `node` to the reduced tree, returning an error if `node` is not rooted in the tree. fn add_node(&mut self, mut node: Node) -> Result<()> { // Find the highest (by slot) ancestor of the given node in the reduced tree. @@ -460,7 +498,9 @@ where // `node` to it. // 3. Graft `node` to an existing node. if !prev_in_tree.children.is_empty() { - for &child_hash in &prev_in_tree.children { + for child_link in &prev_in_tree.children { + let child_hash = child_link.hash; + // 1. Graft the new node between two existing nodes. // // If `node` is a descendant of `prev_in_tree` but an ancestor of a child connected to @@ -468,19 +508,20 @@ where // // This means that `node` can be grafted between `prev_in_tree` and the child that is a // descendant of both `node` and `prev_in_tree`. - if self - .iter_ancestors(child_hash)? - .take_while(|(_, slot)| *slot >= self.root_slot()) - .any(|(ancestor, _slot)| ancestor == node.block_hash) + if let Some(successor) = + self.find_ancestor_successor_opt(node.block_hash, child_hash)? { let child = self.get_mut_node(child_hash)?; // Graft `child` to `node`. child.parent_hash = Some(node.block_hash); // Graft `node` to `child`. - node.children.push(child_hash); + node.children.push(ChildLink { + hash: child_hash, + successor_slot: self.get_block(successor)?.slot, + }); // Detach `child` from `prev_in_tree`, replacing it with `node`. - prev_in_tree.replace_child(child_hash, node.block_hash)?; + prev_in_tree.replace_child_hash(child_hash, node.block_hash)?; // Graft `node` to `prev_in_tree`. node.parent_hash = Some(prev_in_tree.block_hash); @@ -495,7 +536,8 @@ where // any of the children of `prev_in_tree`, we know that `node` is on a different fork to // all of the children of `prev_in_tree`. if node.parent_hash.is_none() { - for &child_hash in &prev_in_tree.children { + for child_link in &prev_in_tree.children { + let child_hash = child_link.hash; // Find the highest (by slot) common ancestor between `node` and `child`. // // The common ancestor is the last block before `node` and `child` forked. @@ -506,24 +548,37 @@ where // must add this new block into the tree (because it is a decision node // between two forks). if ancestor_hash != prev_in_tree.block_hash { - let child = self.get_mut_node(child_hash)?; - // Create a new `common_ancestor` node which represents the `ancestor_hash` // block, has `prev_in_tree` as the parent and has both `node` and `child` // as children. let common_ancestor = Node { block_hash: ancestor_hash, parent_hash: Some(prev_in_tree.block_hash), - children: vec![node.block_hash, child_hash], + children: vec![ + ChildLink { + hash: node.block_hash, + successor_slot: self.find_ancestor_successor_slot( + ancestor_hash, + node.block_hash, + )?, + }, + ChildLink { + hash: child_hash, + successor_slot: self + .find_ancestor_successor_slot(ancestor_hash, child_hash)?, + }, + ], ..Node::default() }; + let child = self.get_mut_node(child_hash)?; + // Graft `child` and `node` to `common_ancestor`. child.parent_hash = Some(common_ancestor.block_hash); node.parent_hash = Some(common_ancestor.block_hash); // Detach `child` from `prev_in_tree`, replacing it with `common_ancestor`. - prev_in_tree.replace_child(child_hash, common_ancestor.block_hash)?; + prev_in_tree.replace_child_hash(child_hash, common_ancestor.block_hash)?; // Store the new `common_ancestor` node. self.nodes @@ -540,7 +595,11 @@ where // // Graft `node` to `prev_in_tree` and `prev_in_tree` to `node` node.parent_hash = Some(prev_in_tree.block_hash); - prev_in_tree.children.push(node.block_hash); + prev_in_tree.children.push(ChildLink { + hash: node.block_hash, + successor_slot: self + .find_ancestor_successor_slot(prev_in_tree.block_hash, node.block_hash)?, + }); } // Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow @@ -655,7 +714,17 @@ where node.children .iter() - .map(|child| verify_node_exists(*child, "child_must_exist".to_string())) + .map(|child| { + verify_node_exists(child.hash, "child_must_exist".to_string())?; + + if self.find_ancestor_successor_slot(node.block_hash, child.hash)? + == child.successor_slot + { + Ok(()) + } else { + Err("successor slot on child link is incorrect".to_string()) + } + }) .collect::>()?; verify_node_exists(node.block_hash, "block hash must exist".to_string())?; @@ -698,25 +767,35 @@ where #[derive(Default, Clone, Debug)] pub struct Node { + /// Hash of the parent node in the reduced tree (not necessarily parent block). pub parent_hash: Option, - pub children: Vec, + pub children: Vec, pub weight: u64, pub block_hash: Hash256, pub voters: Vec, } -impl Node { - pub fn does_not_have_children(&self) -> bool { - self.children.is_empty() - } +#[derive(Default, Clone, Debug)] +pub struct ChildLink { + /// Hash of the child block (may not be a direct descendant). + pub hash: Hash256, + /// Slot of the block which is a direct descendant on the chain leading to `hash`. + /// + /// Node <--- Successor <--- ... <--- Child + pub successor_slot: Slot, +} - pub fn replace_child(&mut self, old: Hash256, new: Hash256) -> Result<()> { +impl Node { + /// Replace a child with a new child, whilst preserving the successor slot. + /// + /// The new child should have the same ancestor successor block as the old one. + pub fn replace_child_hash(&mut self, old: Hash256, new: Hash256) -> Result<()> { let i = self .children .iter() - .position(|&c| c == old) + .position(|c| c.hash == old) .ok_or_else(|| Error::MissingChild(old))?; - self.children[i] = new; + self.children[i].hash = new; Ok(()) } @@ -725,7 +804,7 @@ impl Node { let i = self .children .iter() - .position(|&c| c == child) + .position(|c| c.hash == child) .ok_or_else(|| Error::MissingChild(child))?; self.children.remove(i); diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index a8752e2b44..631f3e406e 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -45,7 +45,9 @@ struct ForkedHarness { pub genesis_block: BeaconBlock, pub honest_head: RootAndSlot, pub faulty_head: RootAndSlot, + /// Honest roots in reverse order (slot high to low) pub honest_roots: Vec, + /// Faulty roots in reverse order (slot high to low) pub faulty_roots: Vec, } @@ -225,7 +227,7 @@ fn single_voter_persistent_instance_reverse_order() { "New tree should have integrity" ); - for (root, slot) in harness.honest_roots.iter().rev() { + for (root, slot) in &harness.honest_roots { lmd.process_attestation(0, *root, *slot) .expect("fork choice should accept attestations to honest roots in reverse"); @@ -237,11 +239,15 @@ fn single_voter_persistent_instance_reverse_order() { } // The honest head should be selected. - let (head_root, head_slot) = harness.honest_roots.first().unwrap(); - let (finalized_root, _) = harness.honest_roots.last().unwrap(); + let (head_root, _) = harness.honest_roots.first().unwrap(); + let (finalized_root, finalized_slot) = harness.honest_roots.last().unwrap(); assert_eq!( - lmd.find_head(*head_slot, *finalized_root, ForkedHarness::weight_function), + lmd.find_head( + *finalized_slot, + *finalized_root, + ForkedHarness::weight_function + ), Ok(*head_root), "Honest head should be selected" ); @@ -253,7 +259,7 @@ fn single_voter_persistent_instance_reverse_order() { fn single_voter_many_instance_honest_blocks_voting_forwards() { let harness = &FORKED_HARNESS; - for (root, slot) in &harness.honest_roots { + for (root, slot) in harness.honest_roots.iter().rev() { let lmd = harness.new_fork_choice(); lmd.process_attestation(0, *root, *slot) .expect("fork choice should accept attestations to honest roots"); @@ -272,7 +278,7 @@ fn single_voter_many_instance_honest_blocks_voting_in_reverse() { let harness = &FORKED_HARNESS; // Same as above, but in reverse order (votes on the highest honest block first). - for (root, slot) in harness.honest_roots.iter().rev() { + for (root, slot) in &harness.honest_roots { let lmd = harness.new_fork_choice(); lmd.process_attestation(0, *root, *slot) .expect("fork choice should accept attestations to honest roots in reverse"); @@ -291,7 +297,7 @@ fn single_voter_many_instance_honest_blocks_voting_in_reverse() { fn single_voter_many_instance_faulty_blocks_voting_forwards() { let harness = &FORKED_HARNESS; - for (root, slot) in &harness.faulty_roots { + for (root, slot) in harness.faulty_roots.iter().rev() { let lmd = harness.new_fork_choice(); lmd.process_attestation(0, *root, *slot) .expect("fork choice should accept attestations to faulty roots"); @@ -309,7 +315,7 @@ fn single_voter_many_instance_faulty_blocks_voting_forwards() { fn single_voter_many_instance_faulty_blocks_voting_in_reverse() { let harness = &FORKED_HARNESS; - for (root, slot) in harness.faulty_roots.iter().rev() { + for (root, slot) in &harness.faulty_roots { let lmd = harness.new_fork_choice(); lmd.process_attestation(0, *root, *slot) .expect("fork choice should accept attestations to faulty roots in reverse"); @@ -322,6 +328,44 @@ fn single_voter_many_instance_faulty_blocks_voting_in_reverse() { } } +/// Ensure that votes with slots before the justified slot are not counted. +#[test] +fn discard_votes_before_justified_slot() { + let harness = &FORKED_HARNESS; + + let lmd = harness.new_fork_choice(); + + let (genesis_root, genesis_slot) = *harness.honest_roots.last().unwrap(); + + // Add attestations from all validators for all honest blocks. + for (root, slot) in harness.honest_roots.iter().rev() { + for i in 0..VALIDATOR_COUNT { + lmd.process_attestation(i, *root, *slot) + .expect("should accept attestations in increasing order"); + } + + // Head starting from 0 checkpoint (genesis) should be current root + assert_eq!( + lmd.find_head(genesis_slot, genesis_root, ForkedHarness::weight_function), + Ok(*root), + "Honest head should be selected" + ); + + // Head from one slot after genesis should still be genesis, because the successor + // block of the genesis block has slot `genesis_slot + 1` which isn't greater than + // the slot we're starting from. This is a very artifical test, but one that's easy to + // describe. + assert_eq!( + lmd.find_head( + genesis_slot + 1, + genesis_root, + ForkedHarness::weight_function + ), + Ok(genesis_root) + ); + } +} + /// Ensures that the finalized root can be set to all values in `roots`. fn test_update_finalized_root(roots: &[(Hash256, Slot)]) { let harness = &FORKED_HARNESS; diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index 9aa2b598da..e9344a9675 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] int_to_bytes = { path = "../utils/int_to_bytes" } -itertools = "0.8.1" parking_lot = "0.9.0" types = { path = "../types" } state_processing = { path = "../state_processing" } diff --git a/eth2/operation_pool/src/attestation.rs b/eth2/operation_pool/src/attestation.rs index de07b2f7bf..c2cc9d56ce 100644 --- a/eth2/operation_pool/src/attestation.rs +++ b/eth2/operation_pool/src/attestation.rs @@ -35,16 +35,14 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { /// Sneaky: we keep all the attestations together in one bucket, even though /// their aggregation bitfields refer to different committees. In order to avoid /// confusing committees when updating covering sets, we update only those attestations - /// whose shard and epoch match the attestation being included in the solution, by the logic - /// that a shard and epoch uniquely identify a committee. + /// whose slot and index match the attestation being included in the solution, by the logic + /// that a slot and index uniquely identify a committee. fn update_covering_set( &mut self, best_att: &Attestation, covered_validators: &BitList, ) { - if self.att.data.crosslink.shard == best_att.data.crosslink.shard - && self.att.data.target.epoch == best_att.data.target.epoch - { + if self.att.data.slot == best_att.data.slot && self.att.data.index == best_att.data.index { self.fresh_validators.difference_inplace(covered_validators); } } @@ -80,11 +78,12 @@ pub fn earliest_attestation_validators( state_attestations .iter() - // In a single epoch, an attester should only be attesting for one shard. + // In a single epoch, an attester should only be attesting for one slot and index. // TODO: we avoid including slashable attestations in the state here, // but maybe we should do something else with them (like construct slashings). .filter(|existing_attestation| { - existing_attestation.data.crosslink.shard == attestation.data.crosslink.shard + existing_attestation.data.slot == attestation.data.slot + && existing_attestation.data.index == attestation.data.index }) .for_each(|existing_attestation| { // Remove the validators who have signed the existing attestation (they are not new) diff --git a/eth2/operation_pool/src/attestation_id.rs b/eth2/operation_pool/src/attestation_id.rs index e435bae7f9..dfe55581d9 100644 --- a/eth2/operation_pool/src/attestation_id.rs +++ b/eth2/operation_pool/src/attestation_id.rs @@ -29,7 +29,7 @@ impl AttestationId { state: &BeaconState, spec: &ChainSpec, ) -> Vec { - int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork)) + int_to_bytes8(spec.get_domain(epoch, Domain::BeaconAttester, &state.fork)) } pub fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool { diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 02a8535d29..550717eb8a 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -7,24 +7,22 @@ pub use persistence::PersistedOperationPool; use attestation::{earliest_attestation_validators, AttMaxCover}; use attestation_id::AttestationId; -use itertools::Itertools; use max_cover::maximum_cover; use parking_lot::RwLock; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, - ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + ExitValidationError, ProposerSlashingValidationError, }; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_attester_slashing, verify_exit, verify_exit_time_independent_only, - verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, - VerifySignatures, + verify_proposer_slashing, VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; use types::{ typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec, - ProposerSlashing, Transfer, Validator, VoluntaryExit, + ProposerSlashing, Validator, VoluntaryExit, }; #[derive(Default, Debug)] @@ -43,8 +41,6 @@ pub struct OperationPool { proposer_slashings: RwLock>, /// Map from exiting validator to their exit data. voluntary_exits: RwLock>, - /// Set of transfers. - transfers: RwLock>, _phantom: PhantomData, } @@ -375,44 +371,6 @@ impl OperationPool { ); } - /// Insert a transfer into the pool, checking it for validity in the process. - pub fn insert_transfer( - &self, - transfer: Transfer, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result<(), TransferValidationError> { - // The signature of the transfer isn't hashed, but because we check - // it before we insert into the HashSet, we can't end up with duplicate - // transactions. - verify_transfer_time_independent_only(state, &transfer, VerifySignatures::True, spec)?; - self.transfers.write().insert(transfer); - Ok(()) - } - - /// Get a list of transfers for inclusion in a block. - // TODO: improve the economic optimality of this function by accounting for - // dependencies between transfers in the same block e.g. A pays B, B pays C - pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { - self.transfers - .read() - .iter() - .filter(|transfer| { - verify_transfer(state, transfer, VerifySignatures::False, spec).is_ok() - }) - .sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee)) - .take(T::MaxTransfers::to_usize()) - .cloned() - .collect() - } - - /// Prune the set of transfers by removing all those whose slot has already passed. - pub fn prune_transfers(&self, finalized_state: &BeaconState) { - self.transfers - .write() - .retain(|transfer| transfer.slot > finalized_state.slot) - } - /// Prune all types of transactions given the latest finalized state. pub fn prune_all(&self, finalized_state: &BeaconState, spec: &ChainSpec) { self.prune_attestations(finalized_state); @@ -420,7 +378,6 @@ impl OperationPool { self.prune_proposer_slashings(finalized_state); self.prune_attester_slashings(finalized_state, spec); self.prune_voluntary_exits(finalized_state); - self.prune_transfers(finalized_state); } } @@ -467,7 +424,6 @@ impl PartialEq for OperationPool { && *self.attester_slashings.read() == *other.attester_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.voluntary_exits.read() == *other.voluntary_exits.read() - && *self.transfers.read() == *other.transfers.read() } } @@ -611,7 +567,7 @@ mod tests { /// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`. fn signed_attestation, E: EthSpec>( committee: &[usize], - shard: u64, + index: u64, keypairs: &[Keypair], signing_range: R, slot: Slot, @@ -620,32 +576,30 @@ mod tests { extra_signer: Option, ) -> Attestation { let mut builder = TestingAttestationBuilder::new( - &AttestationTestTask::Valid, + AttestationTestTask::Valid, state, committee, slot, - shard, + index, spec, ); let signers = &committee[signing_range]; let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::>(); builder.sign( - &AttestationTestTask::Valid, + AttestationTestTask::Valid, signers, &committee_keys, &state.fork, spec, - false, ); extra_signer.map(|c_idx| { let validator_index = committee[c_idx]; builder.sign( - &AttestationTestTask::Valid, + AttestationTestTask::Valid, &[validator_index], &[&keypairs[validator_index].sk], &state.fork, spec, - false, ) }); builder.build() @@ -677,16 +631,16 @@ mod tests { attestation_test_state::(1); let slot = state.slot - 1; let committees = state - .get_crosslink_committees_at_slot(slot) + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() - .map(CrosslinkCommittee::into_owned) + .map(BeaconCommittee::into_owned) .collect::>(); - for cc in committees { + for bc in committees { let att1 = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, ..2, slot, @@ -695,8 +649,8 @@ mod tests { None, ); let att2 = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, .., slot, @@ -720,7 +674,7 @@ mod tests { .unwrap(); assert_eq!( - cc.committee.len() - 2, + bc.committee.len() - 2, earliest_attestation_validators(&att2, state).num_set_bits() ); } @@ -736,10 +690,10 @@ mod tests { let slot = state.slot - 1; let committees = state - .get_crosslink_committees_at_slot(slot) + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() - .map(CrosslinkCommittee::into_owned) + .map(BeaconCommittee::into_owned) .collect::>(); assert_eq!( @@ -748,12 +702,12 @@ mod tests { "we expect just one committee with this many validators" ); - for cc in &committees { + for bc in &committees { let step_size = 2; - for i in (0..cc.committee.len()).step_by(step_size) { + for i in (0..bc.committee.len()).step_by(step_size) { let att = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, i..i + step_size, slot, @@ -805,16 +759,16 @@ mod tests { let slot = state.slot - 1; let committees = state - .get_crosslink_committees_at_slot(slot) + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() - .map(CrosslinkCommittee::into_owned) + .map(BeaconCommittee::into_owned) .collect::>(); - for cc in &committees { + for bc in &committees { let att = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, .., slot, @@ -842,20 +796,20 @@ mod tests { let slot = state.slot - 1; let committees = state - .get_crosslink_committees_at_slot(slot) + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() - .map(CrosslinkCommittee::into_owned) + .map(BeaconCommittee::into_owned) .collect::>(); let step_size = 2; - for cc in &committees { + for bc in &committees { // Create attestations that overlap on `step_size` validators, like: // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... - for i in (0..cc.committee.len() - step_size).step_by(step_size) { + for i in (0..bc.committee.len() - step_size).step_by(step_size) { let att = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, i..i + 2 * step_size, slot, @@ -890,20 +844,20 @@ mod tests { let slot = state.slot - 1; let committees = state - .get_crosslink_committees_at_slot(slot) + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() - .map(CrosslinkCommittee::into_owned) + .map(BeaconCommittee::into_owned) .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); let target_committee_size = spec.target_committee_size as usize; - let insert_attestations = |cc: &OwnedCrosslinkCommittee, step_size| { + let insert_attestations = |bc: &OwnedBeaconCommittee, step_size| { for i in (0..target_committee_size).step_by(step_size) { let att = signed_attestation( - &cc.committee, - cc.shard, + &bc.committee, + bc.index, keypairs, i..i + step_size, slot, diff --git a/eth2/operation_pool/src/persistence.rs b/eth2/operation_pool/src/persistence.rs index 00d1cd2f15..bb423891a9 100644 --- a/eth2/operation_pool/src/persistence.rs +++ b/eth2/operation_pool/src/persistence.rs @@ -21,8 +21,6 @@ pub struct PersistedOperationPool { proposer_slashings: Vec, /// Voluntary exits. voluntary_exits: Vec, - /// Transfers. - transfers: Vec, } impl PersistedOperationPool { @@ -63,15 +61,12 @@ impl PersistedOperationPool { .map(|(_, exit)| exit.clone()) .collect(); - let transfers = operation_pool.transfers.read().iter().cloned().collect(); - Self { attestations, deposits, attester_slashings, proposer_slashings, voluntary_exits, - transfers, } } @@ -102,7 +97,6 @@ impl PersistedOperationPool { .map(|exit| (exit.validator_index, exit)) .collect(), ); - let transfers = RwLock::new(self.transfers.into_iter().collect()); OperationPool { attestations, @@ -110,7 +104,6 @@ impl PersistedOperationPool { attester_slashings, proposer_slashings, voluntary_exits, - transfers, _phantom: Default::default(), } } diff --git a/eth2/state_processing/src/common/get_attesting_indices.rs b/eth2/state_processing/src/common/get_attesting_indices.rs index adb71801a4..ee97969515 100644 --- a/eth2/state_processing/src/common/get_attesting_indices.rs +++ b/eth2/state_processing/src/common/get_attesting_indices.rs @@ -3,19 +3,13 @@ use types::*; /// Returns validator indices which participated in the attestation, sorted by increasing index. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn get_attesting_indices( state: &BeaconState, attestation_data: &AttestationData, bitlist: &BitList, ) -> Result, BeaconStateError> { - let target_relative_epoch = - RelativeEpoch::from_epoch(state.current_epoch(), attestation_data.target.epoch)?; - - let committee = state.get_crosslink_committee_for_shard( - attestation_data.crosslink.shard, - target_relative_epoch, - )?; + let committee = state.get_beacon_committee(attestation_data.slot, attestation_data.index)?; if bitlist.len() != committee.committee.len() { return Err(BeaconStateError::InvalidBitfield); diff --git a/eth2/state_processing/src/common/get_compact_committees_root.rs b/eth2/state_processing/src/common/get_compact_committees_root.rs deleted file mode 100644 index b8ab4345fd..0000000000 --- a/eth2/state_processing/src/common/get_compact_committees_root.rs +++ /dev/null @@ -1,41 +0,0 @@ -use tree_hash::TreeHash; -use types::*; - -/// Return the compact committee root at `relative_epoch`. -/// -/// Spec v0.8.3 -pub fn get_compact_committees_root( - state: &BeaconState, - relative_epoch: RelativeEpoch, - spec: &ChainSpec, -) -> Result { - let mut committees = - FixedVector::<_, T::ShardCount>::from_elem(CompactCommittee::::default()); - let start_shard = state.get_epoch_start_shard(relative_epoch)?; - - for committee_number in 0..state.get_committee_count(relative_epoch)? { - let shard = (start_shard + committee_number) % T::ShardCount::to_u64(); - - for &index in state - .get_crosslink_committee_for_shard(shard, relative_epoch)? - .committee - { - let validator = state - .validators - .get(index) - .ok_or(BeaconStateError::UnknownValidator)?; - committees[shard as usize] - .pubkeys - .push(validator.pubkey.clone())?; - let compact_balance = validator.effective_balance / spec.effective_balance_increment; - // `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits) - let compact_validator: u64 = - ((index as u64) << 16) + (u64::from(validator.slashed) << 15) + compact_balance; - committees[shard as usize] - .compact_validators - .push(compact_validator)?; - } - } - - Ok(Hash256::from_slice(&committees.tree_hash_root())) -} diff --git a/eth2/state_processing/src/common/get_indexed_attestation.rs b/eth2/state_processing/src/common/get_indexed_attestation.rs index 6cae2e47ff..16b2984889 100644 --- a/eth2/state_processing/src/common/get_indexed_attestation.rs +++ b/eth2/state_processing/src/common/get_indexed_attestation.rs @@ -6,139 +6,19 @@ type Result = std::result::Result>; /// Convert `attestation` to (almost) indexed-verifiable form. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn get_indexed_attestation( state: &BeaconState, attestation: &Attestation, ) -> Result> { - // Note: we rely on both calls to `get_attesting_indices` to check the bitfield lengths - // against the committee length let attesting_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let custody_bit_1_indices = - get_attesting_indices(state, &attestation.data, &attestation.custody_bits)?; - - verify!( - custody_bit_1_indices.is_subset(&attesting_indices), - Invalid::CustodyBitfieldNotSubset - ); - - let custody_bit_0_indices = &attesting_indices - &custody_bit_1_indices; - Ok(IndexedAttestation { - custody_bit_0_indices: VariableList::new( - custody_bit_0_indices - .into_iter() - .map(|x| x as u64) - .collect(), - )?, - custody_bit_1_indices: VariableList::new( - custody_bit_1_indices - .into_iter() - .map(|x| x as u64) - .collect(), + attesting_indices: VariableList::new( + attesting_indices.into_iter().map(|x| x as u64).collect(), )?, data: attestation.data.clone(), signature: attestation.signature.clone(), }) } - -#[cfg(test)] -mod test { - use super::*; - use itertools::{Either, Itertools}; - use types::test_utils::*; - - #[test] - fn custody_bitfield_indexing() { - let validator_count = 128; - let spec = MinimalEthSpec::default_spec(); - let state_builder = - TestingBeaconStateBuilder::::from_default_keypairs_file_if_exists( - validator_count, - &spec, - ); - let (mut state, keypairs) = state_builder.build(); - state.build_all_caches(&spec).unwrap(); - state.slot += 1; - - let shard = 0; - let cc = state - .get_crosslink_committee_for_shard(shard, RelativeEpoch::Current) - .unwrap(); - - // Make a third of the validators sign with custody bit 0, a third with custody bit 1 - // and a third not sign at all. - assert!( - cc.committee.len() >= 4, - "need at least 4 validators per committee for this test to work" - ); - let (mut bit_0_indices, mut bit_1_indices): (Vec<_>, Vec<_>) = cc - .committee - .iter() - .enumerate() - .filter(|(i, _)| i % 3 != 0) - .partition_map(|(i, index)| { - if i % 3 == 1 { - Either::Left(*index) - } else { - Either::Right(*index) - } - }); - assert!(!bit_0_indices.is_empty()); - assert!(!bit_1_indices.is_empty()); - - let bit_0_keys = bit_0_indices - .iter() - .map(|validator_index| &keypairs[*validator_index].sk) - .collect::>(); - let bit_1_keys = bit_1_indices - .iter() - .map(|validator_index| &keypairs[*validator_index].sk) - .collect::>(); - - let mut attestation_builder = TestingAttestationBuilder::new( - &AttestationTestTask::Valid, - &state, - &cc.committee, - cc.slot, - shard, - &spec, - ); - attestation_builder - .sign( - &AttestationTestTask::Valid, - &bit_0_indices, - &bit_0_keys, - &state.fork, - &spec, - false, - ) - .sign( - &AttestationTestTask::Valid, - &bit_1_indices, - &bit_1_keys, - &state.fork, - &spec, - true, - ); - let attestation = attestation_builder.build(); - - let indexed_attestation = get_indexed_attestation(&state, &attestation).unwrap(); - - bit_0_indices.sort(); - bit_1_indices.sort(); - - assert!(indexed_attestation - .custody_bit_0_indices - .iter() - .copied() - .eq(bit_0_indices.iter().map(|idx| *idx as u64))); - assert!(indexed_attestation - .custody_bit_1_indices - .iter() - .copied() - .eq(bit_1_indices.iter().map(|idx| *idx as u64))); - } -} diff --git a/eth2/state_processing/src/common/initiate_validator_exit.rs b/eth2/state_processing/src/common/initiate_validator_exit.rs index 0929069710..f8d342863f 100644 --- a/eth2/state_processing/src/common/initiate_validator_exit.rs +++ b/eth2/state_processing/src/common/initiate_validator_exit.rs @@ -3,7 +3,7 @@ use types::{BeaconStateError as Error, *}; /// Initiate the exit of the validator of the given `index`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn initiate_validator_exit( state: &mut BeaconState, index: usize, diff --git a/eth2/state_processing/src/common/mod.rs b/eth2/state_processing/src/common/mod.rs index 8ce7b7107b..2bf8e0fc43 100644 --- a/eth2/state_processing/src/common/mod.rs +++ b/eth2/state_processing/src/common/mod.rs @@ -1,11 +1,9 @@ mod get_attesting_indices; -mod get_compact_committees_root; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; pub use get_attesting_indices::get_attesting_indices; -pub use get_compact_committees_root::get_compact_committees_root; pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index 5b91c4a078..fde3150cca 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -4,7 +4,7 @@ use types::{BeaconStateError as Error, *}; /// Slash the validator with index ``index``. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn slash_validator( state: &mut BeaconState, slashed_index: usize, @@ -35,8 +35,7 @@ pub fn slash_validator( ); // Apply proposer and whistleblower rewards - let proposer_index = - state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?; + let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient; let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient; diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index 84bdebd97c..f8235db709 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -1,12 +1,11 @@ use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; -use crate::common::get_compact_committees_root; use tree_hash::TreeHash; use types::typenum::U4294967296; use types::*; /// Initialize a `BeaconState` from genesis data. /// -/// Spec v0.8.0 +/// Spec v0.9.1 // TODO: this is quite inefficient and we probably want to rethink how we do this pub fn initialize_beacon_state_from_eth1( eth1_block_hash: Hash256, @@ -24,6 +23,9 @@ pub fn initialize_beacon_state_from_eth1( }; let mut state = BeaconState::new(genesis_time, eth1_data, spec); + // Seed RANDAO with Eth1 entropy + state.fill_randao_mixes_with(eth1_block_hash); + // Process deposits let leaves: Vec<_> = deposits .iter() @@ -40,21 +42,12 @@ pub fn initialize_beacon_state_from_eth1( // Now that we have our validators, initialize the caches (including the committees) state.build_all_caches(spec)?; - // Populate active_index_roots and compact_committees_roots - let indices_list = VariableList::::from( - state.get_active_validator_indices(T::genesis_epoch()), - ); - let active_index_root = Hash256::from_slice(&indices_list.tree_hash_root()); - let committee_root = get_compact_committees_root(&state, RelativeEpoch::Current, spec)?; - state.fill_active_index_roots_with(active_index_root); - state.fill_compact_committees_roots_with(committee_root); - Ok(state) } /// Determine whether a candidate genesis state is suitable for starting the chain. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state.genesis_time >= spec.min_genesis_time && state.get_active_validator_indices(T::genesis_epoch()).len() as u64 diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index ada25d5fe3..c60b89a0c3 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -2,9 +2,7 @@ use crate::common::{initiate_validator_exit, slash_validator}; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex}; use rayon::prelude::*; use signature_sets::{block_proposal_signature_set, randao_signature_set}; -use std::collections::HashSet; use std::convert::TryInto; -use std::iter::FromIterator; use tree_hash::SignedRoot; use types::*; @@ -21,9 +19,6 @@ pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; -pub use verify_transfer::{ - execute_transfer, verify_transfer, verify_transfer_time_independent_only, -}; pub mod block_processing_builder; mod block_signature_verifier; @@ -36,7 +31,6 @@ mod verify_attester_slashing; mod verify_deposit; mod verify_exit; mod verify_proposer_slashing; -mod verify_transfer; /// The strategy to be used when validating the block's signatures. #[derive(PartialEq, Clone, Copy)] @@ -74,7 +68,7 @@ impl VerifySignatures { /// signature. If it is `None` the signed root is calculated here. This parameter only exists to /// avoid re-calculating the root when it is already known. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn per_block_processing( mut state: &mut BeaconState, block: &BeaconBlock, @@ -128,14 +122,13 @@ pub fn per_block_processing( verify_signatures, spec, )?; - process_transfers(&mut state, &block.body.transfers, verify_signatures, spec)?; Ok(()) } /// Processes the block header. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_block_header( state: &mut BeaconState, block: &BeaconBlock, @@ -158,7 +151,7 @@ pub fn process_block_header( state.latest_block_header = block.temporary_block_header(); // Verify proposer is not slashed - let proposer_idx = state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?; + let proposer_idx = state.get_beacon_proposer_index(block.slot, spec)?; let proposer = &state.validators[proposer_idx]; verify!( !proposer.slashed, @@ -174,7 +167,7 @@ pub fn process_block_header( /// Verifies the signature of a block. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_block_signature( state: &BeaconState, block: &BeaconBlock, @@ -192,7 +185,7 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_randao( state: &mut BeaconState, block: &BeaconBlock, @@ -215,7 +208,7 @@ pub fn process_randao( /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, @@ -240,7 +233,7 @@ pub fn process_eth1_data( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], @@ -269,7 +262,7 @@ pub fn process_proposer_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_attester_slashings( state: &mut BeaconState, attester_slashings: &[AttesterSlashing], @@ -323,7 +316,7 @@ pub fn process_attester_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], @@ -343,14 +336,12 @@ pub fn process_attestations( })?; // Update the state in series. - let proposer_index = - state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)? as u64; + let proposer_index = state.get_beacon_proposer_index(state.slot, spec)? as u64; for attestation in attestations { - let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; let pending_attestation = PendingAttestation { aggregation_bits: attestation.aggregation_bits.clone(), data: attestation.data.clone(), - inclusion_delay: (state.slot - attestation_slot).as_u64(), + inclusion_delay: (state.slot - attestation.data.slot).as_u64(), proposer_index, }; @@ -371,7 +362,7 @@ pub fn process_attestations( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_deposits( state: &mut BeaconState, deposits: &[Deposit], @@ -408,7 +399,7 @@ pub fn process_deposits( /// Process a single deposit, optionally verifying its merkle proof. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn process_deposit( state: &mut BeaconState, deposit: &Deposit, @@ -474,7 +465,7 @@ pub fn process_deposit( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_exits( state: &mut BeaconState, voluntary_exits: &[VoluntaryExit], @@ -496,39 +487,3 @@ pub fn process_exits( Ok(()) } - -/// Validates each `Transfer` and updates the state, short-circuiting on an invalid object. -/// -/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns -/// an `Err` describing the invalid object or cause of failure. -/// -/// Spec v0.8.0 -pub fn process_transfers( - state: &mut BeaconState, - transfers: &[Transfer], - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { - let expected_transfers = HashSet::<_>::from_iter(transfers).len(); - // Verify that there are no duplicate transfers - block_verify!( - transfers.len() == expected_transfers, - BlockProcessingError::DuplicateTransfers { - duplicates: transfers.len().saturating_sub(expected_transfers) - } - ); - - transfers - .par_iter() - .enumerate() - .try_for_each(|(i, transfer)| { - verify_transfer(&state, transfer, verify_signatures, spec) - .map_err(|e| e.into_with_index(i)) - })?; - - for (i, transfer) in transfers.iter().enumerate() { - execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?; - } - - Ok(()) -} diff --git a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs index 2e7e54b6ba..cba37cfbbf 100644 --- a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs +++ b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs @@ -55,9 +55,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { @@ -99,9 +97,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { @@ -112,7 +108,7 @@ impl BlockProcessingBuilder { ExitTestTask::AlreadyInitiated => { for _ in 0..2 { self.block_builder.insert_exit( - &test_task, + test_task, &mut state, (0 as usize).try_into().unwrap(), &keypairs[0].sk, @@ -123,7 +119,7 @@ impl BlockProcessingBuilder { _ => { for (i, keypair) in keypairs.iter().take(num_exits).enumerate() { self.block_builder.insert_exit( - &test_task, + test_task, &mut state, (i as usize).try_into().unwrap(), &keypair.sk, @@ -140,7 +136,7 @@ impl BlockProcessingBuilder { pub fn build_with_n_attestations( mut self, - test_task: &AttestationTestTask, + test_task: AttestationTestTask, num_attestations: u64, randao_sk: Option, previous_block_root: Option, @@ -158,9 +154,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { @@ -185,7 +179,7 @@ impl BlockProcessingBuilder { pub fn build_with_attester_slashing( mut self, - test_task: &AttesterSlashingTestTask, + test_task: AttesterSlashingTestTask, num_attester_slashings: u64, randao_sk: Option, previous_block_root: Option, @@ -203,9 +197,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { @@ -236,7 +228,7 @@ impl BlockProcessingBuilder { pub fn build_with_proposer_slashing( mut self, - test_task: &ProposerSlashingTestTask, + test_task: ProposerSlashingTestTask, num_proposer_slashings: u64, randao_sk: Option, previous_block_root: Option, @@ -254,9 +246,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { @@ -298,9 +288,7 @@ impl BlockProcessingBuilder { )), } - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let keypair = &keypairs[proposer_index]; match randao_sk { diff --git a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs index adc5e19ba4..762af0ca76 100644 --- a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -86,7 +86,6 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> { * Deposits are not included because they can legally have invalid signatures. */ verifier.include_exits()?; - verifier.include_transfers()?; verifier.verify() } @@ -209,19 +208,4 @@ impl<'a, T: EthSpec> BlockSignatureVerifier<'a, T> { Ok(()) } - - /// Includes all signatures in `self.block.body.transfers` for verification. - fn include_transfers(&mut self) -> Result<()> { - let mut sets = self - .block - .body - .transfers - .iter() - .map(|transfer| transfer_signature_set(&self.state, transfer, &self.spec)) - .collect::>()?; - - self.sets.append(&mut sets); - - Ok(()) - } } diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 1d3094a89f..c247794599 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -16,9 +16,6 @@ pub enum BlockProcessingError { expected: usize, found: usize, }, - DuplicateTransfers { - duplicates: usize, - }, HeaderInvalid { reason: HeaderInvalid, }, @@ -46,10 +43,6 @@ pub enum BlockProcessingError { index: usize, reason: ExitInvalid, }, - TransferInvalid { - index: usize, - reason: TransferInvalid, - }, BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), @@ -119,8 +112,7 @@ impl_into_block_processing_error_with_index!( IndexedAttestationInvalid, AttestationInvalid, DepositInvalid, - ExitInvalid, - TransferInvalid + ExitInvalid ); pub type HeaderValidationError = BlockOperationError; @@ -129,7 +121,6 @@ pub type ProposerSlashingValidationError = BlockOperationError; pub type DepositValidationError = BlockOperationError; pub type ExitValidationError = BlockOperationError; -pub type TransferValidationError = BlockOperationError; #[derive(Debug, PartialEq)] pub enum BlockOperationError { @@ -174,10 +165,10 @@ pub enum HeaderInvalid { pub enum ProposerSlashingInvalid { /// The proposer index is not a known validator. ProposerUnknown(u64), - /// The two proposal have different epochs. + /// The two proposal have different slots. /// /// (proposal_1_slot, proposal_2_slot) - ProposalEpochMismatch(Slot, Slot), + ProposalSlotMismatch(Slot, Slot), /// The proposals are identical and therefore not slashable. ProposalsIdentical, /// The specified proposer cannot be slashed because they are already slashed, or not active. @@ -207,8 +198,8 @@ pub enum AttesterSlashingInvalid { /// Describes why an object is invalid. #[derive(Debug, PartialEq)] pub enum AttestationInvalid { - /// Shard exceeds SHARD_COUNT. - BadShard, + /// Commmittee index exceeds number of committees in that slot. + BadCommitteeIndex, /// Attestation included before the inclusion delay. IncludedTooEarly { state: Slot, @@ -229,36 +220,18 @@ pub enum AttestationInvalid { attestation: Checkpoint, is_current: bool, }, - /// Attestation crosslink root does not match the state crosslink root for the attestations - /// slot. - BadParentCrosslinkHash, - /// Attestation crosslink start epoch does not match the end epoch of the state crosslink. - BadParentCrosslinkStartEpoch, - /// Attestation crosslink end epoch does not match the expected value. - BadParentCrosslinkEndEpoch, - /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. - CustodyBitfieldHasSetBits, /// There are no set bits on the attestation -- an attestation must be signed by at least one /// validator. AggregationBitfieldIsEmpty, - /// The custody bitfield length is not the smallest possible size to represent the committee. - BadCustodyBitfieldLength { - committee_len: usize, - bitfield_len: usize, - }, /// The aggregation bitfield length is not the smallest possible size to represent the committee. BadAggregationBitfieldLength { committee_len: usize, bitfield_len: usize, }, - /// The bits set in the custody bitfield are not a subset of those set in the aggregation bits. - CustodyBitfieldNotSubset, - /// There was no known committee in this `epoch` for the given shard and slot. - NoCommitteeForShard { shard: u64, slot: Slot }, + /// The validator index was unknown. + UnknownValidator(u64), /// The attestation signature verification failed. BadSignature, - /// The shard block root was not set to zero. This is a phase 0 requirement. - ShardBlockRootNotZero, /// The indexed attestation created from this attestation was found to be invalid. BadIndexedAttestation(IndexedAttestationInvalid), } @@ -280,14 +253,6 @@ impl From> #[derive(Debug, PartialEq)] pub enum IndexedAttestationInvalid { - /// The custody bit 0 validators intersect with the bit 1 validators. - CustodyBitValidatorsIntersect, - /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. - CustodyBitfieldHasSetBits, - /// The custody bitfield violated a type-level bound. - CustodyBitfieldBoundsError(ssz_types::Error), - /// No validator indices were specified. - NoValidatorIndices, /// The number of indices exceeds the global maximum. /// /// (max_indices, indices_given) @@ -339,56 +304,3 @@ pub enum ExitInvalid { /// been invalid or an internal error occurred. SignatureSetError(SignatureSetError), } - -#[derive(Debug, PartialEq)] -pub enum TransferInvalid { - /// The validator indicated by `transfer.from` is unknown. - FromValidatorUnknown(u64), - /// The validator indicated by `transfer.to` is unknown. - ToValidatorUnknown(u64), - /// The balance of `transfer.from` is insufficient. - /// - /// (required, available) - FromBalanceInsufficient(u64, u64), - /// Adding `transfer.fee` to `transfer.amount` causes an overflow. - /// - /// (transfer_fee, transfer_amount) - FeeOverflow(u64, u64), - /// This transfer would result in the `transfer.from` account to have `0 < balance < - /// min_deposit_amount` - /// - /// (resulting_amount, min_deposit_amount) - SenderDust(u64, u64), - /// This transfer would result in the `transfer.to` account to have `0 < balance < - /// min_deposit_amount` - /// - /// (resulting_amount, min_deposit_amount) - RecipientDust(u64, u64), - /// The state slot does not match `transfer.slot`. - /// - /// (state_slot, transfer_slot) - StateSlotMismatch(Slot, Slot), - /// The `transfer.slot` is in the past relative to the state slot. - /// - /// - /// (state_slot, transfer_slot) - TransferSlotInPast(Slot, Slot), - /// The `transfer.from` validator has been activated and is not withdrawable. - /// - /// (from_validator) - FromValidatorIneligibleForTransfer(u64), - /// The validators withdrawal credentials do not match `transfer.pubkey`. - /// - /// (state_credentials, transfer_pubkey_credentials) - WithdrawalCredentialsMismatch(Hash256, Hash256), - /// The deposit was not signed by `deposit.pubkey`. - BadSignature, - /// Overflow when adding to `transfer.to` balance. - /// - /// (to_balance, transfer_amount) - ToBalanceOverflow(u64, u64), - /// Overflow when adding to beacon proposer balance. - /// - /// (proposer_balance, transfer_fee) - ProposerBalanceOverflow(u64, u64), -} diff --git a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 194c2b018e..007cf741a1 100644 --- a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -1,8 +1,6 @@ use super::errors::{BlockOperationError, IndexedAttestationInvalid as Invalid}; use super::signature_sets::indexed_attestation_signature_set; use crate::VerifySignatures; -use std::collections::HashSet; -use std::iter::FromIterator; use types::*; type Result = std::result::Result>; @@ -13,38 +11,26 @@ fn error(reason: Invalid) -> BlockOperationError { /// Verify an `IndexedAttestation`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn is_valid_indexed_attestation( state: &BeaconState, indexed_attestation: &IndexedAttestation, verify_signatures: VerifySignatures, spec: &ChainSpec, ) -> Result<()> { - let bit_0_indices = &indexed_attestation.custody_bit_0_indices; - let bit_1_indices = &indexed_attestation.custody_bit_1_indices; - - // Verify no index has custody bit equal to 1 [to be removed in phase 1] - verify!(bit_1_indices.is_empty(), Invalid::CustodyBitfieldHasSetBits); + let indices = &indexed_attestation.attesting_indices; // Verify max number of indices - let total_indices = bit_0_indices.len() + bit_1_indices.len(); verify!( - total_indices <= T::MaxValidatorsPerCommittee::to_usize(), - Invalid::MaxIndicesExceed(T::MaxValidatorsPerCommittee::to_usize(), total_indices) + indices.len() <= T::MaxValidatorsPerCommittee::to_usize(), + Invalid::MaxIndicesExceed(T::MaxValidatorsPerCommittee::to_usize(), indices.len()) ); - // Verify index sets are disjoint - let custody_bit_intersection: HashSet<&u64> = - &HashSet::from_iter(bit_0_indices.iter()) & &HashSet::from_iter(bit_1_indices.iter()); - verify!( - custody_bit_intersection.is_empty(), - Invalid::CustodyBitValidatorsIntersect - ); - - // Check that both vectors of indices are sorted + // Check that indices are sorted let check_sorted = |list: &[u64]| -> Result<()> { list.windows(2).enumerate().try_for_each(|(i, pair)| { - if pair[0] >= pair[1] { + // The spec allows duplicates, so use strict comparison (>). + if pair[0] > pair[1] { Err(error(Invalid::BadValidatorIndicesOrdering(i))) } else { Ok(()) @@ -52,8 +38,7 @@ pub fn is_valid_indexed_attestation( })?; Ok(()) }; - check_sorted(&bit_0_indices)?; - check_sorted(&bit_1_indices)?; + check_sorted(indices)?; if verify_signatures.is_true() { verify!( diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index 35f47331d6..d4ac169ef7 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -2,14 +2,13 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. -use bls::SignatureSet; +use bls::{SignatureSet, SignedMessage}; use std::convert::TryInto; use tree_hash::{SignedRoot, TreeHash}; use types::{ - AggregateSignature, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, - BeaconBlockHeader, BeaconState, BeaconStateError, ChainSpec, DepositData, Domain, EthSpec, - Hash256, IndexedAttestation, ProposerSlashing, PublicKey, RelativeEpoch, Signature, Transfer, - VoluntaryExit, + AggregateSignature, AttesterSlashing, BeaconBlock, BeaconBlockHeader, BeaconState, + BeaconStateError, ChainSpec, DepositData, Domain, EthSpec, Hash256, IndexedAttestation, + ProposerSlashing, PublicKey, Signature, VoluntaryExit, }; pub type Result = std::result::Result; @@ -42,8 +41,7 @@ pub fn block_proposal_signature_set<'a, T: EthSpec>( block_signed_root: Option, spec: &'a ChainSpec, ) -> Result> { - let proposer_index = - state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?; + let proposer_index = state.get_beacon_proposer_index(block.slot, spec)?; let block_proposer = &state .validators .get(proposer_index) @@ -75,8 +73,7 @@ pub fn randao_signature_set<'a, T: EthSpec>( block: &'a BeaconBlock, spec: &'a ChainSpec, ) -> Result> { - let block_proposer = &state.validators - [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; + let block_proposer = &state.validators[state.get_beacon_proposer_index(block.slot, spec)?]; let domain = spec.get_domain( block.slot.epoch(T::slots_per_epoch()), @@ -141,31 +138,20 @@ pub fn indexed_attestation_signature_set<'a, 'b, T: EthSpec>( indexed_attestation: &'b IndexedAttestation, spec: &'a ChainSpec, ) -> Result> { - let message_0 = AttestationDataAndCustodyBit { - data: indexed_attestation.data.clone(), - custody_bit: false, - } - .tree_hash_root(); - let message_1 = AttestationDataAndCustodyBit { - data: indexed_attestation.data.clone(), - custody_bit: true, - } - .tree_hash_root(); + let message = indexed_attestation.data.tree_hash_root(); + + let signed_message = SignedMessage::new( + get_pubkeys(state, &indexed_attestation.attesting_indices)?, + message, + ); let domain = spec.get_domain( indexed_attestation.data.target.epoch, - Domain::Attestation, + Domain::BeaconAttester, &state.fork, ); - Ok(SignatureSet::dual( - signature, - message_0, - get_pubkeys(state, &indexed_attestation.custody_bit_0_indices)?, - message_1, - get_pubkeys(state, &indexed_attestation.custody_bit_1_indices)?, - domain, - )) + Ok(SignatureSet::new(signature, vec![signed_message], domain)) } /// Returns the signature set for the given `attester_slashing` and corresponding `pubkeys`. @@ -244,28 +230,6 @@ pub fn exit_signature_set<'a, T: EthSpec>( )) } -/// Returns a signature set that is valid if the `Transfer` was signed by `transfer.pubkey`. -pub fn transfer_signature_set<'a, T: EthSpec>( - state: &'a BeaconState, - transfer: &'a Transfer, - spec: &'a ChainSpec, -) -> Result> { - let domain = spec.get_domain( - transfer.slot.epoch(T::slots_per_epoch()), - Domain::Transfer, - &state.fork, - ); - - let message = transfer.signed_root(); - - Ok(SignatureSet::single( - &transfer.signature, - &transfer.pubkey, - message, - domain, - )) -} - /// Maps validator indices to public keys. fn get_pubkeys<'a, 'b, T, I>( state: &'a BeaconState, diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index fe94bf72ff..32c460d856 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -393,6 +393,7 @@ fn invalid_exit_already_exited() { ); } +/* FIXME: needs updating for v0.9 #[test] fn invalid_exit_not_active() { use std::cmp::max; @@ -421,6 +422,7 @@ fn invalid_exit_not_active() { }) ); } +*/ #[test] fn invalid_exit_already_initiated() { @@ -546,7 +548,7 @@ fn valid_attestations() { let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::Valid; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -560,91 +562,14 @@ fn valid_attestations() { assert_eq!(result, Ok(())); } -#[test] -fn invalid_attestation_parent_crosslink_start_epoch() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadParentCrosslinkStartEpoch; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting BadParentCrosslinkEndEpoch because we manually set an invalid crosslink start epoch - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::BadParentCrosslinkStartEpoch - }) - ); -} - -#[test] -fn invalid_attestation_parent_crosslink_end_epoch() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadParentCrosslinkEndEpoch; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting BadParentCrosslinkEndEpoch because we manually set an invalid crosslink end epoch - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::BadParentCrosslinkEndEpoch - }) - ); -} - -#[test] -fn invalid_attestation_parent_crosslink_hash() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadParentCrosslinkHash; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting BadParentCrosslinkHash because we manually set an invalid crosslink parent_root - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::BadParentCrosslinkHash - }) - ); -} - +/* FIXME: needs updating for v0.9 #[test] fn invalid_attestation_no_committee_for_shard() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::NoCommiteeForShard; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -658,10 +583,14 @@ fn invalid_attestation_no_committee_for_shard() { assert_eq!( result, Err(BlockProcessingError::BeaconStateError( - BeaconStateError::NoCommitteeForShard + BeaconStateError::NoCommittee { + slot: Slot::new(0), + index: 0 + } )) ); } +*/ #[test] fn invalid_attestation_wrong_justified_checkpoint() { @@ -669,7 +598,7 @@ fn invalid_attestation_wrong_justified_checkpoint() { let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::WrongJustifiedCheckpoint; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -706,7 +635,7 @@ fn invalid_attestation_bad_target_too_low() { let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::BadTargetTooLow; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -716,16 +645,14 @@ fn invalid_attestation_bad_target_too_low() { &spec, ); - // Expecting EpochTooLow because we manually set the + // Expecting BadTargetEpoch because we manually set the // target field of the AttestationData object to be invalid assert_eq!( result, - Err(BlockProcessingError::BeaconStateError( - BeaconStateError::RelativeEpochError(RelativeEpochError::EpochTooLow { - base: state.current_epoch(), - other: Epoch::from(0 as u64), - }) - )) + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::BadTargetEpoch + }) ); } @@ -735,7 +662,7 @@ fn invalid_attestation_bad_target_too_high() { let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::BadTargetTooHigh; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -745,43 +672,13 @@ fn invalid_attestation_bad_target_too_high() { &spec, ); - // Expecting EpochTooHigh because we manually set the + // Expecting BadTargetEpoch because we manually set the // target field of the AttestationData object to be invalid - assert_eq!( - result, - Err(BlockProcessingError::BeaconStateError( - BeaconStateError::RelativeEpochError(RelativeEpochError::EpochTooHigh { - base: state.current_epoch(), - other: Epoch::from(10 as u64), - }) - )) - ); -} - -#[test] -fn invalid_attestation_bad_crosslink_data_root() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadParentCrosslinkDataRoot; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting ShardBlockRootNotZero because we manually set the - // data_root of the cross link to be non zero - assert_eq!( result, Err(BlockProcessingError::AttestationInvalid { index: 0, - reason: AttestationInvalid::ShardBlockRootNotZero, + reason: AttestationInvalid::BadTargetEpoch }) ); } @@ -792,7 +689,7 @@ fn invalid_attestation_bad_indexed_attestation_bad_signature() { let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test let test_task = AttestationTestTask::BadIndexedAttestationBadSignature; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -814,90 +711,13 @@ fn invalid_attestation_bad_indexed_attestation_bad_signature() { ); } -#[test] -fn invalid_attestation_custody_bitfield_not_subset() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test - let test_task = AttestationTestTask::CustodyBitfieldNotSubset; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting CustodyBitfieldNotSubset because we set custody_bit to true without setting the aggregation bits. - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::CustodyBitfieldNotSubset - }) - ); -} - -#[test] -fn invalid_attestation_custody_bitfield_has_set_bits() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, 33); // minmium number of validators required for this test - let test_task = AttestationTestTask::CustodyBitfieldHasSetBits; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting CustodyBitfieldHasSetBits because we set custody bits even though the custody_bit boolean is set to false - assert_eq!( - result, - Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::BadIndexedAttestation( - IndexedAttestationInvalid::CustodyBitfieldHasSetBits - ) - }) - ); -} - -#[test] -fn invalid_attestation_bad_custody_bitfield_len() { - let spec = MainnetEthSpec::default_spec(); - let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); - let test_task = AttestationTestTask::BadCustodyBitfieldLen; - let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); - - let result = per_block_processing( - &mut state, - &block, - None, - BlockSignatureStrategy::VerifyIndividual, - &spec, - ); - - // Expecting InvalidBitfield because the size of the custody_bitfield is bigger than the commitee size. - assert_eq!( - result, - Err(BlockProcessingError::BeaconStateError( - BeaconStateError::InvalidBitfield - )) - ); -} - #[test] fn invalid_attestation_bad_aggregation_bitfield_len() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::BadAggregationBitfieldLen; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -922,7 +742,7 @@ fn invalid_attestation_bad_signature() { let builder = get_builder(&spec, SLOT_OFFSET, 97); // minimal number of required validators for this test let test_task = AttestationTestTask::BadSignature; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, &block, @@ -949,7 +769,7 @@ fn invalid_attestation_included_too_early() { let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::IncludedTooEarly; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -965,9 +785,9 @@ fn invalid_attestation_included_too_early() { Err(BlockProcessingError::AttestationInvalid { index: 0, reason: AttestationInvalid::IncludedTooEarly { - state: Slot::from(319 as u64), + state: state.slot, delay: spec.min_attestation_inclusion_delay, - attestation: Slot::from(319 as u64) + attestation: block.body.attestations[0].data.slot, } }) ); @@ -976,11 +796,11 @@ fn invalid_attestation_included_too_early() { #[test] fn invalid_attestation_included_too_late() { let spec = MainnetEthSpec::default_spec(); - // note to maintainer: might need to increase validator count if we get NoCommitteeForShard + // note to maintainer: might need to increase validator count if we get NoCommittee let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::IncludedTooLate; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -990,31 +810,26 @@ fn invalid_attestation_included_too_late() { &spec, ); - // Expecting IncludedTooLate because the shard included in the crosslink is bigger than expected - assert!( - result - == Err(BlockProcessingError::BeaconStateError( - BeaconStateError::NoCommitteeForShard - )) - || result - == Err(BlockProcessingError::AttestationInvalid { - index: 0, - reason: AttestationInvalid::IncludedTooLate { - state: state.slot, - attestation: Slot::from(254 as u64), - } - }) + assert_eq!( + result, + Err(BlockProcessingError::AttestationInvalid { + index: 0, + reason: AttestationInvalid::IncludedTooLate { + state: state.slot, + attestation: block.body.attestations[0].data.slot, + } + }) ); } #[test] fn invalid_attestation_bad_target_epoch() { let spec = MainnetEthSpec::default_spec(); - // note to maintainer: might need to increase validator count if we get NoCommitteeForShard + // note to maintainer: might need to increase validator count if we get NoCommittee let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::BadTargetEpoch; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -1028,7 +843,10 @@ fn invalid_attestation_bad_target_epoch() { assert!( result == Err(BlockProcessingError::BeaconStateError( - BeaconStateError::NoCommitteeForShard + BeaconStateError::NoCommittee { + slot: Slot::new(0), + index: 0 + } )) || result == Err(BlockProcessingError::AttestationInvalid { @@ -1038,13 +856,14 @@ fn invalid_attestation_bad_target_epoch() { ); } +/* FIXME: needs updating for v0.9 #[test] fn invalid_attestation_bad_shard() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = AttestationTestTask::BadShard; let (block, mut state) = - builder.build_with_n_attestations(&test_task, NUM_ATTESTATIONS, None, None, &spec); + builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec); let result = per_block_processing( &mut state, @@ -1054,7 +873,7 @@ fn invalid_attestation_bad_shard() { &spec, ); - // Expecting BadShard or NoCommitteeForShard because the shard number is higher than ShardCount + // Expecting BadShard or NoCommittee because the shard number is higher than ShardCount assert!( result == Err(BlockProcessingError::AttestationInvalid { @@ -1063,10 +882,14 @@ fn invalid_attestation_bad_shard() { }) || result == Err(BlockProcessingError::BeaconStateError( - BeaconStateError::NoCommitteeForShard + BeaconStateError::NoCommittee { + slot: Slot::new(0), + index: 0 + } )) ); } +*/ #[test] fn valid_insert_attester_slashing() { @@ -1075,7 +898,7 @@ fn valid_insert_attester_slashing() { let test_task = AttesterSlashingTestTask::Valid; let num_attester_slashings = 1; let (block, mut state) = - builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); let result = per_block_processing( &mut state, @@ -1096,7 +919,7 @@ fn invalid_attester_slashing_not_slashable() { let test_task = AttesterSlashingTestTask::NotSlashable; let num_attester_slashings = 1; let (block, mut state) = - builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); let result = per_block_processing( &mut state, &block, @@ -1122,7 +945,7 @@ fn invalid_attester_slashing_1_invalid() { let test_task = AttesterSlashingTestTask::IndexedAttestation1Invalid; let num_attester_slashings = 1; let (block, mut state) = - builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); let result = per_block_processing( &mut state, @@ -1132,22 +955,12 @@ fn invalid_attester_slashing_1_invalid() { &spec, ); - // Expecting IndexedAttestation1Invalid or IndexedAttestationInvalid because Attestation1 has CustodyBitfield bits set. - assert!( - result - == Err(BlockProcessingError::IndexedAttestationInvalid { - index: 0, - reason: IndexedAttestationInvalid::CustodyBitfieldHasSetBits - }) - || result - == Err(BlockProcessingError::AttesterSlashingInvalid { - index: 0, - reason: AttesterSlashingInvalid::IndexedAttestation1Invalid( - BlockOperationError::Invalid( - IndexedAttestationInvalid::CustodyBitfieldHasSetBits - ) - ) - }) + assert_eq!( + result, + Err(BlockProcessingError::IndexedAttestationInvalid { + index: 0, + reason: IndexedAttestationInvalid::BadValidatorIndicesOrdering(0) + }) ); } @@ -1158,7 +971,7 @@ fn invalid_attester_slashing_2_invalid() { let test_task = AttesterSlashingTestTask::IndexedAttestation2Invalid; let num_attester_slashings = 1; let (block, mut state) = - builder.build_with_attester_slashing(&test_task, num_attester_slashings, None, None, &spec); + builder.build_with_attester_slashing(test_task, num_attester_slashings, None, None, &spec); let result = per_block_processing( &mut state, @@ -1168,22 +981,12 @@ fn invalid_attester_slashing_2_invalid() { &spec, ); - // Expecting IndexedAttestation2Invalid or IndexedAttestationInvalid because Attestation2 has CustodyBitfield bits set. - assert!( - result - == Err(BlockProcessingError::IndexedAttestationInvalid { - index: 1, - reason: IndexedAttestationInvalid::CustodyBitfieldHasSetBits - }) - || result - == Err(BlockProcessingError::AttesterSlashingInvalid { - index: 1, - reason: AttesterSlashingInvalid::IndexedAttestation2Invalid( - BlockOperationError::Invalid( - IndexedAttestationInvalid::CustodyBitfieldHasSetBits - ) - ) - }) + assert_eq!( + result, + Err(BlockProcessingError::IndexedAttestationInvalid { + index: 1, + reason: IndexedAttestationInvalid::BadValidatorIndicesOrdering(0) + }) ); } @@ -1192,7 +995,7 @@ fn valid_insert_proposer_slashing() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::Valid; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1211,7 +1014,7 @@ fn invalid_proposer_slashing_proposals_identical() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::ProposalsIdentical; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1235,7 +1038,7 @@ fn invalid_proposer_slashing_proposer_unknown() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::ProposerUnknown; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1260,7 +1063,7 @@ fn invalid_proposer_slashing_not_slashable() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::ProposerNotSlashable; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); state.validators[0].slashed = true; let result = per_block_processing( @@ -1286,7 +1089,7 @@ fn invalid_bad_proposal_1_signature() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::BadProposal1Signature; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1311,7 +1114,7 @@ fn invalid_bad_proposal_2_signature() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::BadProposal2Signature; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1336,7 +1139,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() { let spec = MainnetEthSpec::default_spec(); let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT); let test_task = ProposerSlashingTestTask::ProposalEpochMismatch; - let (block, mut state) = builder.build_with_proposer_slashing(&test_task, 1, None, None, &spec); + let (block, mut state) = builder.build_with_proposer_slashing(test_task, 1, None, None, &spec); let result = per_block_processing( &mut state, @@ -1351,7 +1154,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() { result, Err(BlockProcessingError::ProposerSlashingInvalid { index: 0, - reason: ProposerSlashingInvalid::ProposalEpochMismatch( + reason: ProposerSlashingInvalid::ProposalSlotMismatch( Slot::from(0 as u64), Slot::from(128 as u64) ) @@ -1363,7 +1166,7 @@ fn get_builder( spec: &ChainSpec, slot_offset: u64, num_validators: usize, -) -> (BlockProcessingBuilder) { +) -> BlockProcessingBuilder { let mut builder = BlockProcessingBuilder::new(num_validators, &spec); // Set the state and block to be in the last slot of the `slot_offset`th epoch. @@ -1371,6 +1174,5 @@ fn get_builder( (MainnetEthSpec::genesis_epoch() + slot_offset).end_slot(MainnetEthSpec::slots_per_epoch()); builder.set_slot(last_slot_of_epoch); builder.build_caches(&spec); - - (builder) + builder } diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 2f5b1252e9..1540dee935 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -2,7 +2,6 @@ use super::errors::{AttestationInvalid as Invalid, BlockOperationError}; use super::VerifySignatures; use crate::common::get_indexed_attestation; use crate::per_block_processing::is_valid_indexed_attestation; -use tree_hash::TreeHash; use types::*; type Result = std::result::Result>; @@ -16,7 +15,7 @@ fn error(reason: Invalid) -> BlockOperationError { /// /// Optionally verifies the aggregate signature, depending on `verify_signatures`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_attestation_for_block_inclusion( state: &BeaconState, attestation: &Attestation, @@ -25,22 +24,19 @@ pub fn verify_attestation_for_block_inclusion( ) -> Result<()> { let data = &attestation.data; - // Check attestation slot. - let attestation_slot = state.get_attestation_data_slot(&data)?; - verify!( - attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, + data.slot + spec.min_attestation_inclusion_delay <= state.slot, Invalid::IncludedTooEarly { state: state.slot, delay: spec.min_attestation_inclusion_delay, - attestation: attestation_slot + attestation: data.slot, } ); verify!( - state.slot <= attestation_slot + T::slots_per_epoch(), + state.slot <= data.slot + T::slots_per_epoch(), Invalid::IncludedTooLate { state: state.slot, - attestation: attestation_slot + attestation: data.slot, } ); @@ -53,7 +49,7 @@ pub fn verify_attestation_for_block_inclusion( /// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the /// prior blocks in `state`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_attestation_for_state( state: &BeaconState, attestation: &Attestation, @@ -63,35 +59,12 @@ pub fn verify_attestation_for_state( let data = &attestation.data; verify!( - data.crosslink.shard < T::ShardCount::to_u64(), - Invalid::BadShard + data.index < state.get_committee_count_at_slot(data.slot)?, + Invalid::BadCommitteeIndex ); - // Verify the Casper FFG vote and crosslink data. - let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; - - verify!( - data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), - Invalid::BadParentCrosslinkHash - ); - verify!( - data.crosslink.start_epoch == parent_crosslink.end_epoch, - Invalid::BadParentCrosslinkStartEpoch - ); - verify!( - data.crosslink.end_epoch - == std::cmp::min( - data.target.epoch, - parent_crosslink.end_epoch + spec.max_epochs_per_crosslink - ), - Invalid::BadParentCrosslinkEndEpoch - ); - - // Crosslink data root is zero (to be removed in phase 1). - verify!( - attestation.data.crosslink.data_root == Hash256::zero(), - Invalid::ShardBlockRootNotZero - ); + // Verify the Casper FFG vote. + verify_casper_ffg_vote(attestation, state)?; // Check signature and bitfields let indexed_attestation = get_indexed_attestation(state, attestation)?; @@ -102,13 +75,11 @@ pub fn verify_attestation_for_state( /// Check target epoch and source checkpoint. /// -/// Return the parent crosslink for further checks. -/// -/// Spec v0.8.0 -fn verify_casper_ffg_vote<'a, T: EthSpec>( +/// Spec v0.9.1 +fn verify_casper_ffg_vote( attestation: &Attestation, - state: &'a BeaconState, -) -> Result<&'a Crosslink> { + state: &BeaconState, +) -> Result<()> { let data = &attestation.data; if data.target.epoch == state.current_epoch() { verify!( @@ -119,7 +90,7 @@ fn verify_casper_ffg_vote<'a, T: EthSpec>( is_current: true, } ); - Ok(state.get_current_crosslink(data.crosslink.shard)?) + Ok(()) } else if data.target.epoch == state.previous_epoch() { verify!( data.source == state.previous_justified_checkpoint, @@ -129,7 +100,7 @@ fn verify_casper_ffg_vote<'a, T: EthSpec>( is_current: false, } ); - Ok(state.get_previous_crosslink(data.crosslink.shard)?) + Ok(()) } else { Err(error(Invalid::BadTargetEpoch)) } diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index 601da5577e..cf966935f2 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -15,7 +15,7 @@ fn error(reason: Invalid) -> BlockOperationError { /// /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn verify_attester_slashing( state: &BeaconState, attester_slashing: &AttesterSlashing, @@ -47,7 +47,7 @@ pub fn verify_attester_slashing( /// /// Returns Ok(indices) if `indices.len() > 0`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn get_slashable_indices( state: &BeaconState, attester_slashing: &AttesterSlashing, @@ -71,15 +71,13 @@ where let attestation_2 = &attester_slashing.attestation_2; let attesting_indices_1 = attestation_1 - .custody_bit_0_indices + .attesting_indices .iter() - .chain(&attestation_1.custody_bit_1_indices) .cloned() .collect::>(); let attesting_indices_2 = attestation_2 - .custody_bit_0_indices + .attesting_indices .iter() - .chain(&attestation_2.custody_bit_1_indices) .cloned() .collect::>(); diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index c854bb82aa..9992eb89ec 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -14,7 +14,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError { /// Verify `Deposit.pubkey` signed `Deposit.signature`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> { let deposit_signature_message = deposit_pubkey_signature_message(&deposit_data) .ok_or_else(|| error(DepositInvalid::BadBlsBytes))?; @@ -46,7 +46,7 @@ pub fn get_existing_validator_index( /// The deposit index is provided as a parameter so we can check proofs /// before they're due to be processed, and in parallel. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_deposit_merkle_proof( state: &BeaconState, deposit: &Deposit, diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index b2448b3b6e..385e506102 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -13,7 +13,7 @@ fn error(reason: ExitInvalid) -> BlockOperationError { /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, @@ -25,7 +25,7 @@ pub fn verify_exit( /// Like `verify_exit` but doesn't run checks which may become true in future states. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_exit_time_independent_only( state: &BeaconState, exit: &VoluntaryExit, @@ -37,7 +37,7 @@ pub fn verify_exit_time_independent_only( /// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true. /// -/// Spec v0.8.0 +/// Spec v0.9.1 fn verify_exit_parametric( state: &BeaconState, exit: &VoluntaryExit, diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index a0078ecf82..9683d8762a 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -14,7 +14,7 @@ fn error(reason: Invalid) -> BlockOperationError { /// /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn verify_proposer_slashing( proposer_slashing: &ProposerSlashing, state: &BeaconState, @@ -26,11 +26,10 @@ pub fn verify_proposer_slashing( .get(proposer_slashing.proposer_index as usize) .ok_or_else(|| error(Invalid::ProposerUnknown(proposer_slashing.proposer_index)))?; - // Verify that the epoch is the same + // Verify slots match verify!( - proposer_slashing.header_1.slot.epoch(T::slots_per_epoch()) - == proposer_slashing.header_2.slot.epoch(T::slots_per_epoch()), - Invalid::ProposalEpochMismatch( + proposer_slashing.header_1.slot == proposer_slashing.header_2.slot, + Invalid::ProposalSlotMismatch( proposer_slashing.header_1.slot, proposer_slashing.header_2.slot ) diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs deleted file mode 100644 index bd3527c1e7..0000000000 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ /dev/null @@ -1,208 +0,0 @@ -use super::errors::{BlockOperationError, TransferInvalid as Invalid}; -use crate::per_block_processing::signature_sets::transfer_signature_set; -use crate::per_block_processing::VerifySignatures; -use bls::get_withdrawal_credentials; -use types::*; - -type Result = std::result::Result>; - -fn error(reason: Invalid) -> BlockOperationError { - BlockOperationError::invalid(reason) -} - -/// Indicates if a `Transfer` is valid to be included in a block in the current epoch of the given -/// state. -/// -/// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.8.0 -pub fn verify_transfer( - state: &BeaconState, - transfer: &Transfer, - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<()> { - verify_transfer_parametric(state, transfer, verify_signatures, spec, false) -} - -/// Like `verify_transfer` but doesn't run checks which may become true in future states. -/// -/// Spec v0.8.0 -pub fn verify_transfer_time_independent_only( - state: &BeaconState, - transfer: &Transfer, - verify_signatures: VerifySignatures, - spec: &ChainSpec, -) -> Result<()> { - verify_transfer_parametric(state, transfer, verify_signatures, spec, true) -} - -/// Parametric version of `verify_transfer` that allows some checks to be skipped. -/// -/// When `time_independent_only == true`, time-specific parameters are ignored, including: -/// -/// - Balance considerations (e.g., adequate balance, not dust, etc). -/// - `transfer.slot` does not have to exactly match `state.slot`, it just needs to be in the -/// present or future. -/// - Validator transfer eligibility (e.g., is withdrawable) -/// -/// Spec v0.8.0 -fn verify_transfer_parametric( - state: &BeaconState, - transfer: &Transfer, - verify_signatures: VerifySignatures, - spec: &ChainSpec, - time_independent_only: bool, -) -> Result<()> { - let sender_balance = *state - .balances - .get(transfer.sender as usize) - .ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?; - - let recipient_balance = *state - .balances - .get(transfer.recipient as usize) - .ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.recipient)))?; - - // Safely determine `amount + fee`. - let total_amount = transfer - .amount - .checked_add(transfer.fee) - .ok_or_else(|| error(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; - - // Verify the sender has adequate balance. - verify!( - time_independent_only || sender_balance >= total_amount, - Invalid::FromBalanceInsufficient(total_amount, sender_balance) - ); - - // Verify sender balance will not be "dust" (i.e., greater than zero but less than the minimum deposit - // amount). - verify!( - time_independent_only - || (sender_balance == total_amount) - || (sender_balance >= (total_amount + spec.min_deposit_amount)), - Invalid::SenderDust(sender_balance - total_amount, spec.min_deposit_amount) - ); - - // Verify the recipient balance will not be dust. - verify!( - time_independent_only || ((recipient_balance + transfer.amount) >= spec.min_deposit_amount), - Invalid::RecipientDust(sender_balance - total_amount, spec.min_deposit_amount) - ); - - // If loosely enforcing `transfer.slot`, ensure the slot is not in the past. Otherwise, ensure - // the transfer slot equals the state slot. - if time_independent_only { - verify!( - state.slot <= transfer.slot, - Invalid::TransferSlotInPast(state.slot, transfer.slot) - ); - } else { - verify!( - state.slot == transfer.slot, - Invalid::StateSlotMismatch(state.slot, transfer.slot) - ); - } - - // Load the sender `Validator` record from the state. - let sender_validator = state - .validators - .get(transfer.sender as usize) - .ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?; - - // Ensure one of the following is met: - // - // - Time dependent checks are being ignored. - // - The sender has never been eligible for activation. - // - The sender is withdrawable at the state's epoch. - // - The transfer will not reduce the sender below the max effective balance. - verify!( - time_independent_only - || sender_validator.activation_eligibility_epoch == spec.far_future_epoch - || sender_validator.is_withdrawable_at(state.current_epoch()) - || total_amount + spec.max_effective_balance <= sender_balance, - Invalid::FromValidatorIneligibleForTransfer(transfer.sender) - ); - - // Ensure the withdrawal credentials generated from the sender's pubkey match those stored in - // the validator registry. - // - // This ensures the validator can only perform a transfer when they are in control of the - // withdrawal address. - let transfer_withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..], - ); - verify!( - sender_validator.withdrawal_credentials == transfer_withdrawal_credentials, - Invalid::WithdrawalCredentialsMismatch( - sender_validator.withdrawal_credentials, - transfer_withdrawal_credentials - ) - ); - - if verify_signatures.is_true() { - verify!( - transfer_signature_set(state, transfer, spec)?.is_valid(), - Invalid::BadSignature - ); - } - - Ok(()) -} - -/// Executes a transfer on the state. -/// -/// Does not check that the transfer is valid, however checks for overflow in all actions. -/// -/// Spec v0.8.0 -pub fn execute_transfer( - state: &mut BeaconState, - transfer: &Transfer, - spec: &ChainSpec, -) -> Result<()> { - let sender_balance = *state - .balances - .get(transfer.sender as usize) - .ok_or_else(|| error(Invalid::FromValidatorUnknown(transfer.sender)))?; - let recipient_balance = *state - .balances - .get(transfer.recipient as usize) - .ok_or_else(|| error(Invalid::ToValidatorUnknown(transfer.recipient)))?; - - let proposer_index = - state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?; - let proposer_balance = state.balances[proposer_index]; - - let total_amount = transfer - .amount - .checked_add(transfer.fee) - .ok_or_else(|| error(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; - - state.balances[transfer.sender as usize] = - sender_balance.checked_sub(total_amount).ok_or_else(|| { - error(Invalid::FromBalanceInsufficient( - total_amount, - sender_balance, - )) - })?; - - state.balances[transfer.recipient as usize] = recipient_balance - .checked_add(transfer.amount) - .ok_or_else(|| { - error(Invalid::ToBalanceOverflow( - recipient_balance, - transfer.amount, - )) - })?; - - state.balances[proposer_index] = - proposer_balance.checked_add(transfer.fee).ok_or_else(|| { - error(Invalid::ProposerBalanceOverflow( - proposer_balance, - transfer.fee, - )) - })?; - - Ok(()) -} diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index bcac1dc27c..080767203c 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,10 +1,7 @@ -use crate::common::get_compact_committees_root; use errors::EpochProcessingError as Error; -use std::collections::HashMap; use tree_hash::TreeHash; use types::*; use validator_statuses::{TotalBalances, ValidatorStatuses}; -use winning_root::{winning_root, WinningRoot}; pub mod apply_rewards; pub mod errors; @@ -12,23 +9,17 @@ pub mod process_slashings; pub mod registry_updates; pub mod tests; pub mod validator_statuses; -pub mod winning_root; pub use apply_rewards::process_rewards_and_penalties; pub use process_slashings::process_slashings; pub use registry_updates::process_registry_updates; -/// Maps a shard to a winning root. -/// -/// It is generated during crosslink processing and later used to reward/penalize validators. -pub type WinningRootHashSet = HashMap; - /// Performs per-epoch processing on some BeaconState. /// /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is /// returned, a state might be "half-processed" and therefore in an invalid state. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn per_epoch_processing( state: &mut BeaconState, spec: &ChainSpec, @@ -47,9 +38,6 @@ pub fn per_epoch_processing( // Justification and finalization. process_justification_and_finalization(state, &validator_statuses.total_balances)?; - // Crosslinks. - process_crosslinks(state, spec)?; - // Rewards and Penalties. process_rewards_and_penalties(state, &mut validator_statuses, spec)?; @@ -78,7 +66,7 @@ pub fn per_epoch_processing( /// - `finalized_epoch` /// - `finalized_root` /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[allow(clippy::if_same_then_else)] // For readability and consistency with spec. pub fn process_justification_and_finalization( state: &mut BeaconState, @@ -144,47 +132,9 @@ pub fn process_justification_and_finalization( Ok(()) } -/// Updates the following fields on the `BeaconState`: -/// -/// - `previous_crosslinks` -/// - `current_crosslinks` -/// -/// Also returns a `WinningRootHashSet` for later use during epoch processing. -/// -/// Spec v0.8.0 -pub fn process_crosslinks( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), Error> { - state.previous_crosslinks = state.current_crosslinks.clone(); - - for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] { - let epoch = relative_epoch.into_epoch(state.current_epoch()); - for offset in 0..state.get_committee_count(relative_epoch)? { - let shard = - (state.get_epoch_start_shard(relative_epoch)? + offset) % T::ShardCount::to_u64(); - let crosslink_committee = - state.get_crosslink_committee_for_shard(shard, relative_epoch)?; - - let winning_root = winning_root(state, shard, epoch, spec)?; - - if let Some(winning_root) = winning_root { - let total_committee_balance = - state.get_total_balance(&crosslink_committee.committee, spec)?; - - if 3 * winning_root.total_attesting_balance >= 2 * total_committee_balance { - state.current_crosslinks[shard as usize] = winning_root.crosslink.clone(); - } - } - } - } - - Ok(()) -} - /// Finish up an epoch update. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_final_updates( state: &mut BeaconState, spec: &ChainSpec, @@ -211,23 +161,6 @@ pub fn process_final_updates( } } - // Set active index root - let index_epoch = next_epoch + spec.activation_exit_delay; - let indices_list = VariableList::::from( - state.get_active_validator_indices(index_epoch), - ); - state.set_active_index_root( - index_epoch, - Hash256::from_slice(&indices_list.tree_hash_root()), - spec, - )?; - - // Set committees root - state.set_compact_committee_root( - next_epoch, - get_compact_committees_root(state, RelativeEpoch::Next, spec)?, - )?; - // Reset slashings state.set_slashings(next_epoch, 0)?; @@ -242,9 +175,6 @@ pub fn process_final_updates( .push(Hash256::from_slice(&historical_batch.tree_hash_root()))?; } - // Update start shard. - state.start_shard = state.get_epoch_start_shard(RelativeEpoch::Next)?; - // Rotate current/previous epoch attestations state.previous_epoch_attestations = std::mem::replace(&mut state.current_epoch_attestations, VariableList::empty()); diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 6de9ed872a..f0f701160c 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -32,7 +32,7 @@ impl std::ops::AddAssign for Delta { /// Apply attester and proposer rewards. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_rewards_and_penalties( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, @@ -53,11 +53,6 @@ pub fn process_rewards_and_penalties( get_attestation_deltas(&mut deltas, state, &validator_statuses, spec)?; - // Update statuses with the information from winning roots. - validator_statuses.process_winning_roots(state, spec)?; - - get_crosslink_deltas(&mut deltas, state, &validator_statuses, spec)?; - get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?; // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). @@ -71,7 +66,7 @@ pub fn process_rewards_and_penalties( /// For each attesting validator, reward the proposer who was first to include their attestation. /// -/// Spec v0.8.0 +/// Spec v0.9.1 fn get_proposer_deltas( deltas: &mut Vec, state: &BeaconState, @@ -79,10 +74,10 @@ fn get_proposer_deltas( spec: &ChainSpec, ) -> Result<(), Error> { for (index, validator) in validator_statuses.statuses.iter().enumerate() { - if validator.is_previous_epoch_attester { + if validator.is_previous_epoch_attester && !validator.is_slashed { let inclusion = validator .inclusion_info - .expect("It is a logic error for an attester not to have an inclusion distance."); + .expect("It is a logic error for an attester not to have an inclusion delay."); let base_reward = get_base_reward( state, @@ -104,7 +99,7 @@ fn get_proposer_deltas( /// Apply rewards for participation in attestations during the previous epoch. /// -/// Spec v0.8.0 +/// Spec v0.9.1 fn get_attestation_deltas( deltas: &mut Vec, state: &BeaconState, @@ -137,7 +132,7 @@ fn get_attestation_deltas( /// Determine the delta for a single validator, sans proposer rewards. /// -/// Spec v0.8.0 +/// Spec v0.9.1 fn get_attestation_delta( validator: &ValidatorStatus, total_balances: &TotalBalances, @@ -171,13 +166,8 @@ fn get_attestation_delta( let max_attester_reward = base_reward - proposer_reward; let inclusion = validator .inclusion_info - .expect("It is a logic error for an attester not to have an inclusion distance."); - delta.reward( - max_attester_reward - * (T::SlotsPerEpoch::to_u64() + spec.min_attestation_inclusion_delay - - inclusion.distance) - / T::SlotsPerEpoch::to_u64(), - ); + .expect("It is a logic error for an attester not to have an inclusion delay."); + delta.reward(max_attester_reward / inclusion.delay); } else { delta.penalize(base_reward); } @@ -222,43 +212,9 @@ fn get_attestation_delta( delta } -/// Calculate the deltas based upon the winning roots for attestations during the previous epoch. -/// -/// Spec v0.8.0 -fn get_crosslink_deltas( - deltas: &mut Vec, - state: &BeaconState, - validator_statuses: &ValidatorStatuses, - spec: &ChainSpec, -) -> Result<(), Error> { - for (index, validator) in validator_statuses.statuses.iter().enumerate() { - let mut delta = Delta::default(); - - let base_reward = get_base_reward( - state, - index, - validator_statuses.total_balances.current_epoch, - spec, - )?; - - if let Some(ref winning_root) = validator.winning_root_info { - delta.reward( - base_reward * winning_root.total_attesting_balance - / winning_root.total_committee_balance, - ); - } else { - delta.penalize(base_reward); - } - - deltas[index] += delta; - } - - Ok(()) -} - /// Returns the base reward for some validator. /// -/// Spec v0.8.0 +/// Spec v0.9.1 fn get_base_reward( state: &BeaconState, index: usize, diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index d244955ee6..b96add8e20 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -2,7 +2,7 @@ use types::{BeaconStateError as Error, *}; /// Process slashings. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_slashings( state: &mut BeaconState, total_balance: u64, diff --git a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs index 3f654e4421..ab3d529799 100644 --- a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs @@ -5,7 +5,7 @@ use types::*; /// Performs a validator registry update, if required. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn process_registry_updates( state: &mut BeaconState, spec: &ChainSpec, diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 3280b981f2..285fc94cdd 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,4 +1,3 @@ -use super::{winning_root::winning_root, WinningRootHashSet}; use crate::common::get_attesting_indices; use types::*; @@ -12,34 +11,21 @@ macro_rules! set_self_if_other_is_true { }; } -/// The information required to reward some validator for their participation in a "winning" -/// crosslink root. -#[derive(Default, Clone)] -pub struct WinningRootInfo { - /// The total balance of the crosslink committee. - pub total_committee_balance: u64, - /// The total balance of the crosslink committee that attested for the "winning" root. - pub total_attesting_balance: u64, -} - /// The information required to reward a block producer for including an attestation in a block. #[derive(Clone, Copy)] pub struct InclusionInfo { - /// The earliest slot a validator had an attestation included in the previous epoch. - pub slot: Slot, /// The distance between the attestation slot and the slot that attestation was included in a /// block. - pub distance: u64, + pub delay: u64, /// The index of the proposer at the slot where the attestation was included. pub proposer_index: usize, } impl Default for InclusionInfo { - /// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero. + /// Defaults to `delay` at its maximum value and `proposer_index` at zero. fn default() -> Self { Self { - slot: Slot::max_value(), - distance: u64::max_value(), + delay: u64::max_value(), proposer_index: 0, } } @@ -49,9 +35,8 @@ impl InclusionInfo { /// Tests if some `other` `InclusionInfo` has a lower inclusion slot than `self`. If so, /// replaces `self` with `other`. pub fn update(&mut self, other: &Self) { - if other.slot < self.slot { - self.slot = other.slot; - self.distance = other.distance; + if other.delay < self.delay { + self.delay = other.delay; self.proposer_index = other.proposer_index; } } @@ -88,9 +73,6 @@ pub struct ValidatorStatus { /// Information used to reward the block producer of this validators earliest-included /// attestation. pub inclusion_info: Option, - /// Information used to reward/penalize the validator if they voted in the super-majority for - /// some shard block. - pub winning_root_info: Option, } impl ValidatorStatus { @@ -162,7 +144,7 @@ impl ValidatorStatuses { /// - Active validators /// - Total balances for the current and previous epochs. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn new( state: &BeaconState, spec: &ChainSpec, @@ -202,7 +184,7 @@ impl ValidatorStatuses { /// Process some attestations from the given `state` updating the `statuses` and /// `total_balances` fields. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn process_attestations( &mut self, state: &BeaconState, @@ -228,19 +210,11 @@ impl ValidatorStatuses { } else if a.data.target.epoch == state.previous_epoch() { status.is_previous_epoch_attester = true; - // The inclusion slot and distance are only required for previous epoch attesters. - let attestation_slot = state.get_attestation_data_slot(&a.data)?; - let inclusion_slot = attestation_slot + a.inclusion_delay; - let relative_epoch = - RelativeEpoch::from_slot(state.slot, inclusion_slot, T::slots_per_epoch())?; + // The inclusion delay and proposer index are only required for previous epoch + // attesters. status.inclusion_info = Some(InclusionInfo { - slot: inclusion_slot, - distance: a.inclusion_delay, - proposer_index: state.get_beacon_proposer_index( - inclusion_slot, - relative_epoch, - spec, - )?, + delay: a.inclusion_delay, + proposer_index: a.proposer_index as usize, }); if target_matches_epoch_start_block(a, state, state.previous_epoch())? { @@ -284,66 +258,12 @@ impl ValidatorStatuses { Ok(()) } - - /// Update the `statuses` for each validator based upon whether or not they attested to the - /// "winning" shard block root for the previous epoch. - /// - /// Spec v0.8.1 - pub fn process_winning_roots( - &mut self, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result<(), BeaconStateError> { - // We must re-calculate the winning roots here because it is possible that they have - // changed since the first time they were calculated. - // - // This is because we altered the state during the first time we calculated the winning - // roots. - let winning_root_for_shards = { - let mut winning_root_for_shards = WinningRootHashSet::new(); - let relative_epoch = RelativeEpoch::Previous; - - let epoch = relative_epoch.into_epoch(state.current_epoch()); - for offset in 0..state.get_committee_count(relative_epoch)? { - let shard = (state.get_epoch_start_shard(relative_epoch)? + offset) - % T::ShardCount::to_u64(); - if let Some(winning_root) = winning_root(state, shard, epoch, spec)? { - winning_root_for_shards.insert(shard, winning_root); - } - } - - winning_root_for_shards - }; - - // Loop through each slot in the previous epoch. - for slot in state.previous_epoch().slot_iter(T::slots_per_epoch()) { - let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?; - - // Loop through each committee in the slot. - for c in crosslink_committees_at_slot { - // If there was some winning crosslink root for the committee's shard. - if let Some(winning_root) = winning_root_for_shards.get(&c.shard) { - let total_committee_balance = state.get_total_balance(&c.committee, spec)?; - for &validator_index in &winning_root.attesting_validator_indices { - // Take note of the balance information for the winning root, it will be - // used later to calculate rewards for that validator. - self.statuses[validator_index].winning_root_info = Some(WinningRootInfo { - total_committee_balance, - total_attesting_balance: winning_root.total_attesting_balance, - }) - } - } - } - } - - Ok(()) - } } /// Returns `true` if the attestation's FFG target is equal to the hash of the `state`'s first /// beacon block in the given `epoch`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 fn target_matches_epoch_start_block( a: &PendingAttestation, state: &BeaconState, @@ -358,13 +278,12 @@ fn target_matches_epoch_start_block( /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the current slot of the `PendingAttestation`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 fn has_common_beacon_block_root( a: &PendingAttestation, state: &BeaconState, ) -> Result { - let attestation_slot = state.get_attestation_data_slot(&a.data)?; - let state_block_root = *state.get_block_root(attestation_slot)?; + let state_block_root = *state.get_block_root(a.data.slot)?; Ok(a.data.beacon_block_root == state_block_root) } diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs deleted file mode 100644 index 82a6b0ff1f..0000000000 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::common::get_attesting_indices; -use std::collections::{HashMap, HashSet}; -use tree_hash::TreeHash; -use types::*; - -#[derive(Clone, Debug)] -pub struct WinningRoot { - pub crosslink: Crosslink, - pub attesting_validator_indices: Vec, - pub total_attesting_balance: u64, -} - -impl WinningRoot { - /// Returns `true` if `self` is a "better" candidate than `other`. - /// - /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties - /// are broken by favouring the higher `crosslink_data_root` value. - /// - /// Spec v0.8.0 - pub fn is_better_than(&self, other: &Self) -> bool { - (self.total_attesting_balance, self.crosslink.data_root) - > (other.total_attesting_balance, other.crosslink.data_root) - } -} - -/// Returns the crosslink `data_root` with the highest total attesting balance for the given shard. -/// Breaks ties by favouring the smaller crosslink `data_root` hash. -/// -/// The `WinningRoot` object also contains additional fields that are useful in later stages of -/// per-epoch processing. -/// -/// Spec v0.8.0 -pub fn winning_root( - state: &BeaconState, - shard: u64, - epoch: Epoch, - spec: &ChainSpec, -) -> Result, BeaconStateError> { - let attestations: Vec<&_> = state - .get_matching_source_attestations(epoch)? - .iter() - .filter(|a| a.data.crosslink.shard == shard) - .collect(); - - // Build a map from crosslinks to attestations that support that crosslink. - let mut candidate_crosslink_map = HashMap::new(); - let current_shard_crosslink_root = state.get_current_crosslink(shard)?.tree_hash_root(); - - for a in attestations { - if a.data.crosslink.parent_root.as_bytes() == ¤t_shard_crosslink_root[..] - || a.data.crosslink.tree_hash_root() == current_shard_crosslink_root - { - let supporting_attestations = candidate_crosslink_map - .entry(&a.data.crosslink) - .or_insert_with(Vec::new); - supporting_attestations.push(a); - } - } - - // Find the maximum crosslink. - let mut winning_root = None; - for (crosslink, attestations) in candidate_crosslink_map { - let attesting_validator_indices = - get_unslashed_attesting_indices_unsorted(state, &attestations)?; - let total_attesting_balance = - state.get_total_balance(&attesting_validator_indices, spec)?; - - let candidate = WinningRoot { - crosslink: crosslink.clone(), - attesting_validator_indices, - total_attesting_balance, - }; - - if let Some(ref winner) = winning_root { - if candidate.is_better_than(&winner) { - winning_root = Some(candidate); - } - } else { - winning_root = Some(candidate); - } - } - - Ok(winning_root) -} - -pub fn get_unslashed_attesting_indices_unsorted( - state: &BeaconState, - attestations: &[&PendingAttestation], -) -> Result, BeaconStateError> { - let mut output = HashSet::new(); - for a in attestations { - output.extend(get_attesting_indices(state, &a.data, &a.aggregation_bits)?); - } - Ok(output - .into_iter() - .filter(|index| state.validators.get(*index).map_or(false, |v| !v.slashed)) - .collect()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn is_better_than() { - let worse = WinningRoot { - crosslink: Crosslink { - shard: 0, - start_epoch: Epoch::new(0), - end_epoch: Epoch::new(1), - parent_root: Hash256::from_slice(&[0; 32]), - data_root: Hash256::from_slice(&[1; 32]), - }, - attesting_validator_indices: vec![], - total_attesting_balance: 42, - }; - - let mut better = worse.clone(); - better.crosslink.data_root = Hash256::from_slice(&[2; 32]); - - assert!(better.is_better_than(&worse)); - - let better = WinningRoot { - total_attesting_balance: worse.total_attesting_balance + 1, - ..worse.clone() - }; - - assert!(better.is_better_than(&worse)); - } -} diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index a1c68edd9e..0b8a906c15 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -9,7 +9,7 @@ pub enum Error { /// Advances a state forward by one slot, performing per-epoch processing if required. /// -/// Spec v0.8.0 +/// Spec v0.9.1 pub fn per_slot_processing( state: &mut BeaconState, spec: &ChainSpec, diff --git a/eth2/state_processing/src/test_utils.rs b/eth2/state_processing/src/test_utils.rs index 1651cf7943..803830062e 100644 --- a/eth2/state_processing/src/test_utils.rs +++ b/eth2/state_processing/src/test_utils.rs @@ -15,7 +15,6 @@ pub struct BlockBuilder { pub num_attestations: usize, pub num_deposits: usize, pub num_exits: usize, - pub num_transfers: usize, } impl BlockBuilder { @@ -33,7 +32,6 @@ impl BlockBuilder { num_attestations: 0, num_deposits: 0, num_exits: 0, - num_transfers: 0, } } @@ -43,7 +41,6 @@ impl BlockBuilder { self.num_attestations = T::MaxAttestations::to_usize(); self.num_deposits = T::MaxDeposits::to_usize(); self.num_exits = T::MaxVoluntaryExits::to_usize(); - self.num_transfers = T::MaxTransfers::to_usize(); } pub fn set_slot(&mut self, slot: Slot) { @@ -61,9 +58,7 @@ impl BlockBuilder { builder.set_slot(state.slot); - let proposer_index = state - .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) - .unwrap(); + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); let proposer_keypair = &keypairs[proposer_index]; @@ -80,7 +75,7 @@ impl BlockBuilder { let validator_index = validators_iter.next().expect("Insufficient validators."); builder.insert_proposer_slashing( - &ProposerSlashingTestTask::Valid, + ProposerSlashingTestTask::Valid, validator_index, &keypairs[validator_index as usize].sk, &state.fork, @@ -107,7 +102,7 @@ impl BlockBuilder { } builder.insert_attester_slashing( - &AttesterSlashingTestTask::Valid, + AttesterSlashingTestTask::Valid, &attesters, &secret_keys, &state.fork, @@ -123,7 +118,7 @@ impl BlockBuilder { let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); builder .insert_attestations( - &AttestationTestTask::Valid, + AttestationTestTask::Valid, &state, &all_secret_keys, self.num_attestations as usize, @@ -151,7 +146,7 @@ impl BlockBuilder { let validator_index = validators_iter.next().expect("Insufficient validators."); builder.insert_exit( - &ExitTestTask::Valid, + ExitTestTask::Valid, &mut state, validator_index, &keypairs[validator_index as usize].sk, @@ -163,24 +158,6 @@ impl BlockBuilder { builder.block.body.voluntary_exits.len() ); - // Insert the maximum possible number of `Transfer` objects. - for _ in 0..self.num_transfers { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - // Manually set the validator to be withdrawn. - state.validators[validator_index as usize].withdrawable_epoch = state.previous_epoch(); - - builder.insert_transfer( - &state, - validator_index, - validator_index, - 1, - keypairs[validator_index as usize].clone(), - spec, - ); - } - info!("Inserted {} transfers.", builder.block.body.transfers.len()); - // Set the eth1 data to be different from the state. self.block_builder.block.body.eth1_data.block_hash = Hash256::from_slice(&[42; 32]); diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index a7390c8505..77bc14ba30 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -205,6 +205,4 @@ mod signatures_minimal { spec, ); } - - // Cannot test transfers because their length is zero. } diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index c78e998241..5c08964bc7 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// Details an attestation that can be slashable. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, Clone, @@ -26,7 +26,6 @@ use tree_hash_derive::{SignedRoot, TreeHash}; pub struct Attestation { pub aggregation_bits: BitList, pub data: AttestationData, - pub custody_bits: BitList, #[signed_root(skip_hashing)] pub signature: AggregateSignature, } @@ -47,7 +46,6 @@ impl Attestation { debug_assert!(self.signers_disjoint_from(other)); self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); - self.custody_bits = self.custody_bits.union(&other.custody_bits); self.signature.add_aggregate(&other.signature); } } diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 4d82ce1261..e768950f05 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{Checkpoint, Crosslink, Hash256}; +use crate::{Checkpoint, Hash256, Slot}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,20 +8,20 @@ use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, )] pub struct AttestationData { + pub slot: Slot, + pub index: u64, + // LMD GHOST vote pub beacon_block_root: Hash256, // FFG Vote pub source: Checkpoint, pub target: Checkpoint, - - // Crosslink Vote - pub crosslink: Crosslink, } #[cfg(test)] diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs deleted file mode 100644 index a161f346f3..0000000000 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ /dev/null @@ -1,22 +0,0 @@ -use super::AttestationData; -use crate::test_utils::TestRandom; -use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use test_random_derive::TestRandom; -use tree_hash_derive::TreeHash; - -/// Used for pairing an attestation with a proof-of-custody. -/// -/// Spec v0.8.1 -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] -pub struct AttestationDataAndCustodyBit { - pub data: AttestationData, - pub custody_bit: bool, -} - -#[cfg(test)] -mod test { - use super::*; - - ssz_tests!(AttestationDataAndCustodyBit); -} diff --git a/eth2/types/src/attestation_duty.rs b/eth2/types/src/attestation_duty.rs index 299fdd44cf..d9117fc11f 100644 --- a/eth2/types/src/attestation_duty.rs +++ b/eth2/types/src/attestation_duty.rs @@ -3,8 +3,12 @@ use serde_derive::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { + /// The slot during which the attester must attest. pub slot: Slot, - pub shard: Shard, - pub committee_index: usize, + /// The index of this committee within the committees in `slot`. + pub index: CommitteeIndex, + /// The position of the attester within the committee. + pub committee_position: usize, + /// The total number of attesters in the committee. pub committee_len: usize, } diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 36fe33c755..7284a453d3 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Two conflicting attestations. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[serde(bound = "T: EthSpec")] pub struct AttesterSlashing { diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 500bde6e43..4c8fb0110d 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// A block of the `BeaconChain`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, PartialEq, @@ -36,7 +36,7 @@ pub struct BeaconBlock { impl BeaconBlock { /// Returns an empty block to be used during genesis. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn empty(spec: &ChainSpec) -> Self { BeaconBlock { slot: spec.genesis_slot, @@ -55,7 +55,6 @@ impl BeaconBlock { attestations: VariableList::empty(), deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), - transfers: VariableList::empty(), }, signature: Signature::empty_signature(), } @@ -68,7 +67,7 @@ impl BeaconBlock { /// Returns the `signed_root` of the block. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.signed_root()[..]) } @@ -80,7 +79,7 @@ impl BeaconBlock { /// /// Note: performs a full tree-hash of `self.body`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { slot: self.slot, @@ -93,7 +92,7 @@ impl BeaconBlock { /// Returns a "temporary" header, where the `state_root` is `Hash256::zero()`. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn temporary_block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { state_root: Hash256::zero(), diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index c1f66b816c..6319b14603 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -10,7 +10,7 @@ use tree_hash_derive::TreeHash; /// The body of a `BeaconChain` block, containing operations. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[serde(bound = "T: EthSpec")] pub struct BeaconBlockBody { @@ -26,7 +26,6 @@ pub struct BeaconBlockBody { pub attestations: VariableList, T::MaxAttestations>, pub deposits: VariableList, pub voluntary_exits: VariableList, - pub transfers: VariableList, } #[cfg(test)] diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index 19477a18b1..0b61bed500 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// A header of a `BeaconBlock`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive( Debug, PartialEq, @@ -35,14 +35,14 @@ pub struct BeaconBlockHeader { impl BeaconBlockHeader { /// Returns the `tree_hash_root` of the header. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.signed_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock { BeaconBlock { slot: self.slot, diff --git a/eth2/types/src/crosslink_committee.rs b/eth2/types/src/beacon_committee.rs similarity index 52% rename from eth2/types/src/crosslink_committee.rs rename to eth2/types/src/beacon_committee.rs index 0f7a401ca2..e237499ff7 100644 --- a/eth2/types/src/crosslink_committee.rs +++ b/eth2/types/src/beacon_committee.rs @@ -1,25 +1,25 @@ use crate::*; #[derive(Default, Clone, Debug, PartialEq)] -pub struct CrosslinkCommittee<'a> { +pub struct BeaconCommittee<'a> { pub slot: Slot, - pub shard: Shard, + pub index: CommitteeIndex, pub committee: &'a [usize], } -impl<'a> CrosslinkCommittee<'a> { - pub fn into_owned(self) -> OwnedCrosslinkCommittee { - OwnedCrosslinkCommittee { +impl<'a> BeaconCommittee<'a> { + pub fn into_owned(self) -> OwnedBeaconCommittee { + OwnedBeaconCommittee { slot: self.slot, - shard: self.shard, + index: self.index, committee: self.committee.to_vec(), } } } #[derive(Default, Clone, Debug, PartialEq)] -pub struct OwnedCrosslinkCommittee { +pub struct OwnedBeaconCommittee { pub slot: Slot, - pub shard: Shard, + pub index: CommitteeIndex, pub committee: Vec, } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index b96a53d74b..9cd949d877 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -5,12 +5,13 @@ use crate::*; use cached_tree_hash::{CachedTreeHash, MultiTreeHashCache, TreeHashCache}; use compare_fields_derive::CompareFields; use eth2_hashing::hash; -use int_to_bytes::{int_to_bytes32, int_to_bytes8}; +use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; +use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::{CachedTreeHash, TreeHash}; @@ -31,7 +32,6 @@ const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; pub enum Error { EpochOutOfBounds, SlotOutOfBounds, - ShardOutOfBounds, UnknownValidator, UnableToDetermineProducer, InvalidBitfield, @@ -45,8 +45,10 @@ pub enum Error { InsufficientAttestations, InsufficientCommittees, InsufficientStateRoots, - NoCommitteeForShard, - NoCommitteeForSlot, + NoCommittee { + slot: Slot, + index: CommitteeIndex, + }, ZeroSlotsPerEpoch, PubkeyCacheInconsistent, PubkeyCacheIncomplete { @@ -56,7 +58,7 @@ pub enum Error { PreviousCommitteeCacheUninitialized, CurrentCommitteeCacheUninitialized, RelativeEpochError(RelativeEpochError), - CommitteeCacheUninitialized(RelativeEpoch), + CommitteeCacheUninitialized(Option), SszTypesError(ssz_types::Error), CachedTreeHashError(cached_tree_hash::Error), } @@ -86,8 +88,6 @@ pub struct BeaconTreeHashCache { validators: MultiTreeHashCache, balances: TreeHashCache, randao_mixes: TreeHashCache, - active_index_roots: TreeHashCache, - compact_committees_roots: TreeHashCache, slashings: TreeHashCache, } @@ -99,7 +99,7 @@ impl BeaconTreeHashCache { /// The state of the `BeaconChain` at some slot. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, PartialEq, @@ -148,16 +148,9 @@ where #[cached_tree_hash(balances)] pub balances: VariableList, - // Shuffling - pub start_shard: u64, + // Randomness #[cached_tree_hash(randao_mixes)] pub randao_mixes: FixedVector, - #[compare_fields(as_slice)] - #[cached_tree_hash(active_index_roots)] - pub active_index_roots: FixedVector, - #[compare_fields(as_slice)] - #[cached_tree_hash(compact_committees_roots)] - pub compact_committees_roots: FixedVector, // Slashings #[cached_tree_hash(slashings)] @@ -167,10 +160,6 @@ where pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, - // Crosslinks - pub previous_crosslinks: FixedVector, - pub current_crosslinks: FixedVector, - // Finality #[test_random(default)] pub justification_bits: BitVector, @@ -210,7 +199,7 @@ impl BeaconState { /// /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { BeaconState { // Versioning @@ -233,11 +222,8 @@ impl BeaconState { validators: VariableList::empty(), // Set later. balances: VariableList::empty(), // Set later. - // Shuffling - start_shard: 0, + // Randomness randao_mixes: FixedVector::from_elem(Hash256::zero()), - active_index_roots: FixedVector::from_elem(Hash256::zero()), - compact_committees_roots: FixedVector::from_elem(Hash256::zero()), // Slashings slashings: FixedVector::from_elem(0), @@ -246,10 +232,6 @@ impl BeaconState { previous_epoch_attestations: VariableList::empty(), current_epoch_attestations: VariableList::empty(), - // Crosslinks - previous_crosslinks: FixedVector::from_elem(Crosslink::default()), - current_crosslinks: FixedVector::from_elem(Crosslink::default()), - // Finality justification_bits: BitVector::new(), previous_justified_checkpoint: Checkpoint::default(), @@ -270,7 +252,7 @@ impl BeaconState { /// Returns the `tree_hash_root` of the state. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } @@ -299,7 +281,7 @@ impl BeaconState { /// The epoch corresponding to `self.slot`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn current_epoch(&self) -> Epoch { self.slot.epoch(T::slots_per_epoch()) } @@ -308,7 +290,7 @@ impl BeaconState { /// /// If the current epoch is the genesis epoch, the genesis_epoch is returned. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn previous_epoch(&self) -> Epoch { let current_epoch = self.current_epoch(); if current_epoch > T::genesis_epoch() { @@ -320,43 +302,29 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn next_epoch(&self) -> Epoch { self.current_epoch() + 1 } - pub fn get_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { - let cache = self.cache(relative_epoch)?; + /// Compute the number of committees at `slot`. + /// + /// Makes use of the committee cache and will fail if no cache exists for the slot's epoch. + /// + /// Spec v0.9.1 + pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { + let cache = self.committee_cache_at_slot(slot)?; + Ok(cache.committees_per_slot() as u64) + } + /// Compute the number of committees in an entire epoch. + /// + /// Spec v0.9.1 + pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + let cache = self.committee_cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) } - pub fn get_epoch_start_shard(&self, relative_epoch: RelativeEpoch) -> Result { - let cache = self.cache(relative_epoch)?; - - Ok(cache.epoch_start_shard()) - } - - /// Get the slot of an attestation. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.8.0 - pub fn get_attestation_data_slot( - &self, - attestation_data: &AttestationData, - ) -> Result { - let target_relative_epoch = - RelativeEpoch::from_epoch(self.current_epoch(), attestation_data.target.epoch)?; - - let cc = self.get_crosslink_committee_for_shard( - attestation_data.crosslink.shard, - target_relative_epoch, - )?; - - Ok(cc.slot) - } - /// Return the cached active validator indices at some epoch. /// /// Note: the indices are shuffled (i.e., not in ascending order). @@ -366,7 +334,7 @@ impl BeaconState { &self, relative_epoch: RelativeEpoch, ) -> Result<&[usize], Error> { - let cache = self.cache(relative_epoch)?; + let cache = self.committee_cache(relative_epoch)?; Ok(&cache.active_validator_indices()) } @@ -375,7 +343,7 @@ impl BeaconState { /// /// Does not utilize the cache, performs a full iteration over the validator registry. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec { get_active_validator_indices(&self.validators, epoch) } @@ -386,91 +354,106 @@ impl BeaconState { /// /// Returns an error if that epoch is not cached, or the cache is not initialized. pub fn get_shuffling(&self, relative_epoch: RelativeEpoch) -> Result<&[usize], Error> { - let cache = self.cache(relative_epoch)?; + let cache = self.committee_cache(relative_epoch)?; Ok(cache.shuffling()) } - /// Returns the crosslink committees for some slot. + /// Get the Beacon committee at the given slot and index. /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// Utilises the committee cache. /// - /// Spec v0.8.1 - pub fn get_crosslink_committees_at_slot( + /// Spec v0.9.1 + pub fn get_beacon_committee( &self, slot: Slot, - ) -> Result, Error> { - let relative_epoch = RelativeEpoch::from_slot(self.slot, slot, T::slots_per_epoch())?; - let cache = self.cache(relative_epoch)?; + index: CommitteeIndex, + ) -> Result { + let epoch = slot.epoch(T::slots_per_epoch()); + let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; + let cache = self.committee_cache(relative_epoch)?; cache - .get_crosslink_committees_for_slot(slot) - .ok_or_else(|| Error::NoCommitteeForSlot) + .get_beacon_committee(slot, index) + .ok_or(Error::NoCommittee { slot, index }) } - /// Returns the crosslink committees for some shard in some cached epoch. + /// Get all of the Beacon committees at a given slot. /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// Utilises the committee cache. /// - /// Spec v0.8.1 - pub fn get_crosslink_committee_for_shard( - &self, - shard: u64, - relative_epoch: RelativeEpoch, - ) -> Result { - let cache = self.cache(relative_epoch)?; - - let committee = cache - .get_crosslink_committee_for_shard(shard) - .ok_or_else(|| Error::NoCommitteeForShard)?; - - Ok(committee) + /// Spec v0.9.1 + pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result, Error> { + let cache = self.committee_cache_at_slot(slot)?; + cache.get_beacon_committees_at_slot(slot) } - /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. + /// Compute the proposer (not necessarily for the Beacon chain) from a list of indices. /// - /// Spec v0.8.1 + /// Spec v0.9.1 // NOTE: be sure to test this bad boy. - pub fn get_beacon_proposer_index( + pub fn compute_proposer_index( &self, - slot: Slot, - relative_epoch: RelativeEpoch, + indices: &[usize], + seed: &[u8], spec: &ChainSpec, ) -> Result { - let cache = self.cache(relative_epoch)?; - let epoch = relative_epoch.into_epoch(self.current_epoch()); - - let first_committee = cache - .first_committee_at_slot(slot) - .ok_or_else(|| Error::SlotOutOfBounds)?; - let seed = self.get_seed(epoch, spec)?; - - if first_committee.is_empty() { + if indices.is_empty() { return Err(Error::InsufficientValidators); } let mut i = 0; - Ok(loop { - let candidate_index = first_committee[(epoch.as_usize() + i) % first_committee.len()]; + loop { + let candidate_index = indices[compute_shuffled_index( + i % indices.len(), + indices.len(), + seed, + spec.shuffle_round_count, + ) + .ok_or(Error::UnableToShuffle)?]; let random_byte = { - let mut preimage = seed.as_bytes().to_vec(); + let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8((i / 32) as u64)); let hash = hash(&preimage); hash[i % 32] }; let effective_balance = self.validators[candidate_index].effective_balance; - if (effective_balance * MAX_RANDOM_BYTE) - >= (spec.max_effective_balance * u64::from(random_byte)) + if effective_balance * MAX_RANDOM_BYTE + >= spec.max_effective_balance * u64::from(random_byte) { - break candidate_index; + return Ok(candidate_index); } i += 1; - }) + } + } + + /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. + /// + /// Spec v0.9.1 + pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + let epoch = slot.epoch(T::slots_per_epoch()); + let seed = self.get_beacon_proposer_seed(slot, spec)?; + let indices = self.get_active_validator_indices(epoch); + + self.compute_proposer_index(&indices, &seed, spec) + } + + /// Compute the seed to use for the beacon proposer selection at the given `slot`. + /// + /// Spec v0.9.1 + fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + let epoch = slot.epoch(T::slots_per_epoch()); + let mut preimage = self + .get_seed(epoch, Domain::BeaconProposer, spec)? + .as_bytes() + .to_vec(); + preimage.append(&mut int_to_bytes8(slot.as_u64())); + Ok(hash(&preimage)) } /// Safely obtains the index for latest block roots, given some `slot`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) { Ok(slot.as_usize() % self.block_roots.len()) @@ -481,7 +464,7 @@ impl BeaconState { /// Return the block root at a recent `slot`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_block_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_block_roots_index(slot)?; Ok(&self.block_roots[i]) @@ -489,7 +472,7 @@ impl BeaconState { /// Return the block root at a recent `epoch`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 // NOTE: the spec calls this get_block_root pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { self.get_block_root(epoch.start_slot(T::slots_per_epoch())) @@ -497,7 +480,7 @@ impl BeaconState { /// Sets the block root for some given slot. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn set_block_root( &mut self, slot: Slot, @@ -508,9 +491,14 @@ impl BeaconState { Ok(()) } + /// Fill `randao_mixes` with + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) { + self.randao_mixes = FixedVector::from_elem(index_root); + } + /// Safely obtains the index for `randao_mixes` /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn get_randao_mix_index( &self, epoch: Epoch, @@ -532,7 +520,7 @@ impl BeaconState { /// /// See `Self::get_randao_mix`. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize(); @@ -545,7 +533,7 @@ impl BeaconState { /// Return the randao mix at a recent ``epoch``. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; Ok(&self.randao_mixes[i]) @@ -553,115 +541,16 @@ impl BeaconState { /// Set the randao mix at a recent ``epoch``. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; self.randao_mixes[i] = mix; Ok(()) } - /// Safely obtains the index for `active_index_roots`, given some `epoch`. - /// - /// If `allow_next_epoch` is `True`, then we allow an _extra_ one epoch of lookahead. - /// - /// Spec v0.8.1 - fn get_active_index_root_index( - &self, - epoch: Epoch, - spec: &ChainSpec, - allow_next_epoch: AllowNextEpoch, - ) -> Result { - let current_epoch = self.current_epoch(); - - let lookahead = spec.activation_exit_delay; - let lookback = self.active_index_roots.len() as u64 - lookahead; - let epoch_upper_bound = allow_next_epoch.upper_bound_of(current_epoch) + lookahead; - - if current_epoch < epoch + lookback && epoch <= epoch_upper_bound { - Ok(epoch.as_usize() % self.active_index_roots.len()) - } else { - Err(Error::EpochOutOfBounds) - } - } - - /// Return the `active_index_root` at a recent `epoch`. - /// - /// Spec v0.8.1 - pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { - let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::False)?; - Ok(self.active_index_roots[i]) - } - - /// Set the `active_index_root` at a recent `epoch`. - /// - /// Spec v0.8.1 - pub fn set_active_index_root( - &mut self, - epoch: Epoch, - index_root: Hash256, - spec: &ChainSpec, - ) -> Result<(), Error> { - let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::True)?; - self.active_index_roots[i] = index_root; - Ok(()) - } - - /// Replace `active_index_roots` with clones of `index_root`. - /// - /// Spec v0.8.0 - pub fn fill_active_index_roots_with(&mut self, index_root: Hash256) { - self.active_index_roots = FixedVector::from_elem(index_root); - } - - /// Safely obtains the index for `compact_committees_roots`, given some `epoch`. - /// - /// Spec v0.8.1 - fn get_compact_committee_root_index( - &self, - epoch: Epoch, - allow_next_epoch: AllowNextEpoch, - ) -> Result { - let current_epoch = self.current_epoch(); - let len = T::EpochsPerHistoricalVector::to_u64(); - - if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok(epoch.as_usize() % len as usize) - } else { - Err(Error::EpochOutOfBounds) - } - } - - /// Return the `compact_committee_root` at a recent `epoch`. - /// - /// Spec v0.8.1 - pub fn get_compact_committee_root(&self, epoch: Epoch) -> Result { - let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::False)?; - Ok(self.compact_committees_roots[i]) - } - - /// Set the `compact_committee_root` at a recent `epoch`. - /// - /// Spec v0.8.1 - pub fn set_compact_committee_root( - &mut self, - epoch: Epoch, - index_root: Hash256, - ) -> Result<(), Error> { - let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::True)?; - self.compact_committees_roots[i] = index_root; - Ok(()) - } - - /// Replace `compact_committees_roots` with clones of `committee_root`. - /// - /// Spec v0.8.0 - pub fn fill_compact_committees_roots_with(&mut self, committee_root: Hash256) { - self.compact_committees_roots = FixedVector::from_elem(committee_root); - } - /// Safely obtains the index for latest state roots, given some `slot`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) { Ok(slot.as_usize() % self.state_roots.len()) @@ -672,7 +561,7 @@ impl BeaconState { /// Gets the state root for some slot. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { let i = self.get_latest_state_roots_index(slot)?; Ok(&self.state_roots[i]) @@ -680,7 +569,7 @@ impl BeaconState { /// Gets the oldest (earliest slot) state root. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { let i = self.get_latest_state_roots_index(self.slot - Slot::from(self.state_roots.len()))?; @@ -689,7 +578,7 @@ impl BeaconState { /// Sets the latest state root for slot. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { let i = self.get_latest_state_roots_index(slot)?; self.state_roots[i] = state_root; @@ -698,7 +587,7 @@ impl BeaconState { /// Safely obtain the index for `slashings`, given some `epoch`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn get_slashings_index( &self, epoch: Epoch, @@ -718,14 +607,14 @@ impl BeaconState { /// Get a reference to the entire `slashings` vector. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn get_all_slashings(&self) -> &[u64] { &self.slashings } /// Get the total slashed balances for some epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; Ok(self.slashings[i]) @@ -733,7 +622,7 @@ impl BeaconState { /// Set the total slashed balances for some epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; self.slashings[i] = value; @@ -742,7 +631,7 @@ impl BeaconState { /// Get the attestations from the current or previous epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_matching_source_attestations( &self, epoch: Epoch, @@ -756,48 +645,40 @@ impl BeaconState { } } - /// Get the current crosslink for a shard. - /// - /// Spec v0.8.1 - pub fn get_current_crosslink(&self, shard: u64) -> Result<&Crosslink, Error> { - self.current_crosslinks - .get(shard as usize) - .ok_or(Error::ShardOutOfBounds) - } - - /// Get the previous crosslink for a shard. - /// - /// Spec v0.8.1 - pub fn get_previous_crosslink(&self, shard: u64) -> Result<&Crosslink, Error> { - self.previous_crosslinks - .get(shard as usize) - .ok_or(Error::ShardOutOfBounds) - } - /// Generate a seed for the given `epoch`. /// - /// Spec v0.8.0 - pub fn get_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + /// Spec v0.9.1 + pub fn get_seed( + &self, + epoch: Epoch, + domain_type: Domain, + spec: &ChainSpec, + ) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. - let randao = { + let mix = { let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1; self.randao_mixes[i.as_usize() % self.randao_mixes.len()] }; - let active_index_root = self.get_active_index_root(epoch, spec)?; - let epoch_bytes = int_to_bytes32(epoch.as_u64()); + let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); + let epoch_bytes = int_to_bytes8(epoch.as_u64()); - let mut preimage = [0; 32 * 3]; - preimage[0..32].copy_from_slice(&randao[..]); - preimage[32..64].copy_from_slice(&active_index_root[..]); - preimage[64..].copy_from_slice(&epoch_bytes); + const NUM_DOMAIN_BYTES: usize = 4; + const NUM_EPOCH_BYTES: usize = 8; + const NUM_MIX_BYTES: usize = 32; + + let mut preimage = [0; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES]; + preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain_bytes); + preimage[NUM_DOMAIN_BYTES..NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES] + .copy_from_slice(&epoch_bytes); + preimage[NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES..].copy_from_slice(mix.as_bytes()); Ok(Hash256::from_slice(&hash(&preimage))) } /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_effective_balance( &self, validator_index: usize, @@ -811,43 +692,44 @@ impl BeaconState { /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn compute_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { - epoch + 1 + spec.activation_exit_delay + epoch + 1 + spec.max_seed_lookahead } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). /// /// Uses the epoch cache, and will error if it isn't initialized. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, - self.cache(RelativeEpoch::Current)?.active_validator_count() as u64 + self.committee_cache(RelativeEpoch::Current)? + .active_validator_count() as u64 / spec.churn_limit_quotient, )) } - /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an + /// Returns the `slot`, `index` and `committee_position` for which a validator must produce an /// attestation. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_attestation_duties( &self, validator_index: usize, relative_epoch: RelativeEpoch, ) -> Result, Error> { - let cache = self.cache(relative_epoch)?; + let cache = self.committee_cache(relative_epoch)?; Ok(cache.get_attestation_duties(validator_index)) } /// Return the combined effective balance of an array of validators. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_total_balance( &self, validator_indices: &[usize], @@ -887,7 +769,7 @@ impl BeaconState { relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result<(), Error> { - let i = Self::cache_index(relative_epoch); + let i = Self::committee_cache_index(relative_epoch); if self.committee_caches[i] .is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) @@ -906,7 +788,7 @@ impl BeaconState { ) -> Result<(), Error> { let epoch = relative_epoch.into_epoch(self.current_epoch()); - self.committee_caches[Self::cache_index(relative_epoch)] = + self.committee_caches[Self::committee_cache_index(relative_epoch)] = CommitteeCache::initialized(&self, epoch, spec)?; Ok(()) } @@ -917,16 +799,14 @@ impl BeaconState { /// /// Note: whilst this function will preserve already-built caches, it will not build any. pub fn advance_caches(&mut self) { - let next = Self::cache_index(RelativeEpoch::Previous); - let current = Self::cache_index(RelativeEpoch::Current); - let caches = &mut self.committee_caches[..]; caches.rotate_left(1); + + let next = Self::committee_cache_index(RelativeEpoch::Next); caches[next] = CommitteeCache::default(); - caches[current] = CommitteeCache::default(); } - fn cache_index(relative_epoch: RelativeEpoch) -> usize { + fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize { match relative_epoch { RelativeEpoch::Previous => 0, RelativeEpoch::Current => 1, @@ -934,21 +814,31 @@ impl BeaconState { } } + /// Get the committee cache for some `slot`. + /// + /// Return an error if the cache for the slot's epoch is not initialized. + fn committee_cache_at_slot(&self, slot: Slot) -> Result<&CommitteeCache, Error> { + let epoch = slot.epoch(T::slots_per_epoch()); + let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; + self.committee_cache(relative_epoch) + } + /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. - fn cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { - let cache = &self.committee_caches[Self::cache_index(relative_epoch)]; + fn committee_cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { + let cache = &self.committee_caches[Self::committee_cache_index(relative_epoch)]; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) } else { - Err(Error::CommitteeCacheUninitialized(relative_epoch)) + Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) } } /// Drops the cache, leaving it in an uninitialized state. fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) { - self.committee_caches[Self::cache_index(relative_epoch)] = CommitteeCache::default(); + self.committee_caches[Self::committee_cache_index(relative_epoch)] = + CommitteeCache::default(); } /// Updates the pubkey cache, if required. diff --git a/eth2/types/src/beacon_state/committee_cache.rs b/eth2/types/src/beacon_state/committee_cache.rs index d9d2e98642..54c6853ba8 100644 --- a/eth2/types/src/beacon_state/committee_cache.rs +++ b/eth2/types/src/beacon_state/committee_cache.rs @@ -15,22 +15,20 @@ pub struct CommitteeCache { initialized_epoch: Option, shuffling: Vec, shuffling_positions: Vec>, - shuffling_start_shard: u64, - shard_count: u64, - committee_count: usize, + committees_per_slot: u64, slots_per_epoch: u64, } impl CommitteeCache { /// Return a new, fully initialized cache. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn initialized( state: &BeaconState, epoch: Epoch, spec: &ChainSpec, ) -> Result { - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) + RelativeEpoch::from_epoch(state.current_epoch(), epoch) .map_err(|_| Error::EpochOutOfBounds)?; // May cause divide-by-zero errors. @@ -44,14 +42,10 @@ impl CommitteeCache { return Err(Error::InsufficientValidators); } - let committee_count = - T::get_committee_count(active_validator_indices.len(), spec.target_committee_size) - as usize; + let committees_per_slot = + T::get_committee_count_per_slot(active_validator_indices.len(), spec) as u64; - let shuffling_start_shard = - Self::compute_start_shard(state, relative_epoch, active_validator_indices.len(), spec); - - let seed = state.get_seed(epoch, spec)?; + let seed = state.get_seed(epoch, Domain::BeaconAttester, spec)?; let shuffling = shuffle_list( active_validator_indices, @@ -73,46 +67,13 @@ impl CommitteeCache { Ok(CommitteeCache { initialized_epoch: Some(epoch), - shuffling_start_shard, shuffling, - shard_count: T::shard_count() as u64, - committee_count, - slots_per_epoch: T::slots_per_epoch(), shuffling_positions, + committees_per_slot, + slots_per_epoch: T::slots_per_epoch(), }) } - /// Compute the shard which must be attested to first in a given relative epoch. - /// - /// The `active_validator_count` must be the number of validators active at `relative_epoch`. - /// - /// Spec v0.8.1 - pub fn compute_start_shard( - state: &BeaconState, - relative_epoch: RelativeEpoch, - active_validator_count: usize, - spec: &ChainSpec, - ) -> u64 { - match relative_epoch { - RelativeEpoch::Current => state.start_shard, - RelativeEpoch::Previous => { - let shard_delta = - T::get_shard_delta(active_validator_count, spec.target_committee_size); - - (state.start_shard + T::ShardCount::to_u64() - shard_delta) - % T::ShardCount::to_u64() - } - RelativeEpoch::Next => { - let current_active_validators = - get_active_validator_count(&state.validators, state.current_epoch()); - let shard_delta = - T::get_shard_delta(current_active_validators, spec.target_committee_size); - - (state.start_shard + shard_delta) % T::ShardCount::to_u64() - } - } - } - /// Returns `true` if the cache has been initialized at the supplied `epoch`. /// /// An non-initialized cache does not provide any useful information. @@ -126,7 +87,7 @@ impl CommitteeCache { /// /// Always returns `&[]` for a non-initialized epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn active_validator_indices(&self) -> &[usize] { &self.shuffling } @@ -135,34 +96,51 @@ impl CommitteeCache { /// /// Always returns `&[]` for a non-initialized epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn shuffling(&self) -> &[usize] { &self.shuffling } - /// Return `Some(CrosslinkCommittee)` if the given shard has a committee during the given - /// `epoch`. + /// Get the Beacon committee for the given `slot` and `index`. /// - /// Always returns `None` for a non-initialized epoch. - /// - /// Spec v0.8.1 - pub fn get_crosslink_committee_for_shard(&self, shard: Shard) -> Option { - if shard >= self.shard_count || self.initialized_epoch.is_none() { + /// Return `None` if the cache is uninitialized, or the `slot` or `index` is out of range. + pub fn get_beacon_committee( + &self, + slot: Slot, + index: CommitteeIndex, + ) -> Option { + if self.initialized_epoch.is_none() + || !self.is_initialized_at(slot.epoch(self.slots_per_epoch)) + || index >= self.committees_per_slot + { return None; } let committee_index = - (shard + self.shard_count - self.shuffling_start_shard) % self.shard_count; + (slot.as_u64() % self.slots_per_epoch) * self.committees_per_slot + index; let committee = self.compute_committee(committee_index as usize)?; - let slot = self.crosslink_slot_for_shard(shard)?; - Some(CrosslinkCommittee { - shard, - committee, + Some(BeaconCommittee { slot, + index, + committee, }) } + /// Get all the Beacon committees at a given `slot`. + pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result, Error> { + if self.initialized_epoch.is_none() { + return Err(Error::CommitteeCacheUninitialized(None)); + } + + (0..self.committees_per_slot()) + .map(|index| { + self.get_beacon_committee(slot, index) + .ok_or(Error::NoCommittee { slot, index }) + }) + .collect() + } + /// Returns the `AttestationDuty` for the given `validator_index`. /// /// Returns `None` if the `validator_index` does not exist, does not have duties or `Self` is @@ -170,36 +148,46 @@ impl CommitteeCache { pub fn get_attestation_duties(&self, validator_index: usize) -> Option { let i = self.shuffled_position(validator_index)?; - (0..self.committee_count) + (0..self.epoch_committee_count()) .map(|nth_committee| (nth_committee, self.compute_committee_range(nth_committee))) .find(|(_, range)| { if let Some(range) = range { - (range.start <= i) && (range.end > i) + range.start <= i && range.end > i } else { false } }) .and_then(|(nth_committee, range)| { - let shard = (self.shuffling_start_shard + nth_committee as u64) % self.shard_count; - let slot = self.crosslink_slot_for_shard(shard)?; + let (slot, index) = self.convert_to_slot_and_index(nth_committee as u64)?; let range = range?; - let committee_index = i - range.start; + let committee_position = i - range.start; let committee_len = range.end - range.start; Some(AttestationDuty { slot, - shard, - committee_index, + index, + committee_position, committee_len, }) }) } + /// Convert an index addressing the list of all epoch committees into a slot and per-slot index. + fn convert_to_slot_and_index( + &self, + global_committee_index: u64, + ) -> Option<(Slot, CommitteeIndex)> { + let epoch_start_slot = self.initialized_epoch?.start_slot(self.slots_per_epoch); + let slot_offset = global_committee_index / self.committees_per_slot; + let index = global_committee_index % self.committees_per_slot; + Some((epoch_start_slot + slot_offset, index)) + } + /// Returns the number of active validators in the initialized epoch. /// /// Always returns `usize::default()` for a non-initialized epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn active_validator_count(&self) -> usize { self.shuffling.len() } @@ -208,64 +196,19 @@ impl CommitteeCache { /// /// Always returns `usize::default()` for a non-initialized epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn epoch_committee_count(&self) -> usize { - self.committee_count + self.committees_per_slot as usize * self.slots_per_epoch as usize } - /// Returns the shard assigned to the first committee in the initialized epoch. - /// - /// Always returns `u64::default()` for a non-initialized epoch. - pub fn epoch_start_shard(&self) -> u64 { - self.shuffling_start_shard - } - - /// Returns all crosslink committees, if any, for the given slot in the initialized epoch. - /// - /// Returns `None` if `slot` is not in the initialized epoch, or if `Self` is not initialized. - /// - /// Spec v0.8.1 - pub fn get_crosslink_committees_for_slot(&self, slot: Slot) -> Option> { - let position = self - .initialized_epoch? - .position(slot, self.slots_per_epoch)?; - let committees_per_slot = self.committee_count / self.slots_per_epoch as usize; - let position = position * committees_per_slot; - - if position >= self.committee_count { - None - } else { - let mut committees = Vec::with_capacity(committees_per_slot); - - for index in position..position + committees_per_slot { - let committee = self.compute_committee(index)?; - let shard = (self.shuffling_start_shard + index as u64) % self.shard_count; - - committees.push(CrosslinkCommittee { - committee, - shard, - slot, - }); - } - - Some(committees) - } - } - - /// Returns the first committee of the first slot of the initialized epoch. - /// - /// Always returns `None` for a non-initialized epoch. - /// - /// Spec v0.8.1 - pub fn first_committee_at_slot(&self, slot: Slot) -> Option<&[usize]> { - self.get_crosslink_committees_for_slot(slot)? - .first() - .and_then(|cc| Some(cc.committee)) + /// Returns the number of committees per slot for this cache's epoch. + pub fn committees_per_slot(&self) -> u64 { + self.committees_per_slot } /// Returns a slice of `self.shuffling` that represents the `index`'th committee in the epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn compute_committee(&self, index: usize) -> Option<&[usize]> { Some(&self.shuffling[self.compute_committee_range(index)?]) } @@ -276,34 +219,20 @@ impl CommitteeCache { /// /// Will also return `None` if the index is out of bounds. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn compute_committee_range(&self, index: usize) -> Option> { - if self.committee_count == 0 || index >= self.committee_count { + let count = self.epoch_committee_count(); + if count == 0 || index >= count { return None; } let num_validators = self.shuffling.len(); - let count = self.committee_count; - let start = (num_validators * index) / count; let end = (num_validators * (index + 1)) / count; Some(start..end) } - /// Returns the `slot` that `shard` will be crosslink-ed in during the initialized epoch. - /// - /// Always returns `None` for a non-initialized epoch. - /// - /// Spec v0.8.1 - fn crosslink_slot_for_shard(&self, shard: u64) -> Option { - let offset = (shard + self.shard_count - self.shuffling_start_shard) % self.shard_count; - Some( - self.initialized_epoch?.start_slot(self.slots_per_epoch) - + offset / (self.committee_count as u64 / self.slots_per_epoch), - ) - } - /// Returns the index of some validator in `self.shuffling`. /// /// Always returns `None` for a non-initialized epoch. @@ -317,7 +246,7 @@ impl CommitteeCache { /// Returns a list of all `validators` indices where the validator is active at the given /// `epoch`. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { let mut active = Vec::with_capacity(validators.len()); @@ -331,11 +260,3 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } - -/// Returns the count of all `validators` indices where the validator is active at the given -/// `epoch`. -/// -/// Spec v0.8.1 -fn get_active_validator_count(validators: &[Validator], epoch: Epoch) -> usize { - validators.iter().filter(|v| v.is_active_at(epoch)).count() -} diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index a2acae6e2d..ee2ca8eed0 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -1,8 +1,6 @@ #![cfg(test)] use super::*; use crate::{test_utils::*, *}; -use serde_derive::{Deserialize, Serialize}; -use ssz_types::typenum::*; #[test] fn default_values() { @@ -10,13 +8,11 @@ fn default_values() { assert_eq!(cache.is_initialized_at(Epoch::new(0)), false); assert!(&cache.active_validator_indices().is_empty()); - assert_eq!(cache.get_crosslink_committee_for_shard(0), None); + assert_eq!(cache.get_beacon_committee(Slot::new(0), 0), None); assert_eq!(cache.get_attestation_duties(0), None); assert_eq!(cache.active_validator_count(), 0); assert_eq!(cache.epoch_committee_count(), 0); - assert_eq!(cache.epoch_start_shard(), 0); - assert_eq!(cache.get_crosslink_committees_for_slot(Slot::new(0)), None); - assert_eq!(cache.first_committee_at_slot(Slot::new(0)), None); + assert!(cache.get_beacon_committees_at_slot(Slot::new(0)).is_err()); } fn new_state(validator_count: usize, slot: Slot) -> BeaconState { @@ -78,9 +74,15 @@ fn shuffles_for_the_right_epoch() { state.randao_mixes = FixedVector::from(distinct_hashes); - let previous_seed = state.get_seed(state.previous_epoch(), spec).unwrap(); - let current_seed = state.get_seed(state.current_epoch(), spec).unwrap(); - let next_seed = state.get_seed(state.next_epoch(), spec).unwrap(); + let previous_seed = state + .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec) + .unwrap(); + let current_seed = state + .get_seed(state.current_epoch(), Domain::BeaconAttester, spec) + .unwrap(); + let next_seed = state + .get_seed(state.next_epoch(), Domain::BeaconAttester, spec) + .unwrap(); assert!((previous_seed != current_seed) && (current_seed != next_seed)); @@ -116,153 +118,3 @@ fn shuffles_for_the_right_epoch() { assert_eq!(cache.shuffling, shuffling_with_seed(next_seed)); assert_shuffling_positions_accurate(&cache); } - -#[test] -fn can_start_on_any_shard() { - let num_validators = MinimalEthSpec::minimum_validator_count() * 2; - let epoch = Epoch::new(100_000_000); - let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); - - let mut state = new_state::(num_validators, slot); - let spec = &MinimalEthSpec::default_spec(); - - let target_committee_size = MinimalEthSpec::default_spec().target_committee_size; - - let shard_delta = MinimalEthSpec::get_shard_delta(num_validators, target_committee_size); - let shard_count = MinimalEthSpec::shard_count() as u64; - - for i in 0..MinimalEthSpec::shard_count() as u64 { - state.start_shard = i; - - let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling_start_shard, i); - - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!( - cache.shuffling_start_shard, - (i + shard_count - shard_delta) % shard_count - ); - - let cache = CommitteeCache::initialized(&state, state.next_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling_start_shard, (i + shard_delta) % shard_count); - } -} - -/// This spec has more shards than slots in an epoch, permitting epochs where not all shards are -/// included in the committee. -#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] -pub struct ExcessShardsEthSpec; - -impl EthSpec for ExcessShardsEthSpec { - type ShardCount = U128; - type SlotsPerEpoch = U8; - type MaxPendingAttestations = U1024; - - params_from_eth_spec!(MinimalEthSpec { - JustificationBitsLength, - MaxValidatorsPerCommittee, - GenesisEpoch, - SlotsPerEth1VotingPeriod, - SlotsPerHistoricalRoot, - EpochsPerHistoricalVector, - EpochsPerSlashingsVector, - HistoricalRootsLimit, - ValidatorRegistryLimit, - MaxProposerSlashings, - MaxAttesterSlashings, - MaxAttestations, - MaxDeposits, - MaxVoluntaryExits, - MaxTransfers - }); - - fn default_spec() -> ChainSpec { - ChainSpec::minimal() - } -} - -#[test] -fn starts_on_the_correct_shard() { - let spec = &ExcessShardsEthSpec::default_spec(); - - let num_validators = spec.target_committee_size * ExcessShardsEthSpec::shard_count(); - - let epoch = Epoch::new(100_000_000); - let slot = epoch.start_slot(ExcessShardsEthSpec::slots_per_epoch()); - - let mut state = new_state::(num_validators, slot); - - let validator_count = state.validators.len(); - - let previous_epoch = state.previous_epoch(); - let current_epoch = state.current_epoch(); - let next_epoch = state.next_epoch(); - - for (i, mut v) in state.validators.iter_mut().enumerate() { - let epoch = if i < validator_count / 4 { - previous_epoch - } else if i < validator_count / 2 { - current_epoch - } else { - next_epoch - }; - - v.activation_epoch = epoch; - } - - assert_eq!( - get_active_validator_count(&state.validators, previous_epoch), - validator_count / 4 - ); - assert_eq!( - get_active_validator_count(&state.validators, current_epoch), - validator_count / 2 - ); - assert_eq!( - get_active_validator_count(&state.validators, next_epoch), - validator_count - ); - - let previous_shards = ExcessShardsEthSpec::get_committee_count( - get_active_validator_count(&state.validators, previous_epoch), - spec.target_committee_size, - ); - let current_shards = ExcessShardsEthSpec::get_committee_count( - get_active_validator_count(&state.validators, current_epoch), - spec.target_committee_size, - ); - let next_shards = ExcessShardsEthSpec::get_committee_count( - get_active_validator_count(&state.validators, next_epoch), - spec.target_committee_size, - ); - - assert_eq!( - previous_shards as usize, - ExcessShardsEthSpec::shard_count() / 4 - ); - assert_eq!( - current_shards as usize, - ExcessShardsEthSpec::shard_count() / 2 - ); - assert_eq!(next_shards as usize, ExcessShardsEthSpec::shard_count()); - - let shard_count = ExcessShardsEthSpec::shard_count(); - for i in 0..ExcessShardsEthSpec::shard_count() { - state.start_shard = i as u64; - - let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); - assert_eq!(cache.shuffling_start_shard as usize, i); - - let cache = CommitteeCache::initialized(&state, state.previous_epoch(), spec).unwrap(); - assert_eq!( - cache.shuffling_start_shard as usize, - (i + shard_count - previous_shards) % shard_count - ); - - let cache = CommitteeCache::initialized(&state, state.next_epoch(), spec).unwrap(); - assert_eq!( - cache.shuffling_start_shard as usize, - (i + current_shards) % shard_count - ); - } -} diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 708f23fd58..a41a46900e 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -1,7 +1,6 @@ #![cfg(test)] use super::*; use crate::test_utils::*; -use std::ops::RangeInclusive; ssz_tests!(FoundationBeaconState); @@ -19,34 +18,49 @@ fn test_beacon_proposer_index() { state }; + // Get the i'th candidate proposer for the given state and slot + let ith_candidate = |state: &BeaconState, slot: Slot, i: usize| { + let epoch = slot.epoch(T::slots_per_epoch()); + let seed = state.get_beacon_proposer_seed(slot, &spec).unwrap(); + let active_validators = state.get_active_validator_indices(epoch); + active_validators[compute_shuffled_index( + i, + active_validators.len(), + &seed, + spec.shuffle_round_count, + ) + .unwrap()] + }; + // Run a test on the state. - let test = |state: &BeaconState, slot: Slot, shuffling_index: usize| { - let shuffling = state.get_shuffling(relative_epoch).unwrap(); + let test = |state: &BeaconState, slot: Slot, candidate_index: usize| { assert_eq!( - state.get_beacon_proposer_index(slot, relative_epoch, &spec), - Ok(shuffling[shuffling_index]) + state.get_beacon_proposer_index(slot, &spec), + Ok(ith_candidate(state, slot, candidate_index)) ); }; - // Test where we have one validator per slot + // Test where we have one validator per slot. + // 0th candidate should be chosen every time. let state = build_state(T::slots_per_epoch() as usize); for i in 0..T::slots_per_epoch() { - test(&state, Slot::from(i), i as usize); + test(&state, Slot::from(i), 0); } - // Test where we have two validators per slot + // Test where we have two validators per slot. + // 0th candidate should be chosen every time. let state = build_state(T::slots_per_epoch() as usize * 2); for i in 0..T::slots_per_epoch() { - test(&state, Slot::from(i), i as usize * 2); + test(&state, Slot::from(i), 0); } // Test with two validators per slot, first validator has zero balance. let mut state = build_state(T::slots_per_epoch() as usize * 2); - let shuffling = state.get_shuffling(relative_epoch).unwrap().to_vec(); - state.validators[shuffling[0]].effective_balance = 0; + let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0); + state.validators[slot0_candidate0].effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..T::slots_per_epoch() { - test(&state, Slot::from(i), i as usize * 2); + test(&state, Slot::from(i), 0); } } @@ -55,72 +69,6 @@ fn beacon_proposer_index() { test_beacon_proposer_index::(); } -/// Should produce (note the set notation brackets): -/// -/// (current_epoch - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY, current_epoch + -/// ACTIVATION_EXIT_DELAY] -fn active_index_range(current_epoch: Epoch) -> RangeInclusive { - let delay = T::default_spec().activation_exit_delay; - - let start: i32 = - current_epoch.as_u64() as i32 - T::epochs_per_historical_vector() as i32 + delay as i32; - let end = current_epoch + delay; - - let start: Epoch = if start < 0 { - Epoch::new(0) - } else { - Epoch::from(start as u64 + 1) - }; - - start..=end -} - -/// Test getting an active index root at the start and end of the valid range, and one either side -/// of that range. -fn test_active_index(state_slot: Slot) { - let spec = T::default_spec(); - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec); - let (mut state, _keypairs) = builder.build(); - state.slot = state_slot; - - let range = active_index_range::(state.current_epoch()); - - let modulo = |epoch: Epoch| epoch.as_usize() % T::epochs_per_historical_vector(); - - // Test the start and end of the range. - assert_eq!( - state.get_active_index_root_index(*range.start(), &spec, AllowNextEpoch::False), - Ok(modulo(*range.start())) - ); - assert_eq!( - state.get_active_index_root_index(*range.end(), &spec, AllowNextEpoch::False), - Ok(modulo(*range.end())) - ); - - // One either side of the range. - if state.current_epoch() > 0 { - // Test is invalid on epoch zero, cannot subtract from zero. - assert_eq!( - state.get_active_index_root_index(*range.start() - 1, &spec, AllowNextEpoch::False), - Err(Error::EpochOutOfBounds) - ); - } - assert_eq!( - state.get_active_index_root_index(*range.end() + 1, &spec, AllowNextEpoch::False), - Err(Error::EpochOutOfBounds) - ); -} - -#[test] -fn get_active_index_root_index() { - test_active_index::(Slot::new(0)); - - let epoch = Epoch::from(MainnetEthSpec::epochs_per_historical_vector() * 4); - let slot = epoch.start_slot(MainnetEthSpec::slots_per_epoch()); - test_active_index::(slot); -} - /// Test that /// /// 1. Using the cache before it's built fails. @@ -138,28 +86,26 @@ fn test_cache_initialization<'a, T: EthSpec>( // Assuming the cache isn't already built, assert that a call to a cache-using function fails. assert_eq!( state.get_attestation_duties(0, relative_epoch), - Err(BeaconStateError::CommitteeCacheUninitialized( + Err(BeaconStateError::CommitteeCacheUninitialized(Some( relative_epoch - )) + ))) ); // Build the cache. state.build_committee_cache(relative_epoch, spec).unwrap(); // Assert a call to a cache-using function passes. - let _ = state - .get_beacon_proposer_index(slot, relative_epoch, spec) - .unwrap(); + let _ = state.get_beacon_proposer_index(slot, spec).unwrap(); // Drop the cache. state.drop_committee_cache(relative_epoch); // Assert a call to a cache-using function fail. assert_eq!( - state.get_beacon_proposer_index(slot, relative_epoch, spec), - Err(BeaconStateError::CommitteeCacheUninitialized( + state.get_beacon_committee(slot, 0), + Err(BeaconStateError::CommitteeCacheUninitialized(Some( relative_epoch - )) + ))) ); } @@ -212,10 +158,8 @@ mod committees { spec: &ChainSpec, ) { let active_indices: Vec = (0..validator_count).collect(); - let seed = state.get_seed(epoch, spec).unwrap(); + let seed = state.get_seed(epoch, Domain::BeaconAttester, spec).unwrap(); let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap(); - let start_shard = - CommitteeCache::compute_start_shard(&state, relative_epoch, active_indices.len(), spec); let mut ordered_indices = state .get_cached_active_validator_indices(relative_epoch) @@ -231,34 +175,27 @@ mod committees { shuffle_list(active_indices, spec.shuffle_round_count, &seed[..], false).unwrap(); let mut expected_indices_iter = shuffling.iter(); - let mut expected_shards_iter = - (0..T::ShardCount::to_u64()).map(|i| (start_shard + i) % T::ShardCount::to_u64()); // Loop through all slots in the epoch being tested. for slot in epoch.slot_iter(T::slots_per_epoch()) { - let crosslink_committees = state.get_crosslink_committees_at_slot(slot).unwrap(); + let beacon_committees = state.get_beacon_committees_at_slot(slot).unwrap(); // Assert that the number of committees in this slot is consistent with the reported number // of committees in an epoch. assert_eq!( - crosslink_committees.len() as u64, - state.get_committee_count(relative_epoch).unwrap() / T::slots_per_epoch() + beacon_committees.len() as u64, + state.get_epoch_committee_count(relative_epoch).unwrap() / T::slots_per_epoch() ); - for cc in crosslink_committees { - // Assert that shards are assigned contiguously across committees. - assert_eq!(expected_shards_iter.next().unwrap(), cc.shard); + for (committee_index, bc) in beacon_committees.iter().enumerate() { + // Assert that indices are assigned sequentially across committees. + assert_eq!(committee_index as u64, bc.index); // Assert that a committee lookup via slot is identical to a committee lookup via - // shard. - assert_eq!( - state - .get_crosslink_committee_for_shard(cc.shard, relative_epoch) - .unwrap(), - cc - ); + // index. + assert_eq!(state.get_beacon_committee(bc.slot, bc.index).unwrap(), *bc); // Loop through each validator in the committee. - for (committee_i, validator_i) in cc.committee.iter().enumerate() { + for (committee_i, validator_i) in bc.committee.iter().enumerate() { // Assert the validators are assigned contiguously across committees. assert_eq!( *validator_i, @@ -266,24 +203,21 @@ mod committees { "Non-sequential validators." ); // Assert a call to `get_attestation_duties` is consistent with a call to - // `get_crosslink_committees_at_slot` + // `get_beacon_committees_at_slot` let attestation_duty = state .get_attestation_duties(*validator_i, relative_epoch) .unwrap() .unwrap(); assert_eq!(attestation_duty.slot, slot); - assert_eq!(attestation_duty.shard, cc.shard); - assert_eq!(attestation_duty.committee_index, committee_i); - assert_eq!(attestation_duty.committee_len, cc.committee.len()); + assert_eq!(attestation_duty.index, bc.index); + assert_eq!(attestation_duty.committee_position, committee_i); + assert_eq!(attestation_duty.committee_len, bc.committee.len()); } } } // Assert that all validators were assigned to a committee. assert!(expected_indices_iter.next().is_none()); - - // Assert that all shards were assigned to a committee. - assert!(expected_shards_iter.next().is_none()); } fn committee_consistency_test( @@ -327,7 +261,10 @@ mod committees { fn committee_consistency_test_suite(cached_epoch: RelativeEpoch) { let spec = T::default_spec(); - let validator_count = (T::shard_count() * spec.target_committee_size) + 1; + let validator_count = spec.max_committees_per_slot + * T::slots_per_epoch() as usize + * spec.target_committee_size + + 1; committee_consistency_test::(validator_count as usize, Epoch::new(0), cached_epoch); diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index bef78d99f1..3889a392ad 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -5,19 +5,18 @@ use utils::{u8_from_hex_str, u8_to_hex_str}; /// Each of the BLS signature domains. /// -/// Spec v0.8.1 +/// Spec v0.9.1 pub enum Domain { BeaconProposer, + BeaconAttester, Randao, - Attestation, Deposit, VoluntaryExit, - Transfer, } /// Holds all the "constants" for a BeaconChain. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] #[serde(default)] pub struct ChainSpec { @@ -33,6 +32,7 @@ pub struct ChainSpec { /* * Misc */ + pub max_committees_per_slot: usize, pub target_committee_size: usize, pub min_per_epoch_churn_limit: u64, pub churn_limit_quotient: u64, @@ -61,10 +61,9 @@ pub struct ChainSpec { pub milliseconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, - pub activation_exit_delay: u64, + pub max_seed_lookahead: Epoch, pub min_validator_withdrawability_delay: Epoch, pub persistent_committee_period: u64, - pub max_epochs_per_crosslink: u64, pub min_epochs_to_inactivity_penalty: u64, /* @@ -78,18 +77,17 @@ pub struct ChainSpec { /* * Signature domains - * - * Fields should be private to prevent accessing a domain that hasn't been modified to suit - * some `Fork`. - * - * Use `ChainSpec::get_domain(..)` to access these values. */ domain_beacon_proposer: u32, + domain_beacon_attester: u32, domain_randao: u32, - domain_attestation: u32, domain_deposit: u32, domain_voluntary_exit: u32, - domain_transfer: u32, + + /* + * Fork choice + */ + pub safe_slots_to_update_justified: u64, /* * Eth1 @@ -103,18 +101,24 @@ pub struct ChainSpec { } impl ChainSpec { - /// Get the domain number that represents the fork meta and signature domain. + /// Get the domain number, unmodified by the fork. /// - /// Spec v0.8.1 - pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { - let domain_constant = match domain { + /// Spec v0.9.1 + pub fn get_domain_constant(&self, domain: Domain) -> u32 { + match domain { Domain::BeaconProposer => self.domain_beacon_proposer, + Domain::BeaconAttester => self.domain_beacon_attester, Domain::Randao => self.domain_randao, - Domain::Attestation => self.domain_attestation, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, - Domain::Transfer => self.domain_transfer, - }; + } + } + + /// Get the domain number that represents the fork meta and signature domain. + /// + /// Spec v0.9.1 + pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { + let domain_constant = self.get_domain_constant(domain); let mut bytes: Vec = int_to_bytes4(domain_constant); bytes.append(&mut fork.get_fork_version(epoch).to_vec()); @@ -143,20 +147,21 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn mainnet() -> Self { Self { /* * Constants */ far_future_epoch: Epoch::new(u64::max_value()), - base_rewards_per_epoch: 5, + base_rewards_per_epoch: 4, deposit_contract_tree_depth: 32, seconds_per_day: 86400, /* * Misc */ + max_committees_per_slot: 64, target_committee_size: 128, min_per_epoch_churn_limit: 4, churn_limit_quotient: 65_536, @@ -181,13 +186,12 @@ impl ChainSpec { /* * Time parameters */ - milliseconds_per_slot: 6_000, + milliseconds_per_slot: 12_000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), - activation_exit_delay: 4, + max_seed_lookahead: Epoch::new(4), min_validator_withdrawability_delay: Epoch::new(256), persistent_committee_period: 2_048, - max_epochs_per_crosslink: 64, min_epochs_to_inactivity_penalty: 4, /* @@ -203,11 +207,15 @@ impl ChainSpec { * Signature domains */ domain_beacon_proposer: 0, - domain_randao: 1, - domain_attestation: 2, + domain_beacon_attester: 1, + domain_randao: 2, domain_deposit: 3, domain_voluntary_exit: 4, - domain_transfer: 5, + + /* + * Fork choice + */ + safe_slots_to_update_justified: 8, /* * Eth1 @@ -235,7 +243,7 @@ impl ChainSpec { /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/configs/constant_presets/minimal.yaml /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; @@ -244,7 +252,6 @@ impl ChainSpec { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, - max_epochs_per_crosslink: 4, network_id: 2, // lighthouse testnet network id boot_nodes, eth1_follow_distance: 16, @@ -302,10 +309,9 @@ mod tests { let spec = ChainSpec::mainnet(); test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); + test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); - test_domain(Domain::Attestation, spec.domain_attestation, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); - test_domain(Domain::Transfer, spec.domain_transfer, &spec); } } diff --git a/eth2/types/src/checkpoint.rs b/eth2/types/src/checkpoint.rs index d420bc82e0..d1fa49becd 100644 --- a/eth2/types/src/checkpoint.rs +++ b/eth2/types/src/checkpoint.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, Clone, diff --git a/eth2/types/src/compact_committee.rs b/eth2/types/src/compact_committee.rs deleted file mode 100644 index f35edff08a..0000000000 --- a/eth2/types/src/compact_committee.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::test_utils::TestRandom; -use crate::{EthSpec, PublicKey}; -use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use test_random_derive::TestRandom; -use tree_hash_derive::TreeHash; - -/// Spec v0.8.0 -#[derive(Clone, Debug, PartialEq, TreeHash, Encode, Decode, Serialize, Deserialize, TestRandom)] -#[serde(bound = "T: EthSpec")] -pub struct CompactCommittee { - pub pubkeys: VariableList, - pub compact_validators: VariableList, -} - -impl Default for CompactCommittee { - fn default() -> Self { - Self { - pubkeys: VariableList::empty(), - compact_validators: VariableList::empty(), - } - } -} diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs deleted file mode 100644 index 817687b03b..0000000000 --- a/eth2/types/src/crosslink.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, Hash256}; - -use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use test_random_derive::TestRandom; -use tree_hash_derive::TreeHash; - -/// Specifies the block hash for a shard at an epoch. -/// -/// Spec v0.8.0 -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Serialize, - Deserialize, - Hash, - Encode, - Decode, - TreeHash, - TestRandom, -)] -pub struct Crosslink { - pub shard: u64, - pub parent_root: Hash256, - // Crosslinking data - pub start_epoch: Epoch, - pub end_epoch: Epoch, - pub data_root: Hash256, -} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_tests!(Crosslink); -} diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 0e68454e04..f60932f9c8 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -11,7 +11,7 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32; /// A deposit to potentially become a beacon chain validator. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { pub proof: FixedVector, diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 95588e25a8..a6a1a437fd 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -11,7 +11,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// The data supplied by the user to the deposit contract. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, PartialEq, @@ -35,7 +35,7 @@ pub struct DepositData { impl DepositData { /// Generate the signature for a given DepositData details. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn create_signature(&self, secret_key: &SecretKey, spec: &ChainSpec) -> SignatureBytes { let msg = self.signed_root(); let domain = spec.get_deposit_domain(); diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index d98e89cee8..dfca3ec370 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive( Debug, PartialEq, diff --git a/eth2/types/src/eth_spec.rs b/eth2/types/src/eth_spec.rs index f589b3d3ef..5377254afd 100644 --- a/eth2/types/src/eth_spec.rs +++ b/eth2/types/src/eth_spec.rs @@ -1,8 +1,8 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; use ssz_types::typenum::{ - Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U4, U4096, U64, U65536, U8, - U8192, + Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2048, U32, U4, U4096, U64, + U65536, U8, U8192, }; use std::fmt::Debug; @@ -14,7 +14,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /* * Misc */ - type ShardCount: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Initial values @@ -41,7 +40,6 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { type MaxAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxDeposits: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxVoluntaryExits: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type MaxTransfers: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -58,29 +56,21 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { Epoch::new(Self::GenesisEpoch::to_u64()) } - /// Return the number of committees in one epoch. + /// Return the number of committees per slot. /// - /// Spec v0.8.1 - fn get_committee_count(active_validator_count: usize, target_committee_size: usize) -> usize { - let shard_count = Self::shard_count(); - let slots_per_epoch = Self::slots_per_epoch() as usize; + /// Note: the number of committees per slot is constant in each epoch, and depends only on + /// the `active_validator_count` during the slot's epoch. + /// + /// Spec v0.9.1 + fn get_committee_count_per_slot(active_validator_count: usize, spec: &ChainSpec) -> usize { + let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); std::cmp::max( 1, std::cmp::min( - shard_count / slots_per_epoch, - active_validator_count / slots_per_epoch / target_committee_size, + spec.max_committees_per_slot, + active_validator_count / slots_per_epoch / spec.target_committee_size, ), - ) * slots_per_epoch - } - - /// Return the number of shards to increment `state.start_shard` by in a given epoch. - /// - /// Spec v0.8.1 - fn get_shard_delta(active_validator_count: usize, target_committee_size: usize) -> u64 { - std::cmp::min( - Self::get_committee_count(active_validator_count, target_committee_size) as u64, - Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::slots_per_epoch(), ) } @@ -95,37 +85,30 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// Returns the `SLOTS_PER_EPOCH` constant for this specification. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn slots_per_epoch() -> u64 { Self::SlotsPerEpoch::to_u64() } - /// Returns the `SHARD_COUNT` constant for this specification. - /// - /// Spec v0.8.1 - fn shard_count() -> usize { - Self::ShardCount::to_usize() - } - /// Returns the `SLOTS_PER_HISTORICAL_ROOT` constant for this specification. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn slots_per_historical_root() -> usize { Self::SlotsPerHistoricalRoot::to_usize() } /// Returns the `EPOCHS_PER_HISTORICAL_VECTOR` constant for this specification. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn epochs_per_historical_vector() -> usize { Self::EpochsPerHistoricalVector::to_usize() } /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification. /// - /// Spec v0.8.1 + /// Spec v0.9.1 fn slots_per_eth1_voting_period() -> usize { - Self::EpochsPerHistoricalVector::to_usize() + Self::SlotsPerEth1VotingPeriod::to_usize() } } @@ -139,16 +122,15 @@ macro_rules! params_from_eth_spec { /// Ethereum Foundation specifications. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { type JustificationBitsLength = U4; - type ShardCount = U1024; - type MaxValidatorsPerCommittee = U4096; + type MaxValidatorsPerCommittee = U2048; type GenesisEpoch = U0; - type SlotsPerEpoch = U64; + type SlotsPerEpoch = U32; type SlotsPerEth1VotingPeriod = U1024; type SlotsPerHistoricalRoot = U8192; type EpochsPerHistoricalVector = U65536; @@ -160,8 +142,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttestations = U128; type MaxDeposits = U16; type MaxVoluntaryExits = U16; - type MaxTransfers = U0; - type MaxPendingAttestations = U8192; // 128 max attestations * 64 slots per epoch + type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -174,12 +155,11 @@ pub type FoundationBeaconState = BeaconState; /// /// https://github.com/ethereum/eth2.0-specs/blob/v0.8.0/configs/constant_presets/minimal.yaml /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { - type ShardCount = U8; type SlotsPerEpoch = U8; type SlotsPerEth1VotingPeriod = U16; type SlotsPerHistoricalRoot = U64; @@ -197,8 +177,7 @@ impl EthSpec for MinimalEthSpec { MaxAttesterSlashings, MaxAttestations, MaxDeposits, - MaxVoluntaryExits, - MaxTransfers + MaxVoluntaryExits }); fn default_spec() -> ChainSpec { @@ -213,7 +192,6 @@ pub type MinimalBeaconState = BeaconState; pub struct InteropEthSpec; impl EthSpec for InteropEthSpec { - type ShardCount = U8; type SlotsPerEpoch = U8; type SlotsPerHistoricalRoot = U64; type SlotsPerEth1VotingPeriod = U16; @@ -231,8 +209,7 @@ impl EthSpec for InteropEthSpec { MaxAttesterSlashings, MaxAttestations, MaxDeposits, - MaxVoluntaryExits, - MaxTransfers + MaxVoluntaryExits }); fn default_spec() -> ChainSpec { diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 2da3218d11..97aadddbf4 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive( Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] @@ -30,7 +30,7 @@ pub struct Fork { impl Fork { /// Return the fork version of the given ``epoch``. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] { if epoch < self.epoch { return self.previous_version; diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 70ace69f96..8158ba3737 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct HistoricalBatch { pub block_roots: FixedVector, diff --git a/eth2/types/src/indexed_attestation.rs b/eth2/types/src/indexed_attestation.rs index ab7cfbf7ec..32bd134da6 100644 --- a/eth2/types/src/indexed_attestation.rs +++ b/eth2/types/src/indexed_attestation.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// /// To be included in an `AttesterSlashing`. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, PartialEq, @@ -25,8 +25,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; #[serde(bound = "T: EthSpec")] pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. - pub custody_bit_0_indices: VariableList, - pub custody_bit_1_indices: VariableList, + pub attesting_indices: VariableList, pub data: AttestationData, #[signed_root(skip_hashing)] pub signature: AggregateSignature, @@ -35,14 +34,14 @@ pub struct IndexedAttestation { impl IndexedAttestation { /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn is_double_vote(&self, other: &Self) -> bool { self.data.target.epoch == other.data.target.epoch && self.data != other.data } /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. /// - /// Spec v0.8.0 + /// Spec v0.9.1 pub fn is_surround_vote(&self, other: &Self) -> bool { self.data.source.epoch < other.data.source.epoch && other.data.target.epoch < self.data.target.epoch diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index d9a4f22353..1fe35ccd60 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -8,18 +8,15 @@ pub mod test_utils; pub mod attestation; pub mod attestation_data; -pub mod attestation_data_and_custody_bit; pub mod attestation_duty; pub mod attester_slashing; pub mod beacon_block; pub mod beacon_block_body; pub mod beacon_block_header; +pub mod beacon_committee; pub mod beacon_state; pub mod chain_spec; pub mod checkpoint; -pub mod compact_committee; -pub mod crosslink; -pub mod crosslink_committee; pub mod deposit; pub mod deposit_data; pub mod eth1_data; @@ -30,7 +27,6 @@ pub mod historical_batch; pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; -pub mod transfer; pub mod utils; pub mod voluntary_exit; #[macro_use] @@ -41,23 +37,19 @@ pub mod slot_height; mod tree_hash_impls; pub mod validator; -use ethereum_types::{H160, H256, U256}; -use std::collections::HashMap; +use ethereum_types::{H160, H256}; pub use crate::attestation::Attestation; pub use crate::attestation_data::AttestationData; -pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_header::BeaconBlockHeader; +pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::chain_spec::{ChainSpec, Domain}; pub use crate::checkpoint::Checkpoint; -pub use crate::compact_committee::CompactCommittee; -pub use crate::crosslink::Crosslink; -pub use crate::crosslink_committee::{CrosslinkCommittee, OwnedCrosslinkCommittee}; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub use crate::eth1_data::Eth1Data; @@ -70,23 +62,12 @@ pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_height::SlotHeight; -pub use crate::transfer::Transfer; pub use crate::validator::Validator; pub use crate::voluntary_exit::VoluntaryExit; -pub type Shard = u64; -pub type Committee = Vec; -pub type CrosslinkCommittees = Vec<(Committee, u64)>; - +pub type CommitteeIndex = u64; pub type Hash256 = H256; pub type Address = H160; -pub type EthBalance = U256; - -/// Maps a (slot, shard_id) to attestation_indices. -pub type AttesterMap = HashMap<(u64, u64), Vec>; - -/// Maps a slot to a block proposer. -pub type ProposerMap = HashMap; pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 4a8583b0b1..8b6c0022c7 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct PendingAttestation { pub aggregation_bits: BitList, diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 12a4ca0d8a..678458e7e8 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs index 321919dfc5..65971ef2f5 100644 --- a/eth2/types/src/relative_epoch.rs +++ b/eth2/types/src/relative_epoch.rs @@ -9,7 +9,7 @@ pub enum Error { /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. /// -/// Spec v0.8.1 +/// Spec v0.9.1 #[derive(Debug, PartialEq, Clone, Copy)] pub enum RelativeEpoch { /// The prior epoch. @@ -23,7 +23,7 @@ pub enum RelativeEpoch { impl RelativeEpoch { /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn into_epoch(self, base: Epoch) -> Epoch { match self { // Due to saturating nature of epoch, check for current first. @@ -40,7 +40,7 @@ impl RelativeEpoch { /// - `EpochTooLow` when `other` is more than 1 prior to `base`. /// - `EpochTooHigh` when `other` is more than 1 after `base`. /// - /// Spec v0.8.1 + /// Spec v0.9.1 pub fn from_epoch(base: Epoch, other: Epoch) -> Result { // Due to saturating nature of epoch, check for current first. if other == base { diff --git a/eth2/types/src/test_utils/builders.rs b/eth2/types/src/test_utils/builders.rs index 8017e4e5df..5bbe7b7694 100644 --- a/eth2/types/src/test_utils/builders.rs +++ b/eth2/types/src/test_utils/builders.rs @@ -6,7 +6,6 @@ mod testing_beacon_state_builder; mod testing_deposit_builder; mod testing_pending_attestation_builder; mod testing_proposer_slashing_builder; -mod testing_transfer_builder; mod testing_voluntary_exit_builder; pub use testing_attestation_builder::*; @@ -17,5 +16,4 @@ pub use testing_beacon_state_builder::*; pub use testing_deposit_builder::*; pub use testing_pending_attestation_builder::*; pub use testing_proposer_slashing_builder::*; -pub use testing_transfer_builder::*; pub use testing_voluntary_exit_builder::*; diff --git a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs index 1742ce4d89..3d877f8812 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs @@ -13,35 +13,31 @@ pub struct TestingAttestationBuilder { impl TestingAttestationBuilder { /// Create a new attestation builder. pub fn new( - test_task: &AttestationTestTask, + test_task: AttestationTestTask, state: &BeaconState, committee: &[usize], slot: Slot, - shard: u64, + index: u64, spec: &ChainSpec, ) -> Self { - let data_builder = TestingAttestationDataBuilder::new(test_task, state, shard, slot, spec); + let data_builder = TestingAttestationDataBuilder::new(test_task, state, index, slot, spec); let mut aggregation_bits_len = committee.len(); - let mut custody_bits_len = committee.len(); match test_task { AttestationTestTask::BadAggregationBitfieldLen => aggregation_bits_len += 1, - AttestationTestTask::BadCustodyBitfieldLen => custody_bits_len += 1, _ => (), } + let mut aggregation_bits = BitList::with_capacity(aggregation_bits_len).unwrap(); - let mut custody_bits = BitList::with_capacity(custody_bits_len).unwrap(); for i in 0..committee.len() { - custody_bits.set(i, false).unwrap(); aggregation_bits.set(i, false).unwrap(); } let attestation = Attestation { aggregation_bits, data: data_builder.build(), - custody_bits, signature: AggregateSignature::new(), }; @@ -57,12 +53,11 @@ impl TestingAttestationBuilder { /// keypair must be that of the first signing validator. pub fn sign( &mut self, - test_task: &AttestationTestTask, + test_task: AttestationTestTask, signing_validators: &[usize], secret_keys: &[&SecretKey], fork: &Fork, spec: &ChainSpec, - mut custody_bit: bool, ) -> &mut Self { assert_eq!( signing_validators.len(), @@ -79,7 +74,6 @@ impl TestingAttestationBuilder { match test_task { AttestationTestTask::BadIndexedAttestationBadSignature => (), - AttestationTestTask::CustodyBitfieldNotSubset => custody_bit = true, _ => { self.attestation .aggregation_bits @@ -87,29 +81,16 @@ impl TestingAttestationBuilder { .unwrap(); } } - match (custody_bit, test_task) { - (true, _) | (_, AttestationTestTask::CustodyBitfieldHasSetBits) => { - self.attestation - .custody_bits - .set(committee_index, true) - .unwrap(); - } - (false, _) => (), - } - let message = AttestationDataAndCustodyBit { - data: self.attestation.data.clone(), - custody_bit, - } - .tree_hash_root(); + let message = self.attestation.data.tree_hash_root(); let domain = spec.get_domain( self.attestation.data.target.epoch, - Domain::Attestation, + Domain::BeaconAttester, fork, ); - let index = if *test_task == AttestationTestTask::BadSignature { + let index = if test_task == AttestationTestTask::BadSignature { 0 } else { key_index diff --git a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs index d439490e90..7be93fd472 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs @@ -1,6 +1,5 @@ use crate::test_utils::AttestationTestTask; use crate::*; -use tree_hash::TreeHash; /// Builds an `AttestationData` to be used for testing purposes. /// @@ -13,10 +12,10 @@ impl TestingAttestationDataBuilder { /// Configures a new `AttestationData` which attests to all of the same parameters as the /// state. pub fn new( - test_task: &AttestationTestTask, + test_task: AttestationTestTask, state: &BeaconState, - mut shard: u64, - slot: Slot, + index: u64, + mut slot: Slot, spec: &ChainSpec, ) -> Self { let current_epoch = state.current_epoch(); @@ -46,34 +45,16 @@ impl TestingAttestationDataBuilder { } }; - let parent_crosslink = if is_previous_epoch { - state.get_previous_crosslink(shard).unwrap() - } else { - state.get_current_crosslink(shard).unwrap() - }; - - let mut start = parent_crosslink.end_epoch; - let mut end = std::cmp::min( - target.epoch, - parent_crosslink.end_epoch + spec.max_epochs_per_crosslink, - ); - let mut parent_root = Hash256::from_slice(&parent_crosslink.tree_hash_root()); - let mut data_root = Hash256::zero(); let beacon_block_root = *state.get_block_root(slot).unwrap(); match test_task { - AttestationTestTask::BadParentCrosslinkStartEpoch => start = Epoch::from(10 as u64), - AttestationTestTask::BadParentCrosslinkEndEpoch => end = Epoch::from(0 as u64), - AttestationTestTask::BadParentCrosslinkHash => parent_root = Hash256::zero(), - AttestationTestTask::NoCommiteeForShard => shard += 2, - AttestationTestTask::BadShard => shard = T::ShardCount::to_u64(), - AttestationTestTask::IncludedTooEarly => shard += 1, - AttestationTestTask::IncludedTooLate => { - target = Checkpoint { - epoch: Epoch::from(3 as u64), - root: Hash256::zero(), - } + // FIXME: re-enable the shard-like tests + // AttestationTestTask::NoCommiteeForShard => index += 2, + // AttestationTestTask::BadShard => index = T::ShardCount::to_u64(), + AttestationTestTask::IncludedTooEarly => { + slot = state.slot - spec.min_attestation_inclusion_delay + 1 } + AttestationTestTask::IncludedTooLate => slot -= T::SlotsPerEpoch::to_u64(), AttestationTestTask::BadTargetEpoch => { target = Checkpoint { epoch: Epoch::from(5 as u64), @@ -98,27 +79,19 @@ impl TestingAttestationDataBuilder { root: Hash256::zero(), } } - AttestationTestTask::BadParentCrosslinkDataRoot => data_root = parent_root, _ => (), } - let crosslink = Crosslink { - shard, - parent_root, - start_epoch: start, - end_epoch: end, - data_root, - }; let data = AttestationData { + slot, + index, + // LMD GHOST vote beacon_block_root, // FFG Vote source, target, - - // Crosslink vote - crosslink, }; Self { data } diff --git a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs index 353c4e38bc..f2dfca5c8a 100644 --- a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs @@ -19,14 +19,15 @@ impl TestingAttesterSlashingBuilder { /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). pub fn double_vote( - test_task: &AttesterSlashingTestTask, + test_task: AttesterSlashingTestTask, validator_indices: &[u64], signer: F, ) -> AttesterSlashing where F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { - let shard = 0; + let slot = Slot::new(1); + let index = 0; let epoch_1 = Epoch::new(1); let epoch_2 = Epoch::new(2); let hash_1 = Hash256::from_low_u64_le(1); @@ -39,22 +40,16 @@ impl TestingAttesterSlashingBuilder { epoch: epoch_1, root: hash_2, }; - let crosslink = Crosslink { - shard, - parent_root: hash_1, - start_epoch: epoch_1, - end_epoch: epoch_2, - data_root: hash_1, - }; let data_1 = AttestationData { + slot, + index, beacon_block_root: hash_1, source: checkpoint_1.clone(), target: checkpoint_1, - crosslink, }; - let data_2 = if *test_task == AttesterSlashingTestTask::NotSlashable { + let data_2 = if test_task == AttesterSlashingTestTask::NotSlashable { AttestationData { ..data_1.clone() } } else { AttestationData { @@ -64,42 +59,39 @@ impl TestingAttesterSlashingBuilder { }; let mut attestation_1 = IndexedAttestation { - custody_bit_0_indices: validator_indices.to_vec().into(), - custody_bit_1_indices: if *test_task - == AttesterSlashingTestTask::IndexedAttestation1Invalid + attesting_indices: if test_task == AttesterSlashingTestTask::IndexedAttestation1Invalid { - validator_indices.to_vec().into() + // Trigger bad validator indices ordering error. + vec![1, 0].into() } else { - VariableList::empty() + validator_indices.to_vec().into() }, data: data_1, signature: AggregateSignature::new(), }; let mut attestation_2 = IndexedAttestation { - custody_bit_0_indices: validator_indices.to_vec().into(), - custody_bit_1_indices: if *test_task - == AttesterSlashingTestTask::IndexedAttestation2Invalid + attesting_indices: if test_task == AttesterSlashingTestTask::IndexedAttestation2Invalid { - validator_indices.to_vec().into() + // Trigger bad validator indices ordering error. + vec![1, 0].into() } else { - VariableList::empty() + validator_indices.to_vec().into() }, data: data_2, signature: AggregateSignature::new(), }; let add_signatures = |attestation: &mut IndexedAttestation| { - // All validators sign with a `false` custody bit. - let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { - data: attestation.data.clone(), - custody_bit: false, - }; - let message = attestation_data_and_custody_bit.tree_hash_root(); + let message = attestation.data.tree_hash_root(); for validator_index in validator_indices { - let signature = - signer(*validator_index, &message[..], epoch_2, Domain::Attestation); + let signature = signer( + *validator_index, + &message[..], + epoch_2, + Domain::BeaconAttester, + ); attestation.signature.add(&signature); } }; diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index 3df8ed9e9d..b1c0260a5e 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -1,7 +1,7 @@ use crate::{ test_utils::{ TestingAttestationBuilder, TestingAttesterSlashingBuilder, TestingDepositBuilder, - TestingProposerSlashingBuilder, TestingTransferBuilder, TestingVoluntaryExitBuilder, + TestingProposerSlashingBuilder, TestingVoluntaryExitBuilder, }, typenum::U4294967296, *, @@ -19,7 +19,7 @@ pub struct TestingBeaconBlockBuilder { } /// Enum used for passing test options to builder -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Copy)] pub enum DepositTestTask { Valid, BadPubKey, @@ -29,6 +29,7 @@ pub enum DepositTestTask { } /// Enum used for passing test options to builder +#[derive(PartialEq, Clone, Copy)] pub enum ExitTestTask { AlreadyInitiated, AlreadyExited, @@ -39,23 +40,16 @@ pub enum ExitTestTask { ValidatorUnknown, } -#[derive(PartialEq)] /// Enum used for passing test options to builder +#[derive(PartialEq, Clone, Copy)] pub enum AttestationTestTask { Valid, - BadParentCrosslinkStartEpoch, - BadParentCrosslinkEndEpoch, - BadParentCrosslinkHash, NoCommiteeForShard, WrongJustifiedCheckpoint, BadTargetTooLow, BadTargetTooHigh, BadShard, - BadParentCrosslinkDataRoot, BadIndexedAttestationBadSignature, - CustodyBitfieldNotSubset, - CustodyBitfieldHasSetBits, - BadCustodyBitfieldLen, BadAggregationBitfieldLen, BadSignature, ValidatorUnknown, @@ -64,8 +58,8 @@ pub enum AttestationTestTask { BadTargetEpoch, } -#[derive(PartialEq)] /// Enum used for passing test options to builder +#[derive(PartialEq, Clone, Copy)] pub enum AttesterSlashingTestTask { Valid, NotSlashable, @@ -74,7 +68,7 @@ pub enum AttesterSlashingTestTask { } /// Enum used for passing test options to builder -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Copy)] pub enum ProposerSlashingTestTask { Valid, ProposerUnknown, @@ -131,7 +125,7 @@ impl TestingBeaconBlockBuilder { /// Inserts a signed, valid `ProposerSlashing` for the validator. pub fn insert_proposer_slashing( &mut self, - test_task: &ProposerSlashingTestTask, + test_task: ProposerSlashingTestTask, validator_index: u64, secret_key: &SecretKey, fork: &Fork, @@ -149,7 +143,7 @@ impl TestingBeaconBlockBuilder { /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`. pub fn insert_attester_slashing( &mut self, - test_task: &AttesterSlashingTestTask, + test_task: AttesterSlashingTestTask, validator_indices: &[u64], secret_keys: &[&SecretKey], fork: &Fork, @@ -176,7 +170,7 @@ impl TestingBeaconBlockBuilder { /// to aggregate these split attestations. pub fn insert_attestations( &mut self, - test_task: &AttestationTestTask, + test_task: AttestationTestTask, state: &BeaconState, secret_keys: &[&SecretKey], num_attestations: usize, @@ -190,7 +184,7 @@ impl TestingBeaconBlockBuilder { // - The slot of the committee. // - A list of all validators in the committee. // - A list of all validators in the committee that should sign the attestation. - // - The shard of the committee. + // - The index of the committee. let mut committees: Vec<(Slot, Vec, Vec, u64)> = vec![]; if slot < T::slots_per_epoch() { @@ -206,16 +200,16 @@ impl TestingBeaconBlockBuilder { break; } - for crosslink_committee in state.get_crosslink_committees_at_slot(slot)? { + for beacon_committee in state.get_beacon_committees_at_slot(slot)? { if attestations_added >= num_attestations { break; } committees.push(( slot, - crosslink_committee.committee.to_vec(), - crosslink_committee.committee.to_vec(), - crosslink_committee.shard, + beacon_committee.committee.to_vec(), + beacon_committee.committee.to_vec(), + beacon_committee.index, )); attestations_added += 1; @@ -231,26 +225,26 @@ impl TestingBeaconBlockBuilder { break; } - for index in 0..committees.len() { + for i in 0..committees.len() { if committees.len() >= num_attestations as usize { break; } - let (slot, committee, mut signing_validators, shard) = committees[index].clone(); + let (slot, committee, mut signing_validators, index) = committees[i].clone(); let new_signing_validators = signing_validators.split_off(signing_validators.len() / 2); - committees[index] = (slot, committee.clone(), signing_validators, shard); - committees.push((slot, committee, new_signing_validators, shard)); + committees[i] = (slot, committee.clone(), signing_validators, index); + committees.push((slot, committee, new_signing_validators, index)); } } let attestations: Vec<_> = committees .par_iter() - .map(|(slot, committee, signing_validators, shard)| { + .map(|(slot, committee, signing_validators, index)| { let mut builder = TestingAttestationBuilder::new( - test_task, state, committee, *slot, *shard, spec, + test_task, state, committee, *slot, *index, spec, ); let signing_secret_keys: Vec<&SecretKey> = signing_validators @@ -263,7 +257,6 @@ impl TestingBeaconBlockBuilder { &signing_secret_keys, &state.fork, spec, - false, ); builder.build() @@ -294,7 +287,7 @@ impl TestingBeaconBlockBuilder { let keypair = Keypair::random(); let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); - builder.sign(&test_task, &keypair, spec); + builder.sign(test_task, &keypair, spec); datas.push(builder.build().data); } @@ -347,7 +340,7 @@ impl TestingBeaconBlockBuilder { /// Insert a `Valid` exit into the state. pub fn insert_exit( &mut self, - test_task: &ExitTestTask, + test_task: ExitTestTask, state: &mut BeaconState, mut validator_index: u64, secret_key: &SecretKey, @@ -362,6 +355,7 @@ impl TestingBeaconBlockBuilder { ExitTestTask::AlreadyExited => { state.validators[validator_index as usize].exit_epoch = Epoch::from(314_159 as u64) } + // FIXME: disabled in v0.9 ExitTestTask::NotActive => { state.validators[validator_index as usize].activation_epoch = Epoch::from(314_159 as u64) @@ -381,25 +375,6 @@ impl TestingBeaconBlockBuilder { .unwrap(); } - /// Insert a `Valid` transfer into the state. - /// - /// Note: this will set the validator to be withdrawable by directly modifying the state - /// validator registry. This _may_ cause problems historic hashes, etc. - pub fn insert_transfer( - &mut self, - state: &BeaconState, - from: u64, - to: u64, - amount: u64, - keypair: Keypair, - spec: &ChainSpec, - ) { - let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); - builder.sign::(keypair, &state.fork, spec); - - self.block.body.transfers.push(builder.build()).unwrap() - } - /// Signs and returns the block, consuming the builder. pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { self.sign(sk, fork, spec); @@ -416,7 +391,7 @@ impl TestingBeaconBlockBuilder { /// /// Signs the message using a `BeaconChainHarness`. fn build_proposer_slashing( - test_task: &ProposerSlashingTestTask, + test_task: ProposerSlashingTestTask, validator_index: u64, secret_key: &SecretKey, fork: &Fork, @@ -434,7 +409,7 @@ fn build_proposer_slashing( /// /// Signs the message using a `BeaconChainHarness`. fn build_double_vote_attester_slashing( - test_task: &AttesterSlashingTestTask, + test_task: AttesterSlashingTestTask, validator_indices: &[u64], secret_keys: &[&SecretKey], fork: &Fork, diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index d6aa488f99..a4b0cec658 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -181,8 +181,6 @@ impl TestingBeaconStateBuilder { state.slot = slot; - // NOTE: we could update the latest start shard here - state.previous_justified_checkpoint.epoch = epoch - 3; state.current_justified_checkpoint.epoch = epoch - 2; state.justification_bits = BitVector::from_bytes(vec![0b0000_1111]).unwrap(); @@ -215,23 +213,23 @@ impl TestingBeaconStateBuilder { for slot in first_slot..=last_slot { let slot = Slot::from(slot); - let committees: Vec = state - .get_crosslink_committees_at_slot(slot) + let committees: Vec = state + .get_beacon_committees_at_slot(slot) .unwrap() .into_iter() .map(|c| c.clone().into_owned()) .collect(); - for crosslink_committee in committees { + for beacon_committee in committees { let mut builder = TestingPendingAttestationBuilder::new( - &AttestationTestTask::Valid, + AttestationTestTask::Valid, state, - crosslink_committee.shard, + beacon_committee.index, slot, spec, ); // The entire committee should have signed the pending attestation. - let signers = vec![true; crosslink_committee.committee.len()]; + let signers = vec![true; beacon_committee.committee.len()]; builder.add_committee_participation(signers); let attestation = builder.build(); diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index 41cd194377..aad5f20986 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -30,7 +30,7 @@ impl TestingDepositBuilder { /// - `pubkey` to the signing pubkey. /// - `withdrawal_credentials` to the signing pubkey. /// - `proof_of_possession` - pub fn sign(&mut self, test_task: &DepositTestTask, keypair: &Keypair, spec: &ChainSpec) { + pub fn sign(&mut self, test_task: DepositTestTask, keypair: &Keypair, spec: &ChainSpec) { let new_key = Keypair::random(); let mut pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); let mut secret_key = keypair.sk.clone(); diff --git a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs index 77cb6a302f..c56deb647d 100644 --- a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs @@ -13,23 +13,18 @@ impl TestingPendingAttestationBuilder { /// /// The `inclusion_delay` will be set to `MIN_ATTESTATION_INCLUSION_DELAY`. /// - /// * The aggregation and custody bitfields will all be empty, they need to be set with + /// * The aggregation bitfield will be empty, it needs to be set with /// `Self::add_committee_participation`. pub fn new( - test_task: &AttestationTestTask, + test_task: AttestationTestTask, state: &BeaconState, - shard: u64, + index: u64, slot: Slot, spec: &ChainSpec, ) -> Self { - let data_builder = TestingAttestationDataBuilder::new(test_task, state, shard, slot, spec); + let data_builder = TestingAttestationDataBuilder::new(test_task, state, index, slot, spec); - let relative_epoch = - RelativeEpoch::from_epoch(state.current_epoch(), slot.epoch(T::slots_per_epoch())) - .expect("epoch out of bounds"); - let proposer_index = state - .get_beacon_proposer_index(slot, relative_epoch, spec) - .unwrap() as u64; + let proposer_index = state.get_beacon_proposer_index(slot, spec).unwrap() as u64; let pending_attestation = PendingAttestation { aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 0c14f0a754..bfd4bd334d 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -19,7 +19,7 @@ impl TestingProposerSlashingBuilder { /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). pub fn double_vote( - test_task: &ProposerSlashingTestTask, + test_task: ProposerSlashingTestTask, mut proposer_index: u64, signer: F, ) -> ProposerSlashing @@ -29,7 +29,7 @@ impl TestingProposerSlashingBuilder { { let slot = Slot::new(0); let hash_1 = Hash256::from([1; 32]); - let hash_2 = if *test_task == ProposerSlashingTestTask::ProposalsIdentical { + let hash_2 = if test_task == ProposerSlashingTestTask::ProposalsIdentical { hash_1.clone() } else { Hash256::from([2; 32]) @@ -43,7 +43,7 @@ impl TestingProposerSlashingBuilder { signature: Signature::empty_signature(), }; - let slot_2 = if *test_task == ProposerSlashingTestTask::ProposalEpochMismatch { + let slot_2 = if test_task == ProposerSlashingTestTask::ProposalEpochMismatch { Slot::new(128) } else { Slot::new(0) @@ -57,21 +57,21 @@ impl TestingProposerSlashingBuilder { let epoch = slot.epoch(T::slots_per_epoch()); - if *test_task != ProposerSlashingTestTask::BadProposal1Signature { + if test_task != ProposerSlashingTestTask::BadProposal1Signature { header_1.signature = { let message = header_1.signed_root(); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; } - if *test_task != ProposerSlashingTestTask::BadProposal2Signature { + if test_task != ProposerSlashingTestTask::BadProposal2Signature { header_2.signature = { let message = header_2.signed_root(); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; } - if *test_task == ProposerSlashingTestTask::ProposerUnknown { + if test_task == ProposerSlashingTestTask::ProposerUnknown { proposer_index = 3_141_592; } diff --git a/eth2/types/src/test_utils/builders/testing_transfer_builder.rs b/eth2/types/src/test_utils/builders/testing_transfer_builder.rs deleted file mode 100644 index d3c3da19e6..0000000000 --- a/eth2/types/src/test_utils/builders/testing_transfer_builder.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::*; -use tree_hash::SignedRoot; - -/// Builds a transfer to be used for testing purposes. -/// -/// This struct should **never be used for production purposes.** -pub struct TestingTransferBuilder { - transfer: Transfer, -} - -impl TestingTransferBuilder { - /// Instantiates a new builder. - pub fn new(sender: u64, recipient: u64, amount: u64, slot: Slot) -> Self { - let keypair = Keypair::random(); - - let transfer = Transfer { - sender, - recipient, - amount, - fee: 0, - slot, - pubkey: keypair.pk, - signature: Signature::empty_signature(), - }; - - Self { transfer } - } - - /// Signs the transfer. - /// - /// The keypair must match that of the `from` validator index. - pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { - self.transfer.pubkey = keypair.pk; - let message = self.transfer.signed_root(); - let epoch = self.transfer.slot.epoch(T::slots_per_epoch()); - let domain = spec.get_domain(epoch, Domain::Transfer, fork); - - self.transfer.signature = Signature::new(&message, domain, &keypair.sk); - } - - /// Builds the transfer, consuming the builder. - pub fn build(self) -> Transfer { - self.transfer - } -} diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs deleted file mode 100644 index 6c70ce7918..0000000000 --- a/eth2/types/src/transfer.rs +++ /dev/null @@ -1,45 +0,0 @@ -use super::Slot; -use crate::test_utils::TestRandom; -use bls::{PublicKey, Signature}; -use derivative::Derivative; - -use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::{SignedRoot, TreeHash}; - -/// The data submitted to the deposit contract. -/// -/// Spec v0.8.0 -#[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - SignedRoot, - Derivative, -)] -#[derivative(PartialEq, Eq, Hash)] -pub struct Transfer { - pub sender: u64, - pub recipient: u64, - pub amount: u64, - pub fee: u64, - pub slot: Slot, - pub pubkey: PublicKey, - #[derivative(Hash = "ignore")] - #[signed_root(skip_hashing)] - pub signature: Signature, -} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_tests!(Transfer); -} diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index dfdf6e7d49..70c3a69d97 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] pub struct Validator { pub pubkey: PublicKey, diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 0a8524d8f7..b99b309833 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// An exit voluntarily submitted a validator who wishes to withdraw. /// -/// Spec v0.8.0 +/// Spec v0.9.1 #[derive( Debug, PartialEq, diff --git a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs index 416303b820..3b6cfbff4b 100644 --- a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs +++ b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs @@ -19,7 +19,17 @@ const TOTAL_SIZE: usize = SEED_SIZE + ROUND_SIZE + POSITION_WINDOW_SIZE; /// It holds that: shuffle_list(shuffle_list(l, r, s, true), r, s, false) == l /// and: shuffle_list(shuffle_list(l, r, s, false), r, s, true) == l /// -/// TODO forwards is around the wrong way - denote? +/// The Eth2.0 spec mostly uses shuffling with `forwards == false`, because backwards +/// shuffled lists are slightly easier to specify, and slightly easier to compute. +/// +/// The forwards shuffling of a list is equivalent to: +/// +/// `[indices[x] for i in 0..n, where compute_shuffled_index(x) = i]` +/// +/// Whereas the backwards shuffling of a list is: +/// +/// `[indices[compute_shuffled_index(i)] for i in 0..n]` +/// /// Returns `None` under any of the following conditions: /// - `list_size == 0` /// - `list_size > 2**24` diff --git a/tests/ef_tests/Makefile b/tests/ef_tests/Makefile index a24c21b529..660a8b4387 100644 --- a/tests/ef_tests/Makefile +++ b/tests/ef_tests/Makefile @@ -1,5 +1,5 @@ # Bump the test tag here and in .gitlab-ci.yml and CI will take care of updating the cached tarballs -TESTS_TAG := v0.8.3 +TESTS_TAG := v0.9.1 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs index ece69b3fe2..975130b239 100644 --- a/tests/ef_tests/src/cases/epoch_processing.rs +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -6,8 +6,8 @@ use crate::type_name; use crate::type_name::TypeName; use serde_derive::Deserialize; use state_processing::per_epoch_processing::{ - errors::EpochProcessingError, process_crosslinks, process_final_updates, - process_justification_and_finalization, process_registry_updates, process_slashings, + errors::EpochProcessingError, process_final_updates, process_justification_and_finalization, + process_registry_updates, process_rewards_and_penalties, process_slashings, validator_statuses::ValidatorStatuses, }; use std::marker::PhantomData; @@ -38,7 +38,7 @@ pub trait EpochTransition: TypeName + Debug + Sync { #[derive(Debug)] pub struct JustificationAndFinalization; #[derive(Debug)] -pub struct Crosslinks; +pub struct RewardsAndPenalties; #[derive(Debug)] pub struct RegistryUpdates; #[derive(Debug)] @@ -50,7 +50,7 @@ type_name!( JustificationAndFinalization, "justification_and_finalization" ); -type_name!(Crosslinks, "crosslinks"); +type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(FinalUpdates, "final_updates"); @@ -63,10 +63,11 @@ impl EpochTransition for JustificationAndFinalization { } } -impl EpochTransition for Crosslinks { +impl EpochTransition for RewardsAndPenalties { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - process_crosslinks(state, spec)?; - Ok(()) + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state, spec)?; + process_rewards_and_penalties(state, &mut validator_statuses, spec) } } diff --git a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs index 3da04d2a33..692108681e 100644 --- a/tests/ef_tests/src/cases/operations.rs +++ b/tests/ef_tests/src/cases/operations.rs @@ -8,13 +8,13 @@ use ssz::Decode; use state_processing::per_block_processing::{ errors::BlockProcessingError, process_attestations, process_attester_slashings, process_block_header, process_deposits, process_exits, process_proposer_slashings, - process_transfers, VerifySignatures, + VerifySignatures, }; use std::fmt::Debug; use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, - ProposerSlashing, Transfer, VoluntaryExit, + ProposerSlashing, VoluntaryExit, }; #[derive(Debug, Clone, Default, Deserialize)] @@ -95,16 +95,6 @@ impl Operation for ProposerSlashing { } } -impl Operation for Transfer { - fn apply_to( - &self, - state: &mut BeaconState, - spec: &ChainSpec, - ) -> Result<(), BlockProcessingError> { - process_transfers(state, &[self.clone()], VerifySignatures::True, spec) - } -} - impl Operation for VoluntaryExit { fn handler_name() -> String { "voluntary_exit".into() diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index 719bfc1aaf..2e7b7eb577 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -3,7 +3,7 @@ use types::EthSpec; pub use case_result::CaseResult; pub use cases::Case; pub use cases::{ - Crosslinks, FinalUpdates, JustificationAndFinalization, RegistryUpdates, Slashings, + FinalUpdates, JustificationAndFinalization, RegistryUpdates, RewardsAndPenalties, Slashings, }; pub use error::Error; pub use handler::*; diff --git a/tests/ef_tests/src/type_name.rs b/tests/ef_tests/src/type_name.rs index 5af0c52565..d0ab1e164e 100644 --- a/tests/ef_tests/src/type_name.rs +++ b/tests/ef_tests/src/type_name.rs @@ -38,15 +38,12 @@ type_name!(MainnetEthSpec, "mainnet"); type_name_generic!(Attestation); type_name!(AttestationData); -type_name!(AttestationDataAndCustodyBit); type_name_generic!(AttesterSlashing); type_name_generic!(BeaconBlock); type_name_generic!(BeaconBlockBody); type_name!(BeaconBlockHeader); type_name_generic!(BeaconState); type_name!(Checkpoint); -type_name_generic!(CompactCommittee); -type_name!(Crosslink); type_name!(Deposit); type_name!(DepositData); type_name!(Eth1Data); @@ -55,6 +52,5 @@ type_name_generic!(HistoricalBatch); type_name_generic!(IndexedAttestation); type_name_generic!(PendingAttestation); type_name!(ProposerSlashing); -type_name!(Transfer); type_name!(Validator); type_name!(VoluntaryExit); diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index a9b7c22ac3..cd9cbcec42 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -15,12 +15,6 @@ fn operations_deposit() { OperationsHandler::::run(); } -#[test] -fn operations_transfer() { - OperationsHandler::::run(); - // Note: there are no transfer tests for mainnet -} - #[test] fn operations_exit() { OperationsHandler::::run(); @@ -139,10 +133,6 @@ mod ssz_static { ssz_static_test!(attestation, Attestation<_>, SR); ssz_static_test!(attestation_data, AttestationData); - ssz_static_test!( - attestation_data_and_custody_bit, - AttestationDataAndCustodyBit - ); ssz_static_test!(attester_slashing, AttesterSlashing<_>); ssz_static_test!(beacon_block, BeaconBlock<_>, SR); ssz_static_test!(beacon_block_body, BeaconBlockBody<_>); @@ -155,8 +145,6 @@ mod ssz_static { } ); ssz_static_test!(checkpoint, Checkpoint); - ssz_static_test!(compact_committee, CompactCommittee<_>); - ssz_static_test!(crosslink, Crosslink); ssz_static_test!(deposit, Deposit); ssz_static_test!(deposit_data, DepositData, SR); ssz_static_test!(eth1_data, Eth1Data); @@ -165,7 +153,6 @@ mod ssz_static { ssz_static_test!(indexed_attestation, IndexedAttestation<_>, SR); ssz_static_test!(pending_attestation, PendingAttestation<_>); ssz_static_test!(proposer_slashing, ProposerSlashing); - ssz_static_test!(transfer, Transfer, SR); ssz_static_test!(validator, Validator); ssz_static_test!(voluntary_exit, VoluntaryExit, SR); } @@ -187,9 +174,9 @@ fn epoch_processing_justification_and_finalization() { } #[test] -fn epoch_processing_crosslinks() { - EpochProcessingHandler::::run(); - EpochProcessingHandler::::run(); +fn epoch_processing_rewards_and_penalties() { + EpochProcessingHandler::::run(); + // Note: there are no reward and penalty tests for mainnet yet } #[test] diff --git a/validator_client/README.md b/validator_client/README.md index 0f30ded73d..4cefe0b40f 100644 --- a/validator_client/README.md +++ b/validator_client/README.md @@ -8,11 +8,11 @@ Node (BN) and fulfils the roles of a validator. The VC is responsible for the following tasks: - Requesting validator duties (a.k.a. shuffling) from the BN. -- Prompting the BN to produce a new block, when a validators block production +- Prompting the BN to produce a new block, when a validator's block production duties require. - Completing all the fields on a new block (e.g., RANDAO reveal, signature) and publishing the block to a BN. -- Prompting the BN to produce a new shard attestation as per a validators +- Prompting the BN to produce a new attestation as per a validator's duties. - Ensuring that no slashable messages are signed by a validator private key. - Keeping track of the system clock and how it relates to slots/epochs. diff --git a/validator_client/src/attestation_producer/beacon_node_attestation.rs b/validator_client/src/attestation_producer/beacon_node_attestation.rs index 1213be8a66..d3dfaf3281 100644 --- a/validator_client/src/attestation_producer/beacon_node_attestation.rs +++ b/validator_client/src/attestation_producer/beacon_node_attestation.rs @@ -1,6 +1,6 @@ //TODO: generalise these enums to the crate use crate::block_producer::{BeaconNodeError, PublishOutcome}; -use types::{Attestation, AttestationData, EthSpec, Slot}; +use types::{Attestation, AttestationData, CommitteeIndex, EthSpec, Slot}; /// Defines the methods required to produce and publish attestations on a Beacon Node. Abstracts the /// actual beacon node. @@ -10,7 +10,7 @@ pub trait BeaconNodeAttestation: Send + Sync { fn produce_attestation_data( &self, slot: Slot, - shard: u64, + index: CommitteeIndex, ) -> Result; /// Request that the node publishes a attestation. diff --git a/validator_client/src/attestation_producer/grpc.rs b/validator_client/src/attestation_producer/grpc.rs index 22af304ae4..a7f979dc05 100644 --- a/validator_client/src/attestation_producer/grpc.rs +++ b/validator_client/src/attestation_producer/grpc.rs @@ -6,17 +6,17 @@ use ssz::{Decode, Encode}; use protos::services::{ Attestation as GrpcAttestation, ProduceAttestationDataRequest, PublishAttestationRequest, }; -use types::{Attestation, AttestationData, EthSpec, Slot}; +use types::{Attestation, AttestationData, CommitteeIndex, EthSpec, Slot}; impl BeaconNodeAttestation for AttestationServiceClient { fn produce_attestation_data( &self, slot: Slot, - shard: u64, + index: CommitteeIndex, ) -> Result { let mut req = ProduceAttestationDataRequest::new(); req.set_slot(slot.as_u64()); - req.set_shard(shard); + req.set_shard(index); let reply = self .produce_attestation_data(&req) diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index 6f4a5f304e..cd2ed4a138 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -10,10 +10,7 @@ use beacon_node_attestation::BeaconNodeAttestation; use core::marker::PhantomData; use slog::{error, info, warn}; use tree_hash::TreeHash; -use types::{ - AggregateSignature, Attestation, AttestationData, AttestationDataAndCustodyBit, - AttestationDuty, BitList, -}; +use types::{AggregateSignature, Attestation, AttestationData, AttestationDuty, BitList}; //TODO: Group these errors at a crate level #[derive(Debug, PartialEq)] @@ -90,9 +87,11 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a let attestation = self .beacon_node - .produce_attestation_data(self.duty.slot, self.duty.shard)?; + .produce_attestation_data(self.duty.slot, self.duty.index)?; if self.safe_to_produce(&attestation) { - let domain = self.spec.get_domain(epoch, Domain::Attestation, &self.fork); + let domain = self + .spec + .get_domain(epoch, Domain::BeaconAttester, &self.fork); if let Some(attestation) = self.sign_attestation(attestation, self.duty, domain) { match self.beacon_node.publish_attestation(attestation) { Ok(PublishOutcome::InvalidAttestation(_string)) => { @@ -127,11 +126,7 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a // build the aggregate signature let aggregate_signature = { - let message = AttestationDataAndCustodyBit { - data: attestation.clone(), - custody_bit: false, - } - .tree_hash_root(); + let message = attestation.tree_hash_root(); let sig = self.signer.sign_message(&message, domain)?; @@ -141,13 +136,11 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a }; let mut aggregation_bits = BitList::with_capacity(duties.committee_len).ok()?; - let custody_bits = BitList::with_capacity(duties.committee_len).ok()?; - aggregation_bits.set(duties.committee_index, true).ok()?; + aggregation_bits.set(duties.committee_position, true).ok()?; Some(Attestation { aggregation_bits, data: attestation, - custody_bits, signature: aggregate_signature, }) } diff --git a/validator_client/src/duties/epoch_duties.rs b/validator_client/src/duties/epoch_duties.rs index c8f2b981a9..5a22dccb2b 100644 --- a/validator_client/src/duties/epoch_duties.rs +++ b/validator_client/src/duties/epoch_duties.rs @@ -35,7 +35,7 @@ impl EpochDuty { _ => false, }; - // if the validator is required to attest to a shard, create the data + // if the validator is required to attest to a index, create the data let mut attestation_duty = None; if self.attestation_duty.slot == slot { attestation_duty = Some(self.attestation_duty) @@ -59,8 +59,8 @@ impl fmt::Display for EpochDuty { } write!( f, - "produce block slot: {}, attestation slot: {}, attestation shard: {}", - display_block, self.attestation_duty.slot, self.attestation_duty.shard + "produce block slot: {}, attestation slot: {}, attestation index: {}", + display_block, self.attestation_duty.slot, self.attestation_duty.index ) } } diff --git a/validator_client/src/duties/grpc.rs b/validator_client/src/duties/grpc.rs index 58fb5c992d..565672c982 100644 --- a/validator_client/src/duties/grpc.rs +++ b/validator_client/src/duties/grpc.rs @@ -51,8 +51,8 @@ impl BeaconNodeDuties for ValidatorServiceClient { let attestation_duty = AttestationDuty { slot: Slot::from(active_duty.get_attestation_slot()), - shard: active_duty.get_attestation_shard(), - committee_index: active_duty.get_committee_index() as usize, + index: active_duty.get_attestation_shard(), + committee_position: active_duty.get_committee_index() as usize, committee_len: active_duty.get_committee_len() as usize, };