diff --git a/Cargo.lock b/Cargo.lock index c3391bf0a9..3038e29dff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2785,6 +2785,8 @@ dependencies = [ "int_to_bytes 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "state_processing 0.1.0", "types 0.1.0", ] @@ -3348,6 +3350,7 @@ dependencies = [ "eth2_ssz 0.1.2", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "operation_pool 0.1.0", "proto_array_fork_choice 0.1.0", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", "rest_api 0.1.0", @@ -3417,6 +3420,7 @@ dependencies = [ "lighthouse_metrics 0.1.0", "network 0.1.0", "node_test_rig 0.1.0", + "operation_pool 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "remote_beacon_node 0.1.0", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3923,8 +3927,10 @@ dependencies = [ "bls 0.1.0", "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", "eth2_ssz_types 0.2.0", + "int_to_bytes 0.1.0", "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6184013525..95cc86b99b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -475,7 +475,7 @@ impl BeaconChain { 
self.canonical_head .try_read_for(HEAD_LOCK_TIMEOUT) .ok_or_else(|| Error::CanonicalHeadLockTimeout) - .map(|v| v.clone()) + .map(|v| v.clone_with_only_committee_caches()) } /// Returns info representing the head block and state. diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs index a043a48132..0ca8a12431 100644 --- a/beacon_node/beacon_chain/src/checkpoint.rs +++ b/beacon_node/beacon_chain/src/checkpoint.rs @@ -41,4 +41,13 @@ impl CheckPoint { self.beacon_state = beacon_state; self.beacon_state_root = beacon_state_root; } + + pub fn clone_with_only_committee_caches(&self) -> Self { + Self { + beacon_block: self.beacon_block.clone(), + beacon_block_root: self.beacon_block_root, + beacon_state: self.beacon_state.clone_with_only_committee_caches(), + beacon_state_root: self.beacon_state_root, + } + } } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 72b1fdbc1c..31579d86c8 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -198,6 +198,18 @@ lazy_static! 
{ try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); + + /* + * Operation Pool + */ + pub static ref OP_POOL_NUM_ATTESTATIONS: Result = + try_create_int_gauge("beacon_op_pool_attestations_total", "Count of attestations in the op pool"); + pub static ref OP_POOL_NUM_ATTESTER_SLASHINGS: Result = + try_create_int_gauge("beacon_op_pool_attester_slashings_total", "Count of attester slashings in the op pool"); + pub static ref OP_POOL_NUM_PROPOSER_SLASHINGS: Result = + try_create_int_gauge("beacon_op_pool_proposer_slashings_total", "Count of proposer slashings in the op pool"); + pub static ref OP_POOL_NUM_VOLUNTARY_EXITS: Result = + try_create_int_gauge("beacon_op_pool_voluntary_exits_total", "Count of voluntary exits in the op pool"); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, @@ -206,6 +218,23 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { if let Ok(head) = beacon_chain.head() { scrape_head_state::(&head.beacon_state, head.beacon_state_root) } + + set_gauge_by_usize( + &OP_POOL_NUM_ATTESTATIONS, + beacon_chain.op_pool.num_attestations(), + ); + set_gauge_by_usize( + &OP_POOL_NUM_ATTESTER_SLASHINGS, + beacon_chain.op_pool.num_attester_slashings(), + ); + set_gauge_by_usize( + &OP_POOL_NUM_PROPOSER_SLASHINGS, + beacon_chain.op_pool.num_proposer_slashings(), + ); + set_gauge_by_usize( + &OP_POOL_NUM_VOLUNTARY_EXITS, + beacon_chain.op_pool.num_voluntary_exits(), + ); } /// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. 
diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index faaf8464c5..ec77930c9b 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -70,14 +70,14 @@ pub fn spawn_notifier( usize::max_value() }; - let head = beacon_chain.head() + let head_info = beacon_chain.head_info() .map_err(|e| error!( log, - "Failed to get beacon chain head"; + "Failed to get beacon chain head info"; "error" => format!("{:?}", e) ))?; - let head_slot = head.beacon_block.slot; + let head_slot = head_info.slot; let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); let current_slot = beacon_chain.slot().map_err(|e| { error!( @@ -87,9 +87,9 @@ pub fn spawn_notifier( ) })?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - let finalized_epoch = head.beacon_state.finalized_checkpoint.epoch; - let finalized_root = head.beacon_state.finalized_checkpoint.root; - let head_root = head.beacon_block_root; + let finalized_epoch = head_info.finalized_checkpoint.epoch; + let finalized_root = head_info.finalized_checkpoint.root; + let head_root = head_info.block_root; let mut speedo = speedo.lock(); speedo.observe(head_slot, Instant::now()); diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index a0f9e99962..52e4350c86 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,6 +1,6 @@ use crate::DepositLog; -use eth2_hashing::hash; use ssz_derive::{Decode, Encode}; +use state_processing::common::DepositDataTree; use std::cmp::Ordering; use tree_hash::TreeHash; use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; @@ -31,56 +31,6 @@ pub enum Error { InternalError(String), } -/// Emulates the eth1 deposit contract merkle tree. 
-pub struct DepositDataTree { - tree: merkle_proof::MerkleTree, - mix_in_length: usize, - depth: usize, -} - -impl DepositDataTree { - /// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth. - pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self { - Self { - tree: merkle_proof::MerkleTree::create(leaves, depth), - mix_in_length, - depth, - } - } - - /// Returns 32 bytes representing the "mix in length" for the merkle root of this tree. - fn length_bytes(&self) -> Vec { - int_to_bytes32(self.mix_in_length) - } - - /// Retrieve the root hash of this Merkle tree with the length mixed in. - pub fn root(&self) -> Hash256 { - let mut preimage = [0; 64]; - preimage[0..32].copy_from_slice(&self.tree.hash()[..]); - preimage[32..64].copy_from_slice(&self.length_bytes()); - Hash256::from_slice(&hash(&preimage)) - } - - /// Return the leaf at `index` and a Merkle proof of its inclusion. - /// - /// The Merkle proof is in "bottom-up" order, starting with a leaf node - /// and moving up the tree. Its length will be exactly equal to `depth + 1`. - pub fn generate_proof(&self, index: usize) -> (Hash256, Vec) { - let (root, mut proof) = self.tree.generate_proof(index, self.depth); - proof.push(Hash256::from_slice(&self.length_bytes())); - (root, proof) - } - - /// Add a deposit to the merkle tree. 
- pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), Error> { - self.tree - .push_leaf(leaf, self.depth) - .map_err(Error::DepositTreeError)?; - self.mix_in_length += 1; - Ok(()) - } -} - #[derive(Encode, Decode, Clone)] pub struct SszDepositCache { logs: Vec, @@ -202,7 +152,9 @@ impl DepositCache { let deposit = Hash256::from_slice(&log.deposit_data.tree_hash_root()); self.leaves.push(deposit); self.logs.push(log); - self.deposit_tree.push_leaf(deposit)?; + self.deposit_tree + .push_leaf(deposit) + .map_err(Error::DepositTreeError)?; self.deposit_roots.push(self.deposit_tree.root()); Ok(()) } @@ -334,13 +286,6 @@ impl DepositCache { } } -/// Returns `int` as little-endian bytes with a length of 32. -fn int_to_bytes32(int: usize) -> Vec { - let mut vec = int.to_le_bytes().to_vec(); - vec.resize(32, 0); - vec -} - #[cfg(test)] pub mod tests { use super::*; diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 7baa4dcddf..2b2d2b829e 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -34,6 +34,7 @@ slot_clock = { path = "../../eth2/utils/slot_clock" } hex = "0.3" parking_lot = "0.9" futures = "0.1.29" +operation_pool = { path = "../../eth2/operation_pool" } [dev-dependencies] remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" } diff --git a/beacon_node/rest_api/src/advanced.rs b/beacon_node/rest_api/src/advanced.rs index d7ab80299a..ca0ae51f55 100644 --- a/beacon_node/rest_api/src/advanced.rs +++ b/beacon_node/rest_api/src/advanced.rs @@ -2,6 +2,7 @@ use crate::response_builder::ResponseBuilder; use crate::ApiResult; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; +use operation_pool::PersistedOperationPool; use std::sync::Arc; /// Returns the `proto_array` fork choice struct, encoded as JSON. 
@@ -13,3 +14,15 @@ pub fn get_fork_choice( ) -> ApiResult { ResponseBuilder::new(&req)?.body_no_ssz(&*beacon_chain.fork_choice.core_proto_array()) } + +/// Returns the `PersistedOperationPool` struct. +/// +/// Useful for debugging or advanced inspection of the stored operations. +pub fn get_operation_pool( + req: Request, + beacon_chain: Arc>, +) -> ApiResult { + ResponseBuilder::new(&req)?.body(&PersistedOperationPool::from_operation_pool( + &beacon_chain.op_pool, + )) +} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 26de0a819d..752414e5f1 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -137,16 +137,10 @@ pub fn state_at_slot( beacon_chain: &BeaconChain, slot: Slot, ) -> Result<(Hash256, BeaconState), ApiError> { - let head_state = &beacon_chain.head()?.beacon_state; + let head = beacon_chain.head()?; - if head_state.slot == slot { - // The request slot is the same as the best block (head) slot. - - // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary. 
- Ok(( - beacon_chain.head()?.beacon_state_root, - beacon_chain.head()?.beacon_state, - )) + if head.beacon_state.slot == slot { + Ok((head.beacon_state_root, head.beacon_state)) } else { let root = state_root_at_slot(beacon_chain, slot)?; diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index 02c4678c95..0e629d6359 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -151,6 +151,9 @@ pub fn route( (&Method::GET, "/advanced/fork_choice") => { into_boxfut(advanced::get_fork_choice::(req, beacon_chain)) } + (&Method::GET, "/advanced/operation_pool") => { + into_boxfut(advanced::get_operation_pool::(req, beacon_chain)) + } (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::( req, diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 09565e3338..c3c7510520 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -156,6 +156,7 @@ fn return_validator_duties( let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) .map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?; + state.update_pubkey_cache()?; state .build_committee_cache(relative_epoch, &beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index 12eed8bbc5..eae9f3a328 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -6,7 +6,8 @@ use node_test_rig::{ testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, }; use remote_beacon_node::{ - Committee, HeadBeaconBlock, PublishStatus, ValidatorDuty, ValidatorResponse, + Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorDuty, + ValidatorResponse, }; use std::convert::TryInto; use std::sync::Arc; @@ -237,10 +238,12 @@ fn 
check_duties( "there should be a duty for each validator" ); - let state = beacon_chain + let mut state = beacon_chain .state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) .expect("should get state at slot"); + state.build_all_caches(spec).expect("should build caches"); + validators .iter() .zip(duties.iter()) @@ -816,6 +819,29 @@ fn get_fork_choice() { ); } +#[test] +fn get_operation_pool() { + let mut env = build_env(); + + let node = build_node(&mut env, testing_client_config()); + let remote_node = node.remote_node().expect("should produce remote node"); + + let result = env + .runtime() + .block_on(remote_node.http.advanced().get_operation_pool()) + .expect("should not error when getting the operation pool"); + + let expected = PersistedOperationPool::from_operation_pool( + &node + .client + .beacon_chain() + .expect("node should have chain") + .op_pool, + ); + + assert_eq!(result, expected, "result should be as expected"); +} + fn compare_validator_response( state: &BeaconState, response: &ValidatorResponse, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 75efeea6c6..814e2f9728 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -377,7 +377,6 @@ fn init_new_client( eth2_testnet_config.deposit_contract_deploy_block; client_config.eth1.follow_distance = spec.eth1_follow_distance / 2; - client_config.dummy_eth1_backend = false; client_config.eth1.lowest_cached_block_number = client_config .eth1 .deposit_contract_deploy_block diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index e9344a9675..ded1e739e5 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -11,6 +11,8 @@ types = { path = "../types" } state_processing = { path = "../state_processing" } eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" +serde = "1.0.102" +serde_derive = "1.0.102" [dev-dependencies] rand = "0.7.2" diff --git a/eth2/operation_pool/src/attestation_id.rs 
b/eth2/operation_pool/src/attestation_id.rs index dfe55581d9..38eb8f9e2a 100644 --- a/eth2/operation_pool/src/attestation_id.rs +++ b/eth2/operation_pool/src/attestation_id.rs @@ -1,10 +1,13 @@ use int_to_bytes::int_to_bytes8; +use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; use types::{AttestationData, BeaconState, ChainSpec, Domain, Epoch, EthSpec}; /// Serialized `AttestationData` augmented with a domain to encode the fork info. -#[derive(PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode)] +#[derive( + PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode, Serialize, Deserialize, +)] pub struct AttestationId { v: Vec, } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 9044fc6e47..e1239f3751 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -284,6 +284,16 @@ impl OperationPool { }); } + /// Total number of attester slashings in the pool. + pub fn num_attester_slashings(&self) -> usize { + self.attester_slashings.read().len() + } + + /// Total number of proposer slashings in the pool. + pub fn num_proposer_slashings(&self) -> usize { + self.proposer_slashings.read().len() + } + /// Insert a voluntary exit, validating it almost-entirely (future exits are permitted). pub fn insert_voluntary_exit( &self, @@ -327,6 +337,11 @@ impl OperationPool { self.prune_attester_slashings(finalized_state, spec); self.prune_voluntary_exits(finalized_state); } + + /// Total number of voluntary exits in the pool. + pub fn num_voluntary_exits(&self) -> usize { + self.voluntary_exits.read().len() + } } /// Filter up to a maximum number of operations out of an iterator. 
diff --git a/eth2/operation_pool/src/persistence.rs b/eth2/operation_pool/src/persistence.rs index 7e9ae7096a..44e4c31adf 100644 --- a/eth2/operation_pool/src/persistence.rs +++ b/eth2/operation_pool/src/persistence.rs @@ -1,6 +1,7 @@ use crate::attestation_id::AttestationId; use crate::OperationPool; use parking_lot::RwLock; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use types::*; @@ -8,7 +9,8 @@ use types::*; /// /// Operations are stored in arbitrary order, so it's not a good idea to compare instances /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first. -#[derive(Clone, Encode, Decode)] +#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] +#[serde(bound = "T: EthSpec")] pub struct PersistedOperationPool { /// Mapping from attestation ID to attestation mappings. // We could save space by not storing the attestation ID, but it might diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 80b2525ef5..c8ae888aa9 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -31,6 +31,8 @@ tree_hash = "0.1.0" tree_hash_derive = "0.2" types = { path = "../types" } rayon = "1.2.0" +eth2_hashing = { path = "../utils/eth2_hashing" } +int_to_bytes = { path = "../utils/int_to_bytes" } [features] fake_crypto = ["bls/fake_crypto"] diff --git a/eth2/state_processing/src/common/deposit_data_tree.rs b/eth2/state_processing/src/common/deposit_data_tree.rs new file mode 100644 index 0000000000..e2d92d56d7 --- /dev/null +++ b/eth2/state_processing/src/common/deposit_data_tree.rs @@ -0,0 +1,52 @@ +use eth2_hashing::hash; +use int_to_bytes::int_to_bytes32; +use merkle_proof::{MerkleTree, MerkleTreeError}; +use types::Hash256; + +/// Emulates the eth1 deposit contract merkle tree. 
+pub struct DepositDataTree { + tree: MerkleTree, + mix_in_length: usize, + depth: usize, +} + +impl DepositDataTree { + /// Create a new Merkle tree from a list of leaves (`DepositData::tree_hash_root`) and a fixed depth. + pub fn create(leaves: &[Hash256], mix_in_length: usize, depth: usize) -> Self { + Self { + tree: MerkleTree::create(leaves, depth), + mix_in_length, + depth, + } + } + + /// Returns 32 bytes representing the "mix in length" for the merkle root of this tree. + fn length_bytes(&self) -> Vec { + int_to_bytes32(self.mix_in_length as u64) + } + + /// Retrieve the root hash of this Merkle tree with the length mixed in. + pub fn root(&self) -> Hash256 { + let mut preimage = [0; 64]; + preimage[0..32].copy_from_slice(&self.tree.hash()[..]); + preimage[32..64].copy_from_slice(&self.length_bytes()); + Hash256::from_slice(&hash(&preimage)) + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth + 1`. + pub fn generate_proof(&self, index: usize) -> (Hash256, Vec) { + let (root, mut proof) = self.tree.generate_proof(index, self.depth); + proof.push(Hash256::from_slice(&self.length_bytes())); + (root, proof) + } + + /// Add a deposit to the merkle tree. 
+ pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> { + self.tree.push_leaf(leaf, self.depth)?; + self.mix_in_length += 1; + Ok(()) + } +} diff --git a/eth2/state_processing/src/common/mod.rs b/eth2/state_processing/src/common/mod.rs index ce17b42625..fa0f3d7e16 100644 --- a/eth2/state_processing/src/common/mod.rs +++ b/eth2/state_processing/src/common/mod.rs @@ -1,9 +1,11 @@ +mod deposit_data_tree; mod get_attesting_indices; mod get_base_reward; mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; +pub use deposit_data_tree::DepositDataTree; pub use get_attesting_indices::get_attesting_indices; pub use get_base_reward::get_base_reward; pub use get_indexed_attestation::get_indexed_attestation; diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index f8235db709..88bb4adec3 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -1,6 +1,7 @@ use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; +use crate::common::DepositDataTree; use tree_hash::TreeHash; -use types::typenum::U4294967296; +use types::DEPOSIT_TREE_DEPTH; use types::*; /// Initialize a `BeaconState` from genesis data. 
@@ -26,14 +27,13 @@ pub fn initialize_beacon_state_from_eth1( // Seed RANDAO with Eth1 entropy state.fill_randao_mixes_with(eth1_block_hash); - // Process deposits - let leaves: Vec<_> = deposits - .iter() - .map(|deposit| deposit.data.clone()) - .collect(); - for (index, deposit) in deposits.into_iter().enumerate() { - let deposit_data_list = VariableList::<_, U4294967296>::from(leaves[..=index].to_vec()); - state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root()); + let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + + for deposit in deposits.iter() { + deposit_tree + .push_leaf(Hash256::from_slice(&deposit.data.tree_hash_root())) + .map_err(BlockProcessingError::MerkleTreeError)?; + state.eth1_data.deposit_root = deposit_tree.root(); process_deposit(&mut state, &deposit, spec, true)?; } diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index c21a570645..2b17a26d25 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -1,4 +1,5 @@ use super::signature_sets::Error as SignatureSetError; +use merkle_proof::MerkleTreeError; use types::*; /// The error returned from the `per_block_processing` function. Indicates that a block is either @@ -46,6 +47,7 @@ pub enum BlockProcessingError { BeaconStateError(BeaconStateError), SignatureSetError(SignatureSetError), SszTypesError(ssz_types::Error), + MerkleTreeError(MerkleTreeError), } impl From for BlockProcessingError { diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index ac17d6d04a..b86ba42de7 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -97,6 +97,21 @@ impl BeaconTreeHashCache { pub fn is_initialized(&self) -> bool { self.initialized } + + /// Returns the approximate size of the cache in bytes. 
+ /// + /// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers. + /// We focus instead on the lists of hashes, which should massively outweigh the items that we + /// ignore. + pub fn approx_mem_size(&self) -> usize { + self.block_roots.approx_mem_size() + + self.state_roots.approx_mem_size() + + self.historical_roots.approx_mem_size() + + self.validators.approx_mem_size() + + self.balances.approx_mem_size() + + self.randao_mixes.approx_mem_size() + + self.slashings.approx_mem_size() + } } /// The state of the `BeaconChain` at some slot. @@ -996,6 +1011,12 @@ impl BeaconState { tree_hash_cache: BeaconTreeHashCache::default(), } } + + pub fn clone_with_only_committee_caches(&self) -> Self { + let mut state = self.clone_without_caches(); + state.committee_caches = self.committee_caches.clone(); + state + } } impl From for Error { diff --git a/eth2/utils/cached_tree_hash/src/cache.rs b/eth2/utils/cached_tree_hash/src/cache.rs index 4a5d650fb2..002b4f9b0b 100644 --- a/eth2/utils/cached_tree_hash/src/cache.rs +++ b/eth2/utils/cached_tree_hash/src/cache.rs @@ -127,6 +127,15 @@ impl TreeHashCache { pub fn leaves(&mut self) -> &mut Vec { &mut self.layers[self.depth] } + + /// Returns the approximate size of the cache in bytes. + /// + /// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers. + /// We focus instead on the lists of hashes, which should massively outweigh the items that we + /// ignore. + pub fn approx_mem_size(&self) -> usize { + self.layers.iter().map(|layer| layer.len() * 32).sum() + } } /// Compute the dirty indices for one layer up. 
diff --git a/eth2/utils/cached_tree_hash/src/multi_cache.rs b/eth2/utils/cached_tree_hash/src/multi_cache.rs index df2f6a0113..5ecdd3f4ab 100644 --- a/eth2/utils/cached_tree_hash/src/multi_cache.rs +++ b/eth2/utils/cached_tree_hash/src/multi_cache.rs @@ -16,6 +16,22 @@ pub struct MultiTreeHashCache { value_caches: Vec, } +impl MultiTreeHashCache { + /// Returns the approximate size of the cache in bytes. + /// + /// The size is approximate because we ignore some stack-allocated `u64` and `Vec` pointers. + /// We focus instead on the lists of hashes, which should massively outweigh the items that we + /// ignore. + pub fn approx_mem_size(&self) -> usize { + self.list_cache.approx_mem_size() + + self + .value_caches + .iter() + .map(TreeHashCache::approx_mem_size) + .sum::() + } +} + impl CachedTreeHash for VariableList where T: CachedTreeHash, diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index bfc9cc26ec..1ed9d8070a 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -28,7 +28,7 @@ pub enum MerkleTree { Zero(usize), } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum MerkleTreeError { // Trying to push in a leaf LeafReached, diff --git a/eth2/utils/remote_beacon_node/Cargo.toml b/eth2/utils/remote_beacon_node/Cargo.toml index f17109d8d9..ad1b8d4693 100644 --- a/eth2/utils/remote_beacon_node/Cargo.toml +++ b/eth2/utils/remote_beacon_node/Cargo.toml @@ -18,3 +18,4 @@ eth2_ssz = { path = "../../../eth2/utils/ssz" } serde_json = "^1.0" eth2_config = { path = "../../../eth2/utils/eth2_config" } proto_array_fork_choice = { path = "../../../eth2/proto_array_fork_choice" } +operation_pool = { path = "../../../eth2/operation_pool" } diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs index f01c9f1aa0..60668b4732 100644 --- a/eth2/utils/remote_beacon_node/src/lib.rs +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -5,7 +5,6 @@ 
use eth2_config::Eth2Config; use futures::{future, Future, IntoFuture}; -use proto_array_fork_choice::core::ProtoArray; use reqwest::{ r#async::{Client, ClientBuilder, Response}, StatusCode, @@ -20,6 +19,8 @@ use types::{ }; use url::Url; +pub use operation_pool::PersistedOperationPool; +pub use proto_array_fork_choice::core::ProtoArray; pub use rest_api::{ CanonicalHeadResponse, Committee, HeadBeaconBlock, ValidatorDutiesRequest, ValidatorDuty, ValidatorRequest, ValidatorResponse, @@ -560,6 +561,16 @@ impl Advanced { .into_future() .and_then(move |url| client.json_get(url, vec![])) } + + /// Gets the core `PersistedOperationPool` struct from the node. + pub fn get_operation_pool( + &self, + ) -> impl Future, Error = Error> { + let client = self.0.clone(); + self.url("operation_pool") + .into_future() + .and_then(move |url| client.json_get(url, vec![])) + } } #[derive(Deserialize)] diff --git a/lcli/src/change_genesis_time.rs b/lcli/src/change_genesis_time.rs new file mode 100644 index 0000000000..3c94418510 --- /dev/null +++ b/lcli/src/change_genesis_time.rs @@ -0,0 +1,40 @@ +use clap::ArgMatches; +use ssz::{Decode, Encode}; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::PathBuf; +use types::{BeaconState, EthSpec}; + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let path = matches + .value_of("ssz-state") + .ok_or_else(|| "ssz-state not specified")? + .parse::() + .map_err(|e| format!("Unable to parse ssz-state: {}", e))?; + + let genesis_time = matches + .value_of("genesis-time") + .ok_or_else(|| "genesis-time not specified")? + .parse::() + .map_err(|e| format!("Unable to parse genesis-time: {}", e))?; + + let mut state: BeaconState = { + let mut file = File::open(&path).map_err(|e| format!("Unable to open file: {}", e))?; + + let mut ssz = vec![]; + + file.read_to_end(&mut ssz) + .map_err(|e| format!("Unable to read file: {}", e))?; + + BeaconState::from_ssz_bytes(&ssz).map_err(|e| format!("Unable to decode SSZ: {:?}", e))? 
+ }; + + state.genesis_time = genesis_time; + + let mut file = File::create(path).map_err(|e| format!("Unable to create file: {}", e))?; + + file.write_all(&state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to file: {}", e))?; + + Ok(()) +} diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs new file mode 100644 index 0000000000..6c0748e59c --- /dev/null +++ b/lcli/src/interop_genesis.rs @@ -0,0 +1,65 @@ +use clap::ArgMatches; +use environment::Environment; +use eth2_testnet_config::Eth2TestnetConfig; +use genesis::interop_genesis_state; +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; +use types::{test_utils::generate_deterministic_keypairs, Epoch, EthSpec, Fork}; + +pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { + let validator_count = matches + .value_of("validator-count") + .ok_or_else(|| "validator-count not specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator-count: {}", e))?; + + let genesis_time = if let Some(genesis_time) = matches.value_of("genesis-time") { + genesis_time + .parse::() + .map_err(|e| format!("Unable to parse genesis-time: {}", e))? + } else { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to get time: {:?}", e))? + .as_secs() + }; + + let testnet_dir = matches + .value_of("testnet-dir") + .ok_or_else(|| ()) + .and_then(|dir| dir.parse::().map_err(|_| ())) + .unwrap_or_else(|_| { + dirs::home_dir() + .map(|home| home.join(".lighthouse").join("testnet")) + .expect("should locate home directory") + }); + + let mut eth2_testnet_config: Eth2TestnetConfig = + Eth2TestnetConfig::load(testnet_dir.clone())?; + + let mut spec = eth2_testnet_config + .yaml_config + .as_ref() + .ok_or_else(|| "The testnet directory must contain a spec config".to_string())? 
+ .apply_to_chain_spec::(&env.core_context().eth2_config.spec) + .ok_or_else(|| { + format!( + "The loaded config is not compatible with the {} spec", + &env.core_context().eth2_config.spec_constants + ) + })?; + + spec.genesis_fork = Fork { + previous_version: [0, 0, 0, 0], + current_version: [1, 3, 3, 7], + epoch: Epoch::new(0), + }; + + let keypairs = generate_deterministic_keypairs(validator_count); + let genesis_state = interop_genesis_state(&keypairs, genesis_time, &spec)?; + + eth2_testnet_config.genesis_state = Some(genesis_state); + eth2_testnet_config.force_write_to_file(testnet_dir)?; + + Ok(()) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 2213f89c64..1119a54269 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,8 +1,10 @@ #[macro_use] extern crate log; +mod change_genesis_time; mod deploy_deposit_contract; mod eth1_genesis; +mod interop_genesis; mod parse_hex; mod refund_deposit_contract; mod transition_blocks; @@ -226,6 +228,59 @@ fn main() { .help("The URL to the eth1 JSON-RPC http API."), ) ) + .subcommand( + SubCommand::with_name("interop-genesis") + .about( + "Produces an interop-compatible genesis state using deterministic keypairs", + ) + .arg( + Arg::with_name("testnet-dir") + .short("d") + .long("testnet-dir") + .value_name("PATH") + .takes_value(true) + .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), + ) + .arg( + Arg::with_name("validator-count") + .long("validator-count") + .index(1) + .value_name("INTEGER") + .takes_value(true) + .default_value("1024") + .help("The number of validators in the genesis state."), + ) + .arg( + Arg::with_name("genesis-time") + .long("genesis-time") + .short("t") + .value_name("UNIX_EPOCH") + .takes_value(true) + .help("The value for state.genesis_time. 
Defaults to now."), + ) + ) + .subcommand( + SubCommand::with_name("change-genesis-time") + .about( + "Loads a file with an SSZ-encoded BeaconState and modifies the genesis time.", + ) + .arg( + Arg::with_name("ssz-state") + .index(1) + .value_name("PATH") + .takes_value(true) + .required(true) + .help("The path to the SSZ file"), + ) + .arg( + Arg::with_name("genesis-time") + .index(2) + .value_name("UNIX_EPOCH") + .takes_value(true) + .required(true) + .help("The value for state.genesis_time."), + ) + ) .get_matches(); macro_rules! run_with_spec { @@ -306,6 +361,10 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) { } ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, matches) .unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)), + ("interop-genesis", Some(matches)) => interop_genesis::run::(env, matches) + .unwrap_or_else(|e| error!("Failed to run interop-genesis command: {}", e)), + ("change-genesis-time", Some(matches)) => change_genesis_time::run::(matches) + .unwrap_or_else(|e| error!("Failed to run change-genesis-time command: {}", e)), (other, _) => error!("Unknown subcommand {}. See --help.", other), } } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index fd1d46fb9e..4d25ce8c5f 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -14,6 +14,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value(&DEFAULT_HTTP_SERVER) .takes_value(true), ) + .arg( + Arg::with_name("allow-unsynced") + .long("allow-unsynced") + .help("If present, the validator client will still poll for duties if the beacon + node is not synced.") + ) /* * The "testnet" sub-command. 
* diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 76c9bf814d..9e34838776 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -32,6 +32,9 @@ pub struct Config { /// /// Should be similar to `http://localhost:8080` pub http_server: String, + /// If true, the validator client will still poll for duties and produce blocks even if the + /// beacon node is not synced at startup. + pub allow_unsynced_beacon_node: bool, } impl Default for Config { @@ -44,6 +47,7 @@ impl Default for Config { data_dir, key_source: <_>::default(), http_server: DEFAULT_HTTP_SERVER.to_string(), + allow_unsynced_beacon_node: false, } } } @@ -71,7 +75,7 @@ impl Config { config.http_server = server.to_string(); } - let config = match cli_args.subcommand() { + let mut config = match cli_args.subcommand() { ("testnet", Some(sub_cli_args)) => { if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { return Err( @@ -88,6 +92,8 @@ impl Config { } }; + config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); + Ok(config) } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index eb91f0a414..bbbe04592c 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -190,6 +190,7 @@ pub struct DutiesServiceBuilder { slot_clock: Option, beacon_node: Option>, context: Option>, + allow_unsynced_beacon_node: bool, } impl DutiesServiceBuilder { @@ -199,6 +200,7 @@ impl DutiesServiceBuilder { slot_clock: None, beacon_node: None, context: None, + allow_unsynced_beacon_node: false, } } @@ -222,6 +224,12 @@ impl DutiesServiceBuilder { self } + /// Set to `true` to allow polling for duties when the beacon node is not synced. 
+ pub fn allow_unsynced_beacon_node(mut self, allow_unsynced_beacon_node: bool) -> Self { + self.allow_unsynced_beacon_node = allow_unsynced_beacon_node; + self + } + pub fn build(self) -> Result, String> { Ok(DutiesService { inner: Arc::new(Inner { @@ -238,6 +246,7 @@ impl DutiesServiceBuilder { context: self .context .ok_or_else(|| "Cannot build DutiesService without runtime_context")?, + allow_unsynced_beacon_node: self.allow_unsynced_beacon_node, }), }) } @@ -250,6 +259,9 @@ pub struct Inner { pub(crate) slot_clock: T, beacon_node: RemoteBeaconNode, context: RuntimeContext, + /// If true, the duties service will poll for duties from the beacon node even if it is not + /// synced. + allow_unsynced_beacon_node: bool, } /// Maintains a store of the duties for all voting validators in the `validator_store`. @@ -404,36 +416,38 @@ impl DutiesService { .and_then(move |(current_epoch, beacon_head_epoch)| { let log = service_3.context.log.clone(); - let future: Box + Send> = - if beacon_head_epoch + 1 < current_epoch { - error!( - log, - "Beacon node is not synced"; - "node_head_epoch" => format!("{}", beacon_head_epoch), - "current_epoch" => format!("{}", current_epoch), - ); + let future: Box + Send> = if beacon_head_epoch + 1 + < current_epoch + && !service_3.allow_unsynced_beacon_node + { + error!( + log, + "Beacon node is not synced"; + "node_head_epoch" => format!("{}", beacon_head_epoch), + "current_epoch" => format!("{}", current_epoch), + ); - Box::new(future::ok(())) - } else { - Box::new(service_3.update_epoch(current_epoch).then(move |result| { - if let Err(e) = result { - error!( - log, - "Failed to get current epoch duties"; - "http_error" => format!("{:?}", e) - ); - } + Box::new(future::ok(())) + } else { + Box::new(service_3.update_epoch(current_epoch).then(move |result| { + if let Err(e) = result { + error!( + log, + "Failed to get current epoch duties"; + "http_error" => format!("{:?}", e) + ); + } - let log = service_4.context.log.clone(); - 
service_4.update_epoch(current_epoch + 1).map_err(move |e| { - error!( - log, - "Failed to get next epoch duties"; - "http_error" => format!("{:?}", e) - ); - }) - })) - }; + let log = service_4.context.log.clone(); + service_4.update_epoch(current_epoch + 1).map_err(move |e| { + error!( + log, + "Failed to get next epoch duties"; + "http_error" => format!("{:?}", e) + ); + }) + })) + }; future }) diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index a9660ef852..9d529ab41e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -210,6 +210,7 @@ impl ProductionValidatorClient { .validator_store(validator_store.clone()) .beacon_node(beacon_node.clone()) .runtime_context(context.service_context("duties".into())) + .allow_unsynced_beacon_node(config.allow_unsynced_beacon_node) .build()?; let block_service = BlockServiceBuilder::new() diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 96c945729e..ac7ebef859 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -33,8 +33,10 @@ impl ValidatorStore { fork_service: ForkService, log: Logger, ) -> Result { - let validator_iter = read_dir(&base_dir) + let validator_key_values = read_dir(&base_dir) .map_err(|e| format!("Failed to read base directory {:?}: {:?}", base_dir, e))? + .collect::>() + .into_par_iter() .filter_map(|validator_dir| { let path = validator_dir.ok()?.path(); @@ -63,7 +65,7 @@ impl ValidatorStore { }); Ok(Self { - validators: Arc::new(RwLock::new(HashMap::from_iter(validator_iter))), + validators: Arc::new(RwLock::new(HashMap::from_par_iter(validator_key_values))), spec: Arc::new(spec), log, temp_dir: None,