From e23726c0a1b02a1afb24f84e799dca881f821cbc Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 9 Jul 2019 12:36:26 +0200 Subject: [PATCH 001/186] Renamed fork_choice::process_attestation_from_block --- beacon_node/beacon_chain/src/fork_choice.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index c693145ea6..cdda563868 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -112,7 +112,7 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation_from_block(state, attestation)?; + self.process_attestation(state, attestation)?; } self.backend.process_block(block, block_root)?; @@ -120,7 +120,7 @@ impl ForkChoice { Ok(()) } - fn process_attestation_from_block( + pub fn process_attestation( &self, state: &BeaconState, attestation: &Attestation, From adf1d9c533d51c908b8e3b7430ba7e2554a2ef45 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 9 Jul 2019 12:36:59 +0200 Subject: [PATCH 002/186] Processing attestation in fork choice --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2d82822701..ca4667e00e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -480,6 +480,14 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); + match self.store.exists::(&attestation.data.target_root) { + Ok(true) => { + per_block_processing::validate_attestation_time_independent_only(&*self.state.read(), &attestation, &self.spec)?; + self.fork_choice.process_attestation(&*self.state.read(), 
&attestation); + }, + _ => {} + }; + let result = self .op_pool .insert_attestation(attestation, &*self.state.read(), &self.spec); From 40b166edcdb5d6102216f8f00397659089027ccc Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Thu, 11 Jul 2019 16:32:01 +0200 Subject: [PATCH 003/186] Retrieving state from store and checking signature --- beacon_node/beacon_chain/src/beacon_chain.rs | 24 ++++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ca4667e00e..74d24244eb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -15,7 +15,7 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::{ per_block_processing, per_block_processing_without_verifying_block_signature, - per_slot_processing, BlockProcessingError, + per_slot_processing, BlockProcessingError, common }; use std::sync::Arc; use store::iter::{BlockIterator, BlockRootsIterator, StateRootsIterator}; @@ -480,14 +480,24 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - match self.store.exists::(&attestation.data.target_root) { - Ok(true) => { - per_block_processing::validate_attestation_time_independent_only(&*self.state.read(), &attestation, &self.spec)?; - self.fork_choice.process_attestation(&*self.state.read(), &attestation); - }, - _ => {} + // Retrieve the attestation's state from `store` if necessary. 
+ let attestation_state = match attestation.data.beacon_block_root == self.canonical_head.read().beacon_block_root { + true => Some(self.state.read().clone()), + false => match self.store.get::(&attestation.data.beacon_block_root) { + Ok(Some(block)) => match self.store.get::>(&block.state_root) { + Ok(state) => state, + _ => None + }, + _ => None + } }; + if let Some(state) = attestation_state { + let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; + per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + self.fork_choice.process_attestation(&state, &attestation); + } + let result = self .op_pool .insert_attestation(attestation, &*self.state.read(), &self.spec); From 7cdfa3cc279be167b933b3cb632f83233d35baf8 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Fri, 19 Jul 2019 14:52:01 +0200 Subject: [PATCH 004/186] Looser check on beacon state validity. --- beacon_node/beacon_chain/src/beacon_chain.rs | 42 ++++++++++++++------ 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f18b49e129..701d900c76 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -493,19 +493,7 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - // Retrieve the attestation's state from `store` if necessary. 
- let attestation_state = match attestation.data.beacon_block_root == self.canonical_head.read().beacon_block_root { - true => Some(self.state.read().clone()), - false => match self.store.get::(&attestation.data.beacon_block_root) { - Ok(Some(block)) => match self.store.get::>(&block.state_root) { - Ok(state) => state, - _ => None - }, - _ => None - } - }; - - if let Some(state) = attestation_state { + if let Some(state) = self.get_attestation_state(&attestation) { let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; self.fork_choice.process_attestation(&state, &attestation); @@ -535,6 +523,34 @@ impl BeaconChain { result } + fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); + for (root, slot) in blocks { + if root == attestation.data.target_root + && self.slot_epochs_equal_or_adjacent(slot, self.state.read().slot) { + return Some(self.state.read().clone()); + } + }; + + match self.store.get::(&attestation.data.target_root) { + Ok(Some(block)) => match self.store.get::>(&block.state_root) { + Ok(state) => state, + _ => None + }, + _ => None + } + } + + fn slot_epochs_equal_or_adjacent(&self, slot_a: Slot, slot_b: Slot) -> bool { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let epoch_a = slot_a.epoch(slots_per_epoch); + let epoch_b = slot_b.epoch(slots_per_epoch); + + epoch_a == epoch_b + || epoch_a + 1 == epoch_b + || epoch_b + 1 == epoch_a + } + /// Accept some deposit and queue it for inclusion in an appropriate block. 
pub fn process_deposit( &self, From bef7ca6bfb1bb68d43d58b69e9a658b59c69d014 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sat, 20 Jul 2019 12:47:59 +0200 Subject: [PATCH 005/186] Cleaned up get_attestation_state --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 701d900c76..d02ab31763 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -523,15 +523,24 @@ impl BeaconChain { result } + /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + // Current state is used if the attestation targets a historic block and a slot within an + // equal or adjacent epoch. + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); for (root, slot) in blocks { - if root == attestation.data.target_root - && self.slot_epochs_equal_or_adjacent(slot, self.state.read().slot) { + if root == attestation.data.target_root { return Some(self.state.read().clone()); } + + if slot == min_slot { + break; + } }; + // A different state is retrieved from the database. 
match self.store.get::(&attestation.data.target_root) { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, @@ -541,16 +550,6 @@ impl BeaconChain { } } - fn slot_epochs_equal_or_adjacent(&self, slot_a: Slot, slot_b: Slot) -> bool { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let epoch_a = slot_a.epoch(slots_per_epoch); - let epoch_b = slot_b.epoch(slots_per_epoch); - - epoch_a == epoch_b - || epoch_a + 1 == epoch_b - || epoch_b + 1 == epoch_a - } - /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn process_deposit( &self, From 3b8a584c550fc7788806e04e7a7e76a9dfb07796 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sun, 21 Jul 2019 22:53:39 +0200 Subject: [PATCH 006/186] Expanded fork choice api to provide latest validator message. --- eth2/lmd_ghost/src/lib.rs | 3 +++ eth2/lmd_ghost/src/reduced_tree.rs | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index dd413e2eb5..f18b5b81f7 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -43,4 +43,7 @@ pub trait LmdGhost: Send + Sync { finalized_block: &BeaconBlock, finalized_block_root: Hash256, ) -> Result<()>; + + /// Returns the latest message for a given validator index. 
+ fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index dace2bda6f..f069ae68c5 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -87,6 +87,12 @@ where .update_root(new_block.slot, new_root) .map_err(|e| format!("update_finalized_root failed: {:?}", e)) } + + fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + self.core + .write() + .latest_message(validator_index) + } } struct ReducedTree { @@ -222,6 +228,13 @@ where Ok(head_node.block_hash) } + pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + match self.latest_votes.get(validator_index) { + Some(v) => Some((v.hash.clone(), v.slot.clone())), + None => None + } + } + fn find_head_from<'a>(&'a self, start_node: &'a Node) -> Result<&'a Node> { if start_node.does_not_have_children() { Ok(start_node) From b2471eca494369e946fa212610af6b2c8be44802 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 23 Jul 2019 20:50:18 +0200 Subject: [PATCH 007/186] Checking if the an attestation contains a latest message --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 ++++------------ beacon_node/beacon_chain/src/fork_choice.rs | 24 ++++++++++++++++++++ eth2/lmd_ghost/src/lib.rs | 2 +- eth2/lmd_ghost/src/reduced_tree.rs | 2 +- 4 files changed, 31 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d02ab31763..f215465f2f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -494,9 +494,11 @@ impl BeaconChain { let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; - 
per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; - self.fork_choice.process_attestation(&state, &attestation); + if self.fork_choice.should_process_attestation(&state, &attestation) { + let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; + per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + self.fork_choice.process_attestation(&state, &attestation); + } } let result = self @@ -509,17 +511,6 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - // TODO: process attestation. Please consider: - // - // - Because a block was not added to the op pool does not mean it's invalid (it might - // just be old). - // - The attestation should be rejected if we don't know the block (ideally it should be - // queued, but this may be overkill). - // - The attestation _must_ be validated against it's state before being added to fork - // choice. - // - You can avoid verifying some attestations by first checking if they're a latest - // message. This would involve expanding the `LmdGhost` API. - result } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index cdda563868..6b69e3e084 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -4,6 +4,7 @@ use state_processing::common::get_attesting_indices_unsorted; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256}; +use state_processing::common; type Result = std::result::Result; @@ -120,6 +121,9 @@ impl ForkChoice { Ok(()) } + /// Process an attestation. + /// + /// Assumes the attestation is valid. pub fn process_attestation( &self, state: &BeaconState, @@ -162,6 +166,26 @@ impl ForkChoice { Ok(()) } + /// Determines whether or not the given attestation contains a latest messages. 
+ pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> bool { + let validator_indices = common::get_attesting_indices_unsorted( + state, + &attestation.data, + &attestation.aggregation_bitfield, + ).unwrap(); + + let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + validator_indices + .iter() + .find(|&&v| { + match self.backend.latest_message(v) { + Some((_, slot)) => target_slot > slot, + None => true + } + }).is_some() + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. /// diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index f18b5b81f7..183d45c9a4 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -45,5 +45,5 @@ pub trait LmdGhost: Send + Sync { ) -> Result<()>; /// Returns the latest message for a given validator index. - fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)>; + fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index f069ae68c5..0985441dfc 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -88,7 +88,7 @@ where .map_err(|e| format!("update_finalized_root failed: {:?}", e)) } - fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.core .write() .latest_message(validator_index) From 51645aa9af00ef2b93226f65c0a64c411a73243d Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Wed, 24 Jul 2019 18:03:48 +0200 Subject: [PATCH 008/186] Correct process_attestation error handling. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++--- beacon_node/beacon_chain/src/errors.rs | 5 ++++ beacon_node/beacon_chain/src/fork_choice.rs | 11 +++++---- beacon_node/rpc/src/attestation.rs | 26 ++++++++++++++++++-- 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f215465f2f..67d9281277 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,6 +22,7 @@ use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, Sta use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; +use crate::BeaconChainError; // Text included in blocks. // Must be 32-bytes or panic. @@ -489,15 +490,15 @@ impl BeaconChain { pub fn process_attestation( &self, attestation: Attestation, - ) -> Result<(), AttestationValidationError> { + ) -> Result<(), Error> { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - if self.fork_choice.should_process_attestation(&state, &attestation) { + if self.fork_choice.should_process_attestation(&state, &attestation)? { let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; - self.fork_choice.process_attestation(&state, &attestation); + self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -511,7 +512,7 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - result + result.map_err(|e| BeaconChainError::AttestationValidationError(e)) } /// Retrieves the `BeaconState` used to create the attestation. 
@@ -968,3 +969,4 @@ impl From for Error { Error::BeaconStateError(e) } } + diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0d619d7f2d..4e2170ca84 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -3,6 +3,7 @@ use crate::metrics::Error as MetricsError; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; use types::*; +use state_processing::per_block_processing::errors::{AttestationValidationError, IndexedAttestationValidationError}; macro_rules! easy_from_to { ($from: ident, $to: ident) => { @@ -31,6 +32,8 @@ pub enum BeaconChainError { MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), MetricsError(String), + AttestationValidationError(AttestationValidationError), + IndexedAttestationValidationError(IndexedAttestationValidationError) } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -53,3 +56,5 @@ pub enum BlockProductionError { easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); +easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(IndexedAttestationValidationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 6b69e3e084..92b683590a 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -166,24 +166,24 @@ impl ForkChoice { Ok(()) } - /// Determines whether or not the given attestation contains a latest messages. - pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> bool { + /// Determines whether or not the given attestation contains a latest message. 
+ pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { let validator_indices = common::get_attesting_indices_unsorted( state, &attestation.data, &attestation.aggregation_bitfield, - ).unwrap(); + )?; let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); - validator_indices + Ok(validator_indices .iter() .find(|&&v| { match self.backend.latest_message(v) { Some((_, slot)) => target_slot > slot, None => true } - }).is_some() + }).is_some()) } /// Inform the fork choice that the given block (and corresponding root) have been finalized so @@ -218,3 +218,4 @@ impl From for Error { Error::BackendError(e) } } + diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index b85d4e9475..48d9eb4693 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,4 +1,4 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconChainError}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::TopicBuilder; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; @@ -159,7 +159,7 @@ impl AttestationService for AttestationServiceInstance { resp.set_success(true); } - Err(e) => { + Err(BeaconChainError::AttestationValidationError(e)) => { // Attestation was invalid warn!( self.log, @@ -170,6 +170,28 @@ impl AttestationService for AttestationServiceInstance { resp.set_success(false); resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec()); } + Err(BeaconChainError::IndexedAttestationValidationError(e)) => { + // Indexed attestation was invalid + warn!( + self.log, + "PublishAttestation"; + "type" => "invalid_attestation", + "error" => format!("{:?}", e), + ); + resp.set_success(false); + resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); + } + Err(e) => { + // Attestation was invalid + warn!( + self.log, + "PublishAttestation"; + "type" => 
"beacon_chain_error", + "error" => format!("{:?}", e), + ); + resp.set_success(false); + resp.set_msg(format!("There was a beacon chain error: {:?}", e).as_bytes().to_vec()); + } }; let error_log = self.log.clone(); From b49d592eee41fbbd04e88e8e6a08c3be372d1b77 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Wed, 24 Jul 2019 18:06:18 +0200 Subject: [PATCH 009/186] Copy paste error in comment fixed. --- beacon_node/rpc/src/attestation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 48d9eb4693..1cfa81a04b 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -182,7 +182,7 @@ impl AttestationService for AttestationServiceInstance { resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); } Err(e) => { - // Attestation was invalid + // Some other error warn!( self.log, "PublishAttestation"; From b096e3a6432be6ce6850b211704a858bfcad8a28 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 5 Aug 2019 16:25:21 +1000 Subject: [PATCH 010/186] Tidy ancestor iterators --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 +++++---- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/beacon_chain/src/iter.rs | 48 ++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 19 +-- beacon_node/rest_api/src/beacon_node.rs | 2 +- beacon_node/rpc/src/attestation.rs | 6 +- beacon_node/rpc/src/validator.rs | 6 +- beacon_node/store/src/iter.rs | 159 +------------------ eth2/lmd_ghost/tests/test.rs | 4 +- 11 files changed, 125 insertions(+), 199 deletions(-) create mode 100644 beacon_node/beacon_chain/src/iter.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d520f0b5c9..2901289943 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ 
b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,6 +1,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; +use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -19,7 +20,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; -use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; +use store::iter::{BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; @@ -224,45 +225,53 @@ impl BeaconChain { Ok(headers?) } - /// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot` - /// through to the genesis block. - /// - /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`. - /// - /// Contains duplicate headers when skip slots are encountered. - pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator { - BlockIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconBlock` roots and slots, first returning + /// `self.head().beacon_block` then all prior blocks until either genesis or if the database + /// fails to return a prior block. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// Returns duplicate roots for skip-slots. /// - /// Contains duplicate roots when skip slots are encountered. 
- pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator { - BlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - - /// Iterates in reverse (highest to lowest slot) through all block roots from largest - /// `slot <= beacon_state.slot` through to genesis. + /// Iterator returns `(Hash256, Slot)`. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// ## Note /// - /// Contains duplicate roots when skip slots are encountered. - pub fn rev_iter_best_block_roots( + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. + pub fn rev_iter_block_roots( &self, slot: Slot, - ) -> BestBlockRootsIterator { - BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + ) -> ReverseBlockRootIterator { + let state = &self.head().beacon_state; + let block_root = self.head().beacon_block_root; + let block_slot = state.slot; + + let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseBlockRootIterator::new((block_root, block_slot), iter) } - /// Iterates in reverse (highest to lowest slot) through all state roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconState` roots and slots, first returning + /// `self.head().beacon_state` then all prior states until either genesis or if the database + /// fails to return a prior state. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. - pub fn rev_iter_state_roots(&self, slot: Slot) -> StateRootsIterator { - StateRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + /// Iterator returns `(Hash256, Slot)`. + /// + /// ## Note + /// + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. 
+ pub fn rev_iter_state_roots( + &self, + slot: Slot, + ) -> ReverseStateRootIterator { + let state = &self.head().beacon_state; + let state_root = self.head().beacon_state_root; + let state_slot = state.slot; + + let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseStateRootIterator::new((state_root, state_slot), iter) } /// Returns the block at the given root, if any. @@ -279,8 +288,10 @@ impl BeaconChain { /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been /// updated to match the current slot clock. - pub fn current_state(&self) -> RwLockReadGuard> { - self.state.read() + pub fn speculative_state(&self) -> Result>, Error> { + // TODO: ensure the state has done a catch-up. + + Ok(self.state.read()) } /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index b77979b741..74778be32e 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -52,7 +52,7 @@ impl ForkChoice { // been justified for at least 1 epoch ... If no such descendant exists, // set justified_head to finalized_head. 
let (start_state, start_block_root, start_block_slot) = { - let state = chain.current_state(); + let state = &chain.head().beacon_state; let (block_root, block_slot) = if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { diff --git a/beacon_node/beacon_chain/src/iter.rs b/beacon_node/beacon_chain/src/iter.rs new file mode 100644 index 0000000000..f73e88afa8 --- /dev/null +++ b/beacon_node/beacon_chain/src/iter.rs @@ -0,0 +1,48 @@ +use store::iter::{BlockRootsIterator, StateRootsIterator}; +use types::{Hash256, Slot}; + +pub type ReverseBlockRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; +pub type ReverseStateRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; + +pub type ReverseHashAndSlotIterator = ReverseChainIterator<(Hash256, Slot), I>; + +/// Provides a wrapper for an iterator that returns a given `T` before it starts returning results of +/// the `Iterator`. +pub struct ReverseChainIterator { + first_value_used: bool, + first_value: T, + iter: I, +} + +impl ReverseChainIterator +where + T: Sized, + I: Iterator + Sized, +{ + pub fn new(first_value: T, iter: I) -> Self { + Self { + first_value_used: false, + first_value, + iter, + } + } +} + +impl Iterator for ReverseChainIterator +where + T: Clone, + I: Iterator, +{ + type Item = T; + + fn next(&mut self) -> Option { + if self.first_value_used { + self.iter.next() + } else { + self.first_value_used = true; + Some(self.first_value.clone()) + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index df1de153a2..c2efcad130 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ mod beacon_chain; mod checkpoint; mod errors; mod fork_choice; +mod iter; mod metrics; mod persisted_beacon_chain; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6242b8a0a6..cdcd8bb21e 100644 --- 
a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -198,7 +198,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.current_state().slot - 1) + .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ac001415cd..215e37e7f3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -266,7 +266,7 @@ impl SimpleSync { fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain - .rev_iter_best_block_roots(target_slot) + .rev_iter_block_roots(target_slot) .take(1) .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) @@ -280,6 +280,8 @@ impl SimpleSync { req: BeaconBlockRootsRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, "BlockRootsRequest"; @@ -290,8 +292,8 @@ impl SimpleSync { let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + req.count) - .take(req.count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) .collect(); @@ -302,7 +304,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.current_state().slot, + "current_slot" => self.chain.present_slot(), "requested" => req.count, "returned" => roots.len(), ); @@ -389,6 +391,8 @@ impl SimpleSync { req: BeaconBlockHeadersRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, 
"BlockHeadersRequest"; @@ -399,13 +403,10 @@ impl SimpleSync { let count = req.max_headers; // Collect the block roots. - // - // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids - // unnecessary block deserialization when `req.skip_slots > 0`. let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + count) - .take(count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(root, _slot)| root) .collect(); diff --git a/beacon_node/rest_api/src/beacon_node.rs b/beacon_node/rest_api/src/beacon_node.rs index 87d2d3cdcd..bd8d98a53d 100644 --- a/beacon_node/rest_api/src/beacon_node.rs +++ b/beacon_node/rest_api/src/beacon_node.rs @@ -54,7 +54,7 @@ fn get_version(_req: Request) -> APIResult { fn get_genesis_time(req: Request) -> APIResult { let beacon_chain = req.extensions().get::>>().unwrap(); let gen_time = { - let state = beacon_chain.current_state(); + let state = &beacon_chain.head().beacon_state; state.genesis_time }; let body = Body::from( diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd8..20425d292d 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -40,7 +40,11 @@ impl AttestationService for AttestationServiceInstance { // verify the slot, drop lock on state afterwards { let slot_requested = req.get_slot(); - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. 
+ let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); // Start by performing some checks // Check that the AttestationData is for the current slot (otherwise it will not be valid) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index b13303e25c..080c828a78 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -29,7 +29,11 @@ impl ValidatorService for ValidatorServiceInstance { trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); let spec = &self.chain.spec; - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. + let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 55c525b11f..fc5d806795 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -4,20 +4,23 @@ use std::sync::Arc; use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot}; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. +/// +/// ## Note +/// +/// It is assumed that all ancestors for this object are stored in the database. If this is not the +/// case, the iterator will start returning `None` prior to genesis. pub trait AncestorIter { /// Returns an iterator over the roots of the ancestors of `self`. fn try_iter_ancestor_roots(&self, store: Arc) -> Option; } -impl<'a, U: Store, E: EthSpec> AncestorIter> - for BeaconBlock -{ +impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { /// Iterates across all the prior block roots of `self`, starting at the most recent and ending /// at genesis. 
- fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { + fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BestBlockRootsIterator::owned(store, state, self.slot)) + Some(BlockRootsIterator::owned(store, state, self.slot)) } } @@ -116,11 +119,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -/// -/// ## Notes -/// -/// See [`BestBlockRootsIterator`](struct.BestBlockRootsIterator.html), which has different -/// `start_slot` logic. #[derive(Clone)] pub struct BlockRootsIterator<'a, T: EthSpec, U> { store: Arc, @@ -180,104 +178,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { } } -/// Iterates backwards through block roots with `start_slot` highest possible value -/// `<= beacon_state.slot`. -/// -/// The distinction between `BestBlockRootsIterator` and `BlockRootsIterator` is: -/// -/// - `BestBlockRootsIterator` uses best-effort slot. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `Some(root, slot)` where `slot` is the latest available block -/// root. -/// - `BlockRootsIterator` is strict about `start_slot`. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `None`. -/// -/// This is distinct from `BestBlockRootsIterator`. -/// -/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been -/// exhausted. -/// -/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. 
-#[derive(Clone)] -pub struct BestBlockRootsIterator<'a, T: EthSpec, U> { - store: Arc, - beacon_state: Cow<'a, BeaconState>, - slot: Slot, -} - -impl<'a, T: EthSpec, U: Store> BestBlockRootsIterator<'a, T, U> { - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Borrowed(beacon_state), - slot: slot + 1, - } - } - - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - // TODO: Use a function other than `get_block_root` as this will always return `Err()` - // for slot = state.slot. - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Owned(beacon_state), - slot: slot + 1, - } - } -} - -impl<'a, T: EthSpec, U: Store> Iterator for BestBlockRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - if self.slot == 0 { - // End of Iterator - return None; - } - - self.slot -= 1; - - match self.beacon_state.get_block_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { - // Read a `BeaconState` from the store that has access to prior historical root. - let beacon_state: BeaconState = { - // Load the earliest state from disk. - let new_state_root = self.beacon_state.get_oldest_state_root().ok()?; - - self.store.get(&new_state_root).ok()? 
- }?; - - self.beacon_state = Cow::Owned(beacon_state); - - let root = self.beacon_state.get_block_root(self.slot).ok()?; - - Some((*root, self.slot)) - } - _ => None, - } - } -} - #[cfg(test)] mod test { use super::*; @@ -337,49 +237,6 @@ mod test { } } - #[test] - fn best_block_root_iter() { - let store = Arc::new(MemoryStore::open()); - let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); - - let mut state_a: BeaconState = get_state(); - let mut state_b: BeaconState = get_state(); - - state_a.slot = Slot::from(slots_per_historical_root); - state_b.slot = Slot::from(slots_per_historical_root * 2); - - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); - - for root in &mut state_a.block_roots[..] { - *root = hashes.next().unwrap() - } - for root in &mut state_b.block_roots[..] { - *root = hashes.next().unwrap() - } - - let state_a_root = hashes.next().unwrap(); - state_b.state_roots[0] = state_a_root; - store.put(&state_a_root, &state_a).unwrap(); - - let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot); - - assert!( - iter.clone().find(|(_root, slot)| *slot == 0).is_some(), - "iter should contain zero slot" - ); - - let mut collected: Vec<(Hash256, Slot)> = iter.collect(); - collected.reverse(); - - let expected_len = 2 * MainnetEthSpec::slots_per_historical_root(); - - assert_eq!(collected.len(), expected_len); - - for i in 0..expected_len { - assert_eq!(collected[i].0, Hash256::from(i as u64)); - } - } - #[test] fn state_root_iter() { let store = Arc::new(MemoryStore::open()); diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index fbe3855605..0ac263638c 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -10,7 +10,7 @@ use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; use std::sync::Arc; use store::{ - iter::{AncestorIter, BestBlockRootsIterator}, + iter::{AncestorIter, 
BlockRootsIterator}, MemoryStore, Store, }; use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; @@ -159,7 +159,7 @@ fn get_ancestor_roots( .expect("block should exist") .expect("store should not error"); - as AncestorIter<_, BestBlockRootsIterator>>::try_iter_ancestor_roots( + as AncestorIter<_, BlockRootsIterator>>::try_iter_ancestor_roots( &block, store, ) .expect("should be able to create ancestor iter") From 40c0b70b22de83cb4fea86250397fa568d08dbc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 21:31:49 +1000 Subject: [PATCH 011/186] Add interop chain spec and rename chain_id --- beacon_node/http_server/src/api.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 10 +++---- beacon_node/rpc/src/beacon_node.rs | 2 +- beacon_node/src/main.rs | 3 +- beacon_node/src/run.rs | 18 +++++++++++- .../src/beacon_state/beacon_state_types.rs | 20 +++++++++++++ eth2/types/src/chain_spec.rs | 28 +++++++++++++++++-- protos/src/services.proto | 2 +- tests/ef_tests/eth2.0-spec-tests | 2 +- validator_client/src/main.rs | 9 ++++-- validator_client/src/service.rs | 6 ++-- 11 files changed, 83 insertions(+), 19 deletions(-) diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs index a910808998..8cb023b02c 100644 --- a/beacon_node/http_server/src/api.rs +++ b/beacon_node/http_server/src/api.rs @@ -64,7 +64,7 @@ fn handle_fork(req: &mut Request) -> IronResult(beacon_chain: &BeaconChain) -> HelloMes let state = &beacon_chain.head().beacon_state; HelloMessage { - //TODO: Correctly define the chain/network id - network_id: spec.chain_id, - chain_id: u64::from(spec.chain_id), - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, + network_id: spec.network_id, + //TODO: Correctly define the chain id + chain_id: spec.network_id as u64, + latest_finalized_root: state.finalized_root, + latest_finalized_epoch: state.finalized_epoch, best_root: 
beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index 631601ac95..5d635c9d1b 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -37,7 +37,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance { node_info.set_fork(fork); node_info.set_genesis_time(genesis_time); node_info.set_genesis_slot(spec.genesis_slot.as_u64()); - node_info.set_chain_id(u32::from(spec.chain_id)); + node_info.set_network_id(u32::from(spec.network_id)); // send the node_info the requester let error_log = self.log.clone(); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index dd0c695b4a..c61e0c6b62 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -136,6 +136,7 @@ fn main() { .help("Listen port for the HTTP server.") .takes_value(true), ) + /* Client related arguments */ .arg( Arg::with_name("api") .long("api") @@ -182,7 +183,7 @@ fn main() { from disk. A spec will be written to disk after this flag is used, so it is primarily used for creating eth2 spec files.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 010993988c..c16d23e5f1 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -13,7 +13,7 @@ use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; -use types::{MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; /// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. 
/// @@ -90,6 +90,22 @@ pub fn run_beacon_node( runtime, log, ), + ("disk", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("memory", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 1dc34e1951..dd6ca32724 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -200,3 +200,23 @@ impl EthSpec for MinimalEthSpec { } pub type MinimalBeaconState = BeaconState; + +/// Interop testnet spec +#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] +pub struct InteropEthSpec; + +impl EthSpec for InteropEthSpec { + type ShardCount = U8; + type SlotsPerHistoricalRoot = U64; + type LatestRandaoMixesLength = U64; + type LatestActiveIndexRootsLength = U64; + type LatestSlashedExitLength = U64; + type SlotsPerEpoch = U8; + type GenesisEpoch = U0; + + fn default_spec() -> ChainSpec { + ChainSpec::interop() + } +} + +pub type InteropBeaconState = BeaconState; diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 2128c6ef11..d6eaa123de 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -92,7 +92,7 @@ pub struct ChainSpec { domain_transfer: u32, pub boot_nodes: Vec, - pub chain_id: u8, + pub network_id: u8, } impl ChainSpec { @@ -190,7 +190,7 @@ impl ChainSpec { * Network specific */ boot_nodes: vec![], - chain_id: 1, // mainnet chain id + network_id: 1, // mainnet network id } } @@ -202,13 +202,35 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. 
let boot_nodes = vec![]; + let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - chain_id: 2, // lighthouse testnet chain id + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 2, // lighthouse testnet network id + boot_nodes, + ..ChainSpec::mainnet() + } + } + + /// Interop testing spec + /// + /// This allows us to customize a chain spec for interop testing. + pub fn interop() -> Self { + let genesis_slot = Slot::new(0); + let boot_nodes = vec![]; + + Self { + seconds_per_slot: 12, + target_committee_size: 4, + shuffle_round_count: 10, + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 13, boot_nodes, ..ChainSpec::mainnet() } diff --git a/protos/src/services.proto b/protos/src/services.proto index bf23ff391d..ba0462bbea 100644 --- a/protos/src/services.proto +++ b/protos/src/services.proto @@ -45,7 +45,7 @@ service AttestationService { message NodeInfoResponse { string version = 1; Fork fork = 2; - uint32 chain_id = 3; + uint32 network_id = 3; uint64 genesis_time = 4; uint64 genesis_slot = 5; } diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index aaa1673f50..d405782646 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit aaa1673f508103e11304833e0456e4149f880065 +Subproject commit d405782646190595927cc0a59f504f7b00a760f3 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index bd3919b5a7..756f829916 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -14,7 +14,7 @@ use protos::services_grpc::ValidatorServiceClient; use slog::{crit, error, info, o, Drain, Level}; use std::fs; use std::path::PathBuf; -use types::{Keypair, MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; pub const 
DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; @@ -70,7 +70,7 @@ fn main() { .short("s") .help("The title of the spec constants for chain config.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( @@ -214,6 +214,11 @@ fn main() { eth2_config, log.clone(), ), + "interop" => ValidatorService::::start::( + client_config, + eth2_config, + log.clone(), + ), other => { crit!(log, "Unknown spec constants"; "title" => other); return; diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3f99efe36a..c4ccbc2042 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -107,12 +107,12 @@ impl Service Service node_info.version.clone(), "Chain ID" => node_info.chain_id, "Genesis time" => genesis_time); + info!(log,"Beacon node connected"; "Node Version" => node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; From 15c4062761a3ae855bdc237d5edcdf9bf9c8ae44 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 22:25:37 +1000 Subject: [PATCH 012/186] Add ability to connect to raw libp2p nodes --- beacon_node/eth2-libp2p/src/config.rs | 16 ++++++++++++++++ beacon_node/eth2-libp2p/src/discovery.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 11 +++++++++++ beacon_node/src/main.rs | 21 ++++++++++++++------- 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7391dba8a1..d04eae14b2 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use enr::Enr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use 
std::time::Duration; @@ -39,6 +40,9 @@ pub struct Config { /// List of nodes to initially connect to. pub boot_nodes: Vec, + /// List of libp2p nodes to initially connect to. + pub libp2p_nodes: Vec, + /// Client version pub client_version: String, @@ -66,6 +70,7 @@ impl Default for Config { .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], + libp2p_nodes: vec![], client_version: version::version(), topics: Vec::new(), } @@ -118,6 +123,17 @@ impl Config { .collect::, _>>()?; } + if let Some(libp2p_addresses_str) = args.value_of("libp2p-addresses") { + self.libp2p_nodes = libp2p_addresses_str + .split(',') + .map(|multiaddr| { + multiaddr + .parse() + .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) + }) + .collect::, _>>()?; + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index c2f0087569..96cf718461 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -37,6 +37,9 @@ pub struct Discovery { /// The target number of connected peers on the libp2p interface. max_peers: usize, + /// directory to save ENR to + enr_dir: String, + /// The delay between peer discovery searches. peer_discovery_delay: Delay, @@ -54,9 +57,6 @@ pub struct Discovery { /// Logger for the discovery behaviour. 
log: slog::Logger, - - /// directory to save ENR to - enr_dir: String, } impl Discovery { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 05ae9e4739..5c7c0c7f1c 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -76,6 +76,17 @@ impl Service { ), }; + // attempt to connect to user-input libp2p nodes + for multiaddr in config.libp2p_nodes { + match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { + Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Err(err) => debug!( + log, + "Could not connect to node: {} error: {:?}", multiaddr, err + ), + }; + } + // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. For now we simply use a separate topic for diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c61e0c6b62..9a1af2e081 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -56,6 +56,13 @@ fn main() { .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) + .arg( + Arg::with_name("port") + .long("port") + .value_name("Lighthouse Port") + .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .takes_value(true), + ) .arg( Arg::with_name("maxpeers") .long("maxpeers") @@ -70,13 +77,6 @@ fn main() { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .takes_value(true), ) - .arg( - Arg::with_name("port") - .long("port") - .value_name("Lighthouse Port") - .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") - .takes_value(true), - ) .arg( Arg::with_name("discovery-port") .long("disc-port") @@ -91,6 +91,13 @@ fn main() { .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("libp2p-addresses") + .long("libp2p-addresses") + .value_name("MULTIADDR") + .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") + .takes_value(true), + ) /* * gRPC parameters. */ From 04ce9ec95e5d292d348fb88711187f786f1fc2eb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 26 Jul 2019 14:43:42 +1000 Subject: [PATCH 013/186] Adds Identify protocol, clean up RPC protocol name handling --- beacon_node/eth2-libp2p/src/behaviour.rs | 101 ++++++++++++++------ beacon_node/eth2-libp2p/src/rpc/protocol.rs | 92 +++++++----------- 2 files changed, 107 insertions(+), 86 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 37e3419a36..33acd41e17 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -10,39 +10,44 @@ use libp2p::{ }, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, + identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{o, trace, warn}; +use slog::{debug, o, trace, warn}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock, EthSpec}; +use types::{Attestation, BeaconBlock}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
#[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, - /// The serenity RPC specified in the wire-0 protocol. - serenity_rpc: RPC, + /// The Eth2 RPC specified in the wire-0 protocol. + eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. + // TODO: Remove Libp2p ping in favour of discv5 ping. ping: Ping, - /// Kademlia for peer discovery. + // TODO: Using id for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + identify: Identify, + /// Discovery behaviour. discovery: Discovery, #[behaviour(ignore)] /// The events generated by this behaviour to be consumed in the swarm poll. - events: Vec>, + events: Vec, /// Logger for behaviour actions. #[behaviour(ignore)] log: slog::Logger, } -impl Behaviour { +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, @@ -50,17 +55,25 @@ impl Behaviour { ) -> error::Result { let local_peer_id = local_key.public().clone().into_peer_id(); let behaviour_log = log.new(o!()); + let ping_config = PingConfig::new() .with_timeout(Duration::from_secs(30)) .with_interval(Duration::from_secs(20)) .with_max_failures(NonZeroU32::new(2).expect("2 != 0")) .with_keep_alive(false); + let identify = Identify::new( + "lighthouse/libp2p".into(), + version::version(), + local_key.public(), + ); + Ok(Behaviour { - serenity_rpc: RPC::new(log), + eth2_rpc: RPC::new(log), gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()), discovery: Discovery::new(local_key, net_conf, log)?, ping: Ping::new(ping_config), + identify, events: Vec::new(), log: behaviour_log, }) @@ -68,8 +81,8 @@ impl Behaviour { } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl 
NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: GossipsubEvent) { match event { @@ -101,8 +114,8 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: RPCMessage) { match event { @@ -119,19 +132,19 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: PingEvent) { // not interested in ping responses at the moment. } } -impl Behaviour { +impl Behaviour { /// Consumes the events list when polled. fn poll( &mut self, - ) -> Async>> { + ) -> Async> { if !self.events.is_empty() { return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } @@ -140,8 +153,36 @@ impl Behaviour { } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Identified { + peer_id, mut info, .. + } => { + if info.listen_addrs.len() > 20 { + debug!( + self.log, + "More than 20 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(20); + } + debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), + "Protocol Version" => info.protocol_version, + "Agent Version" => info.agent_version, + "Listening Addresses" => format!("{:?}", info.listen_addrs), + "Protocols" => format!("{:?}", info.protocols) + ); + } + IdentifyEvent::Error { .. } => {} + IdentifyEvent::SendBack { .. } => {} + } + } +} + +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: Discv5Event) { // discv5 has no events to inject @@ -149,7 +190,7 @@ impl NetworkBehaviourEventProces } /// Implements the combined behaviour for the libp2p service. 
-impl Behaviour { +impl Behaviour { /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic. @@ -158,7 +199,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. - pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { let message_bytes = ssz_encode(&message); for topic in topics { self.gossipsub.publish(topic, message_bytes.clone()); @@ -169,7 +210,7 @@ impl Behaviour { /// Sends an RPC Request/Response via the RPC protocol. pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.serenity_rpc.send_rpc(peer_id, rpc_event); + self.eth2_rpc.send_rpc(peer_id, rpc_event); } /* Discovery / Peer management functions */ @@ -179,28 +220,28 @@ impl Behaviour { } /// The types of events than can be obtained from polling the behaviour. -pub enum BehaviourEvent { +pub enum BehaviourEvent { RPC(PeerId, RPCEvent), PeerDialed(PeerId), PeerDisconnected(PeerId), GossipMessage { source: PeerId, topics: Vec, - message: Box>, + message: Box, }, } /// Messages that are passed to and from the pubsub (Gossipsub) behaviour. #[derive(Debug, Clone, PartialEq)] -pub enum PubsubMessage { +pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(BeaconBlock), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Attestation), } //TODO: Correctly encode/decode enums. Prefixing with integer for now. 
-impl Encode for PubsubMessage { +impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } @@ -229,7 +270,7 @@ impl Encode for PubsubMessage { } } -impl Decode for PubsubMessage { +impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 8729de3a7a..b606fc7432 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -8,7 +8,7 @@ use futures::{ future::{self, FutureResult}, sink, stream, Sink, Stream, }; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use std::io; use std::time::Duration; use tokio::codec::Framed; @@ -28,24 +28,22 @@ const REQUEST_TIMEOUT: u64 = 3; pub struct RPCProtocol; impl UpgradeInfo for RPCProtocol { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz").into(), - ProtocolId::new("goodbye", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into(), + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), ] } } -/// The raw protocol id sent over the wire. -type RawProtocolId = Vec; - /// Tracks the types in a protocol id. +#[derive(Clone)] pub struct ProtocolId { /// The rpc message type/name. pub message_name: String, @@ -55,44 +53,31 @@ pub struct ProtocolId { /// The encoding of the RPC. 
pub encoding: String, + + /// The protocol id that is formed from the above fields. + protocol_id: String, } /// An RPC protocol ID. impl ProtocolId { pub fn new(message_name: &str, version: &str, encoding: &str) -> Self { + let protocol_id = format!( + "{}/{}/{}/{}", + PROTOCOL_PREFIX, message_name, version, encoding + ); + ProtocolId { message_name: message_name.into(), version: version.into(), encoding: encoding.into(), + protocol_id, } } - - /// Converts a raw RPC protocol id string into an `RPCProtocolId` - pub fn from_bytes(bytes: &[u8]) -> Result { - let protocol_string = String::from_utf8(bytes.to_vec()) - .map_err(|_| RPCError::InvalidProtocol("Invalid protocol Id"))?; - let protocol_list: Vec<&str> = protocol_string.as_str().split('/').take(7).collect(); - - if protocol_list.len() != 7 { - return Err(RPCError::InvalidProtocol("Not enough '/'")); - } - - Ok(ProtocolId { - message_name: protocol_list[4].into(), - version: protocol_list[5].into(), - encoding: protocol_list[6].into(), - }) - } } -impl Into for ProtocolId { - fn into(self) -> RawProtocolId { - format!( - "{}/{}/{}/{}", - PROTOCOL_PREFIX, self.message_name, self.version, self.encoding - ) - .as_bytes() - .to_vec() +impl ProtocolName for ProtocolId { + fn protocol_name(&self) -> &[u8] { + self.protocol_id.as_bytes() } } @@ -127,16 +112,11 @@ where fn upgrade_inbound( self, socket: upgrade::Negotiated, - protocol: RawProtocolId, + protocol: ProtocolId, ) -> Self::Future { - // TODO: Verify this - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = - BaseInboundCodec::new(SSZInboundCodec::new(protocol_id, MAX_RPC_SIZE)); + let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); Framed::new(socket, codec) .into_future() @@ -171,7 +151,7 @@ pub enum RPCRequest { } impl 
UpgradeInfo for RPCRequest { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; // add further protocols as we support more encodings/versions @@ -182,22 +162,25 @@ impl UpgradeInfo for RPCRequest { /// Implements the encoding per supported protocol for RPCRequest. impl RPCRequest { - pub fn supported_protocols(&self) -> Vec { + pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1.0.0", "ssz").into()], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz").into()], + RPCRequest::Hello(_) => vec![ + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] } RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] } RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] } RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] } } } @@ -230,12 +213,9 @@ where socket: upgrade::Negotiated, protocol: Self::Info, ) -> Self::Future { - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol_id, 4096)); + let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); let 
codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 0613bc16fc54f5d02434ec7540500ba255ab5dc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 15:09:47 +1000 Subject: [PATCH 014/186] Update to latest libp2p, gossipsub improvements --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 6 ++---- beacon_node/eth2-libp2p/src/config.rs | 8 ++++++-- beacon_node/eth2-libp2p/src/discovery.rs | 6 ++---- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 7 ++++--- beacon_node/eth2-libp2p/src/rpc/mod.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 20 +++++++++++++------- beacon_node/rpc/src/attestation.rs | 4 ++-- beacon_node/rpc/src/beacon_block.rs | 4 ++-- beacon_node/src/main.rs | 15 +++++++++++---- 11 files changed, 48 insertions(+), 34 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 405c72cc4b..f5fe8a8775 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 33acd41e17..fcb1479497 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -4,14 +4,12 @@ use crate::{error, NetworkConfig}; use 
crate::{Topic, TopicHash}; use futures::prelude::*; use libp2p::{ - core::{ - identity::Keypair, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, - }, + core::identity::Keypair, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, + swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d04eae14b2..44d07795bc 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -64,9 +64,9 @@ impl Default for Config { discovery_port: 9000, max_peers: 10, //TODO: Set realistic values for production + // Note: This defaults topics to plain strings. Not hashes gs_config: GossipsubConfigBuilder::new() - .max_gossip_size(4_000_000) - .inactivity_timeout(Duration::from_secs(90)) + .max_transmit_size(1_000_000) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], @@ -134,6 +134,10 @@ impl Config { .collect::, _>>()?; } + if let Some(topics_str) = args.value_of("topics") { + self.topics = topics_str.split(',').map(|s| s.into()).collect(); + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 96cf718461..4c1794945d 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -4,13 +4,11 @@ use crate::{error, NetworkConfig}; /// Currently using discv5 for peer discovery. 
/// use futures::prelude::*; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, -}; -use libp2p::core::{identity::Keypair, Multiaddr, PeerId, ProtocolsHandler}; +use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId}; use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use slog::{debug, info, o, warn}; use std::collections::HashSet; use std::fs::File; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 7a3b2e632d..ca6ac37602 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -13,7 +13,7 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, }; -pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash}; +pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 4e796f6fbf..76e04d24e5 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -5,10 +5,10 @@ use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use core::marker::PhantomData; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::protocols_handler::{ +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -273,7 +273,8 @@ where Self::Error, > { if let Some(err) = 
self.pending_error.take() { - return Err(err); + dbg!(&err); + //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 88060e6024..5593660ffc 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -6,9 +6,9 @@ use futures::prelude::*; use handler::RPCHandler; -use libp2p::core::protocols_handler::ProtocolsHandler; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, +use libp2p::core::ConnectedPoint; +use libp2p::swarm::{ + protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use libp2p::{Multiaddr, PeerId}; pub use methods::{ErrorMessage, HelloMessage, RPCErrorResponse, RPCResponse, RequestId}; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5c7c0c7f1c..5a2fc8d8b3 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -3,7 +3,7 @@ use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; -use crate::{TopicBuilder, TopicHash}; +use crate::{Topic, TopicHash}; use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; use futures::prelude::*; use futures::Stream; @@ -90,15 +90,21 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. 
For now we simply use a separate topic for - //attestations - topics.push(BEACON_ATTESTATION_TOPIC.to_string()); - topics.push(BEACON_PUBSUB_TOPIC.to_string()); - topics.append(&mut config.topics.clone()); + // attestations + topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); + topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.append( + &mut config + .topics + .iter() + .cloned() + .map(|s| Topic::new(s)) + .collect(), + ); let mut subscribed_topics = vec![]; for topic in topics { - let t = TopicBuilder::new(topic.clone()).build(); - if swarm.subscribe(t) { + if swarm.subscribe(topic.clone()) { trace!(log, "Subscribed to topic: {:?}", topic); subscribed_topics.push(topic); } else { diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd8..cbbe4de6e5 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; -use eth2_libp2p::TopicBuilder; +use eth2_libp2p::Topic; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -140,7 +140,7 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = TopicBuilder::new(BEACON_ATTESTATION_TOPIC).build(); + let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); let message = PubsubMessage::Attestation(attestation); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b42bbb2081..2a8ae2c6b8 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::BEACON_PUBSUB_TOPIC; -use eth2_libp2p::{PubsubMessage, TopicBuilder}; +use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, 
RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -106,7 +106,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = TopicBuilder::new(BEACON_PUBSUB_TOPIC).build(); + let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); let message = PubsubMessage::Block(block); // Publish the block to the p2p network via gossipsub. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9a1af2e081..c85eeedace 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -52,14 +52,14 @@ fn main() { .arg( Arg::with_name("listen-address") .long("listen-address") - .value_name("Address") + .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) .arg( Arg::with_name("port") .long("port") - .value_name("Lighthouse Port") + .value_name("PORT") .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") .takes_value(true), ) @@ -80,17 +80,24 @@ fn main() { .arg( Arg::with_name("discovery-port") .long("disc-port") - .value_name("DiscoveryPort") + .value_name("PORT") .help("The discovery UDP port.") .takes_value(true), ) .arg( Arg::with_name("discovery-address") .long("discovery-address") - .value_name("Address") + .value_name("ADDRESS") .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("topics") + .long("topics") + .value_name("STRING") + .help("One or more comma-delimited gossipsub topic strings to subscribe to.") + .takes_value(true), + ) .arg( Arg::with_name("libp2p-addresses") .long("libp2p-addresses") From 107bbdcccd66d4fa4125bc6f5b3f4fec3353032f Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 17:54:38 +1000 Subject: [PATCH 015/186] Updates to latest interop branch. - Shifts decoding of objects into message handler. - Updates to latest interop gossipsub. 
- Adds interop spec constant. --- beacon_node/eth2-libp2p/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/behaviour.rs | 74 ++++++------- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 17 ++- beacon_node/eth2-libp2p/src/rpc/mod.rs | 12 +-- beacon_node/eth2-libp2p/src/service.rs | 23 ++-- beacon_node/http_server/src/lib.rs | 2 +- beacon_node/network/src/message_handler.rs | 101 ++++++++++++------ beacon_node/network/src/service.rs | 33 +++--- beacon_node/network/src/sync/simple_sync.rs | 38 +++---- beacon_node/rpc/src/attestation.rs | 8 +- beacon_node/rpc/src/beacon_block.rs | 12 +-- beacon_node/rpc/src/lib.rs | 2 +- .../src/beacon_state/beacon_state_types.rs | 24 ++++- validator_client/src/main.rs | 2 +- 16 files changed, 199 insertions(+), 157 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index f5fe8a8775..0ea182bc62 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fcb1479497..fc224e91a3 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -2,6 +2,7 @@ use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, 
RPC}; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -13,11 +14,10 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{debug, o, trace, warn}; -use ssz::{ssz_encode, Decode, DecodeError, Encode}; +use slog::{debug, o, trace}; +use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core @@ -87,23 +87,12 @@ impl NetworkBehaviourEventProcess { trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg)); - let pubsub_message = match PubsubMessage::from_ssz_bytes(&gs_msg.data) { - //TODO: Punish peer on error - Err(e) => { - warn!( - self.log, - "Received undecodable message from Peer {:?} error", gs_msg.source; - "error" => format!("{:?}", e) - ); - return; - } - Ok(msg) => msg, - }; + let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); self.events.push(BehaviourEvent::GossipMessage { source: gs_msg.source, topics: gs_msg.topics, - message: Box::new(pubsub_message), + message: msg, }); } GossipsubEvent::Subscribed { .. } => {} @@ -225,7 +214,7 @@ pub enum BehaviourEvent { GossipMessage { source: PeerId, topics: Vec, - message: Box, + message: PubsubMessage, }, } @@ -233,41 +222,50 @@ pub enum BehaviourEvent { #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(Vec), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Vec), + /// Gossipsub message from an unknown topic. + Unknown(Vec), +} + +impl PubsubMessage { + /* Note: This is assuming we are not hashing topics. 
If we choose to hash topics, these will + * need to be modified. + * + * Also note that a message can be associated with many topics. As soon as one of the topics is + * known we match. If none of the topics are known we return an unknown state. + */ + fn from_topics(topics: &Vec, data: Vec) -> Self { + for topic in topics { + match topic.as_str() { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + _ => {} + } + } + PubsubMessage::Unknown(data) + } } -//TODO: Correctly encode/decode enums. Prefixing with integer for now. impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); - - let mut encoder = ssz::SszEncoder::container(buf, offset); - match self { - PubsubMessage::Block(block_gossip) => { - encoder.append(&0_u32); - + PubsubMessage::Block(inner) + | PubsubMessage::Attestation(inner) + | PubsubMessage::Unknown(inner) => { // Encode the gossip as a Vec; - encoder.append(&block_gossip.as_ssz_bytes()); - } - PubsubMessage::Attestation(attestation_gossip) => { - encoder.append(&1_u32); - - // Encode the gossip as a Vec; - encoder.append(&attestation_gossip.as_ssz_bytes()); + buf.append(&mut inner.as_ssz_bytes()); } } - - encoder.finalize(); } } +/* impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false @@ -295,7 +293,9 @@ impl Decode for PubsubMessage { } } } +*/ +/* #[cfg(test)] mod test { use super::*; @@ -313,4 +313,6 @@ mod test { assert_eq!(original, decoded); } + } +*/ diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 44d07795bc..ddf14cc047 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,7 +7,7 @@ use std::path::PathBuf; use std::time::Duration; /// The beacon node topic string to subscribe to. 
-pub const BEACON_PUBSUB_TOPIC: &str = "beacon_block"; +pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index ca6ac37602..54a4f2a998 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -11,7 +11,7 @@ mod service; pub use behaviour::PubsubMessage; pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, + Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 76e04d24e5..355cc52ee4 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -12,16 +12,14 @@ use libp2p::swarm::protocols_handler::{ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -use types::EthSpec; /// The time (in seconds) before a substream that is awaiting a response times out. pub const RESPONSE_TIMEOUT: u64 = 9; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, @@ -56,8 +54,8 @@ where /// After the given duration has elapsed, an inactive connection will shutdown. inactive_timeout: Duration, - /// Phantom EthSpec. - _phantom: PhantomData, + /// Marker to pin the generic stream. + _phantom: PhantomData, } /// An outbound substream is waiting a response from the user. 
@@ -90,10 +88,9 @@ where }, } -impl RPCHandler +impl RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { pub fn new( listen_protocol: SubstreamProtocol, @@ -145,20 +142,18 @@ where } } -impl Default for RPCHandler +impl Default for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { fn default() -> Self { RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30)) } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { type InEvent = RPCEvent; type OutEvent = RPCEvent; diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 5593660ffc..756a62e71b 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -16,7 +16,6 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest}; use slog::o; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; -use types::EthSpec; pub(crate) mod codec; mod handler; @@ -50,16 +49,16 @@ impl RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. events: Vec>, /// Pins the generic substream. - marker: PhantomData<(TSubstream, E)>, + marker: PhantomData<(TSubstream)>, /// Slog logger for RPC behaviour. 
_log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: &slog::Logger) -> Self { let log = log.new(o!("Service" => "Libp2p-RPC")); RPC { @@ -80,12 +79,11 @@ impl RPC { } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { - type ProtocolsHandler = RPCHandler; + type ProtocolsHandler = RPCHandler; type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5a2fc8d8b3..316aa05798 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -4,7 +4,7 @@ use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -21,25 +21,24 @@ use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; use std::time::Duration; -use types::EthSpec; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour, E>; +type Libp2pBehaviour = Behaviour>; const NETWORK_KEY_FILENAME: &str = "key"; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service { +pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm>, + pub swarm: Swarm, /// This node's PeerId. _local_peer_id: PeerId, /// The libp2p logger handle. pub log: slog::Logger, } -impl Service { +impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { debug!(log, "Network-libp2p Service starting"); @@ -92,7 +91,7 @@ impl Service { //TODO: Handle multiple shard attestations. 
For now we simply use a separate topic for // attestations topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); topics.append( &mut config .topics @@ -121,8 +120,8 @@ impl Service { } } -impl Stream for Service { - type Item = Libp2pEvent; +impl Stream for Service { + type Item = Libp2pEvent; type Error = crate::error::Error; fn poll(&mut self) -> Poll, Self::Error> { @@ -136,7 +135,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Pubsub message received: {:?}", message); + trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, @@ -196,7 +195,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) } /// Events that can be obtained from polling the Libp2p Service. -pub enum Libp2pEvent { +pub enum Libp2pEvent { /// An RPC response request has been received on the swarm. RPC(PeerId, RPCEvent), /// Initiated the connection to a new peer. 
@@ -207,7 +206,7 @@ pub enum Libp2pEvent { PubsubMessage { source: PeerId, topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs index b20e43de8b..f1d006a5bc 100644 --- a/beacon_node/http_server/src/lib.rs +++ b/beacon_node/http_server/src/lib.rs @@ -76,7 +76,7 @@ pub fn create_iron_http_server( pub fn start_service( config: &HttpServerConfig, executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender>, + _network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, db_path: PathBuf, metrics_registry: Registry, diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index eaddce5334..72a507ad7b 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,7 +14,7 @@ use slog::{debug, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{BeaconBlockHeader, EthSpec}; +use types::{Attestation, BeaconBlock, BeaconBlockHeader}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -23,14 +23,14 @@ pub struct MessageHandler { /// The syncing framework. sync: SimpleSync, /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, + network_context: NetworkContext, /// The `MessageHandler` logger. log: slog::Logger, } /// Types of messages the handler can receive. #[derive(Debug)] -pub enum HandlerMessage { +pub enum HandlerMessage { /// We have initiated a connection to a new peer. PeerDialed(PeerId), /// Peer has disconnected, @@ -38,17 +38,17 @@ pub enum HandlerMessage { /// An RPC response/request has been received. RPC(PeerId, RPCEvent), /// A gossip message has been received. - PubsubMessage(PeerId, Box>), + PubsubMessage(PeerId, PubsubMessage), } impl MessageHandler { /// Initializes and runs the MessageHandler. 
pub fn spawn( beacon_chain: Arc>, - network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, - ) -> error::Result>> { + ) -> error::Result> { debug!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -78,7 +78,7 @@ impl MessageHandler { } /// Handle all messages incoming from the network service. - fn handle_message(&mut self, message: HandlerMessage) { + fn handle_message(&mut self, message: HandlerMessage) { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { @@ -94,7 +94,7 @@ impl MessageHandler { } // we have received an RPC message request/response HandlerMessage::PubsubMessage(peer_id, gossip) => { - self.handle_gossip(peer_id, *gossip); + self.handle_gossip(peer_id, gossip); } } } @@ -218,6 +218,62 @@ impl MessageHandler { } } + /// Handle various RPC errors + fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + //TODO: Handle error correctly + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + } + + /// Handle RPC messages + fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { + match gossip_message { + PubsubMessage::Block(message) => match self.decode_gossip_block(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(block) => { + let _should_forward_on = + self.sync + .on_block_gossip(peer_id, block, &mut self.network_context); + } + }, + PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(attestation) => { + self.sync + .on_attestation_gossip(peer_id, attestation, 
&mut self.network_context) + } + }, + PubsubMessage::Unknown(message) => { + // Received a message from an unknown topic. Ignore for now + debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + } + } + } + + /* Decoding of blocks and attestations from the network. + * + * TODO: Apply efficient decoding/verification of these objects + */ + + fn decode_gossip_block( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + BeaconBlock::from_ssz_bytes(&beacon_block) + } + + fn decode_gossip_attestation( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + Attestation::from_ssz_bytes(&beacon_block) + } + /// Verifies and decodes the ssz-encoded block bodies received from peers. fn decode_block_bodies( &self, @@ -241,39 +297,18 @@ impl MessageHandler { //TODO: Implement faster header verification before decoding entirely Vec::from_ssz_bytes(&headers_response.headers) } - - /// Handle various RPC errors - fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { - //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); - } - - /// Handle RPC messages - fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { - match gossip_message { - PubsubMessage::Block(message) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, message, &mut self.network_context); - } - PubsubMessage::Attestation(message) => { - self.sync - .on_attestation_gossip(peer_id, message, &mut self.network_context) - } - } - } } // TODO: RPC Rewrite makes this struct fairly pointless -pub struct NetworkContext { +pub struct NetworkContext { /// The network channel to relay messages to the Network service. 
- network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, /// The `MessageHandler` logger. log: slog::Logger, } -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { Self { network_send, log } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7a21f7f287..e5ca2a9175 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,13 +14,12 @@ use slog::{debug, info, o, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; -use types::EthSpec; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { - libp2p_service: Arc>>, + libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, - _network_send: mpsc::UnboundedSender>, + _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, //message_handler_send: Sender } @@ -31,9 +30,9 @@ impl Service { config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, - ) -> error::Result<(Arc, mpsc::UnboundedSender>)> { + ) -> error::Result<(Arc, mpsc::UnboundedSender)> { // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::>(); + let (network_send, network_recv) = mpsc::unbounded_channel::(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); let message_handler_send = MessageHandler::spawn( @@ -65,15 +64,15 @@ impl Service { Ok((Arc::new(network_service), network_send)) } - pub fn libp2p_service(&self) -> Arc>> { + pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } } -fn spawn_service( - libp2p_service: Arc>>, - network_recv: mpsc::UnboundedReceiver>, - message_handler_send: mpsc::UnboundedSender>, +fn spawn_service( + 
libp2p_service: Arc>, + network_recv: mpsc::UnboundedReceiver, + message_handler_send: mpsc::UnboundedSender, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -99,10 +98,10 @@ fn spawn_service( } //TODO: Potentially handle channel errors -fn network_service( - libp2p_service: Arc>>, - mut network_recv: mpsc::UnboundedReceiver>, - mut message_handler_send: mpsc::UnboundedSender>, +fn network_service( + libp2p_service: Arc>, + mut network_recv: mpsc::UnboundedReceiver, + mut message_handler_send: mpsc::UnboundedSender, log: slog::Logger, ) -> impl futures::Future { futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { @@ -119,7 +118,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, *message); + libp2p_service.lock().swarm.publish(topics, message); } }, Ok(Async::NotReady) => break, @@ -176,14 +175,14 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] -pub enum NetworkMessage { +pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire Send(PeerId, OutgoingMessage), /// Publish a message to pubsub mechanism. Publish { topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 9a9d15503e..40a1881dd3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -123,7 +123,7 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. 
- pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { + pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); @@ -137,7 +137,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); @@ -156,7 +156,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); @@ -171,7 +171,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -278,7 +278,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -323,7 +323,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, res: BeaconBlockRootsResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -387,7 +387,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -440,7 +440,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, headers: Vec, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -472,7 +472,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let block_bodies: Vec> = req .block_roots @@ -518,7 +518,7 @@ impl 
SimpleSync { &mut self, peer_id: PeerId, res: DecodedBeaconBlockBodiesResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -557,7 +557,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -627,7 +627,7 @@ impl SimpleSync { &mut self, _peer_id: PeerId, msg: Attestation, - _network: &mut NetworkContext, + _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), @@ -642,7 +642,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { // Potentially set state to sync. if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { @@ -666,7 +666,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -683,7 +683,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -719,7 +719,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block_root: Hash256, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { match self.import_queue.attempt_complete_block(block_root) { @@ -812,7 +812,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { let processing_result = self.chain.process_block(block.clone()); @@ -917,8 +917,8 @@ fn hello_message(beacon_chain: &BeaconChain) -> HelloMes network_id: spec.network_id, //TODO: Correctly define the chain id chain_id: spec.network_id as u64, - 
latest_finalized_root: state.finalized_root, - latest_finalized_epoch: state.finalized_epoch, + latest_finalized_root: state.finalized_checkpoint.root, + latest_finalized_epoch: state.finalized_checkpoint.epoch, best_root: beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index cbbe4de6e5..3de3639d81 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -11,7 +11,7 @@ use protos::services::{ }; use protos::services_grpc::AttestationService; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::Attestation; @@ -19,7 +19,7 @@ use types::Attestation; #[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub network_chan: mpsc::UnboundedSender, pub log: slog::Logger, } @@ -141,12 +141,12 @@ impl AttestationService for AttestationServiceInstance { // valid attestation, propagate to the network let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); - let message = PubsubMessage::Attestation(attestation); + let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 2a8ae2c6b8..b1a67399e2 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_PUBSUB_TOPIC; +use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -11,7 +11,7 @@ use protos::services::{ 
use protos::services_grpc::BeaconBlockService; use slog::Logger; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::{BeaconBlock, Signature, Slot}; @@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot}; #[derive(Clone)] pub struct BeaconBlockServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub network_chan: mpsc::UnboundedSender, pub log: Logger, } @@ -106,14 +106,14 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); - let message = PubsubMessage::Block(block); + let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index de90395051..eef0092921 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -25,7 +25,7 @@ use tokio::sync::mpsc; pub fn start_server( config: &RPCConfig, executor: &TaskExecutor, - network_chan: mpsc::UnboundedSender>, + network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, log: &slog::Logger, ) -> exit_future::Signal { diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index dd6ca32724..0e76942dde 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -207,12 +207,26 @@ pub struct InteropEthSpec; impl EthSpec for InteropEthSpec { type ShardCount = U8; - type SlotsPerHistoricalRoot = U64; - type LatestRandaoMixesLength = U64; - type LatestActiveIndexRootsLength = U64; - type LatestSlashedExitLength = U64; type 
SlotsPerEpoch = U8; - type GenesisEpoch = U0; + type SlotsPerHistoricalRoot = U64; + type SlotsPerEth1VotingPeriod = U16; + type EpochsPerHistoricalVector = U64; + type EpochsPerSlashingsVector = U64; + type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch + + params_from_eth_spec!(MainnetEthSpec { + JustificationBitsLength, + MaxValidatorsPerCommittee, + GenesisEpoch, + HistoricalRootsLimit, + ValidatorRegistryLimit, + MaxProposerSlashings, + MaxAttesterSlashings, + MaxAttestations, + MaxDeposits, + MaxVoluntaryExits, + MaxTransfers + }); fn default_spec() -> ChainSpec { ChainSpec::interop() diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 756f829916..76acb2f1a1 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -214,7 +214,7 @@ fn main() { eth2_config, log.clone(), ), - "interop" => ValidatorService::::start::( + "interop" => ValidatorService::::start( client_config, eth2_config, log.clone(), From edd99fafb6c42212bda9bcaa8f77d11c15515e23 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Thu, 25 Jul 2019 15:08:18 +0200 Subject: [PATCH 016/186] Getting attestation slot via helper method --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/fork_choice.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 67d9281277..8a9421a1b7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -10,7 +10,7 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; use state_processing::per_block_processing::errors::{ - AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, 
TransferValidationError, }; use state_processing::{ diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 92b683590a..0f98ac9ce9 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -174,13 +174,13 @@ impl ForkChoice { &attestation.aggregation_bitfield, )?; - let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let block_slot = state.get_attestation_slot(&attestation.data)?; Ok(validator_indices .iter() .find(|&&v| { match self.backend.latest_message(v) { - Some((_, slot)) => target_slot > slot, + Some((_, slot)) => block_slot > slot, None => true } }).is_some()) From 78f39115229e4bf5ace88193303443c50297e613 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Fri, 26 Jul 2019 12:48:17 +0200 Subject: [PATCH 017/186] Refactored attestation creation in test utils --- beacon_node/beacon_chain/src/test_utils.rs | 127 ++++++++++++--------- 1 file changed, 72 insertions(+), 55 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 991d29418e..c43309cbfd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -8,11 +8,7 @@ use std::sync::Arc; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; -use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, -}; +use types::{test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, CrosslinkCommittee}; pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; @@ 
-171,7 +167,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_op_pool( + self.add_attestations_to_chain( &attestation_strategy, &new_state, block_root, @@ -256,18 +252,16 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. + /// Adds attestations to the `BeaconChain` operations pool and fork choice. /// /// The `attestation_strategy` dictates which validators should attest. - fn add_attestations_to_op_pool( + fn add_attestations_to_chain( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, head_block_slot: Slot, ) { - let spec = &self.spec; - let fork = &state.fork; let attesting_validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => (0..self.keypairs.len()).collect(), @@ -279,55 +273,18 @@ where .expect("should get committees") .iter() .for_each(|cc| { - let committee_size = cc.committee.len(); - for (i, validator_index) in cc.committee.iter().enumerate() { // Note: searching this array is worst-case `O(n)`. A hashset could be a better // alternative. 
if attesting_validators.contains(validator_index) { - let data = self - .chain - .produce_attestation_data_for_block( - cc.shard, - head_block_root, - head_block_slot, - state, - ) - .expect("should produce attestation data"); - - let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(i, true); - aggregation_bitfield.set(committee_size, false); - - let mut custody_bitfield = Bitfield::new(); - custody_bitfield.set(committee_size, false); - - let signature = { - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .tree_hash_root(); - - let domain = - spec.get_domain(data.target_epoch, Domain::Attestation, fork); - - let mut agg_sig = AggregateSignature::new(); - agg_sig.add(&Signature::new( - &message, - domain, - self.get_sk(*validator_index), - )); - - agg_sig - }; - - let attestation = Attestation { - aggregation_bitfield, - data, - custody_bitfield, - signature, - }; + let attestation = self.create_attestation( + *validator_index, + cc, + head_block_root, + head_block_slot, + state, + i + ); self.chain .process_attestation(attestation) @@ -337,6 +294,66 @@ where }); } + /// Creates an attestation for a validator with the given data. 
+ pub fn create_attestation( + &self, + validator_index: usize, + crosslink_committee: &CrosslinkCommittee, + head_block_root: Hash256, + head_block_slot: Slot, + state: &BeaconState, + bitfield_index: usize + ) -> Attestation { + let committee_size = crosslink_committee.committee.len(); + let spec = &self.spec; + let fork = &state.fork; + + let data = self + .chain + .produce_attestation_data_for_block( + crosslink_committee.shard, + head_block_root, + head_block_slot, + state, + ) + .expect("should produce attestation data"); + + let mut aggregation_bitfield = Bitfield::new(); + aggregation_bitfield.set(bitfield_index, true); + aggregation_bitfield.set(committee_size, false); + + let mut custody_bitfield = Bitfield::new(); + custody_bitfield.set(committee_size, false); + + let signature = { + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .tree_hash_root(); + + let domain = + spec.get_domain(data.target_epoch, Domain::Attestation, fork); + + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&Signature::new( + &message, + domain, + self.get_sk(validator_index), + )); + + agg_sig + }; + + Attestation { + aggregation_bitfield, + data, + custody_bitfield, + signature, + } + } + + /// Returns the secret key for the given validator index. fn get_sk(&self, validator_index: usize) -> &SecretKey { &self.keypairs[validator_index].sk From dcac8d56bd163845c0b928abf1ca54a85e179fd2 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sat, 27 Jul 2019 22:32:11 +0200 Subject: [PATCH 018/186] Revert "Refactored attestation creation in test utils" This reverts commit 4d277fe4239a7194758b18fb5c00dfe0b8231306. 
--- beacon_node/beacon_chain/src/test_utils.rs | 127 +++++++++------------ 1 file changed, 55 insertions(+), 72 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c43309cbfd..991d29418e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -8,7 +8,11 @@ use std::sync::Arc; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; -use types::{test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, CrosslinkCommittee}; +use types::{ + test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, + AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, + Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, +}; pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; @@ -167,7 +171,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_chain( + self.add_attestations_to_op_pool( &attestation_strategy, &new_state, block_root, @@ -252,16 +256,18 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool and fork choice. + /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. /// /// The `attestation_strategy` dictates which validators should attest. 
- fn add_attestations_to_chain( + fn add_attestations_to_op_pool( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, head_block_slot: Slot, ) { + let spec = &self.spec; + let fork = &state.fork; let attesting_validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => (0..self.keypairs.len()).collect(), @@ -273,18 +279,55 @@ where .expect("should get committees") .iter() .for_each(|cc| { + let committee_size = cc.committee.len(); + for (i, validator_index) in cc.committee.iter().enumerate() { // Note: searching this array is worst-case `O(n)`. A hashset could be a better // alternative. if attesting_validators.contains(validator_index) { - let attestation = self.create_attestation( - *validator_index, - cc, - head_block_root, - head_block_slot, - state, - i - ); + let data = self + .chain + .produce_attestation_data_for_block( + cc.shard, + head_block_root, + head_block_slot, + state, + ) + .expect("should produce attestation data"); + + let mut aggregation_bitfield = Bitfield::new(); + aggregation_bitfield.set(i, true); + aggregation_bitfield.set(committee_size, false); + + let mut custody_bitfield = Bitfield::new(); + custody_bitfield.set(committee_size, false); + + let signature = { + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .tree_hash_root(); + + let domain = + spec.get_domain(data.target_epoch, Domain::Attestation, fork); + + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&Signature::new( + &message, + domain, + self.get_sk(*validator_index), + )); + + agg_sig + }; + + let attestation = Attestation { + aggregation_bitfield, + data, + custody_bitfield, + signature, + }; self.chain .process_attestation(attestation) @@ -294,66 +337,6 @@ where }); } - /// Creates an attestation for a validator with the given data. 
- pub fn create_attestation( - &self, - validator_index: usize, - crosslink_committee: &CrosslinkCommittee, - head_block_root: Hash256, - head_block_slot: Slot, - state: &BeaconState, - bitfield_index: usize - ) -> Attestation { - let committee_size = crosslink_committee.committee.len(); - let spec = &self.spec; - let fork = &state.fork; - - let data = self - .chain - .produce_attestation_data_for_block( - crosslink_committee.shard, - head_block_root, - head_block_slot, - state, - ) - .expect("should produce attestation data"); - - let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(bitfield_index, true); - aggregation_bitfield.set(committee_size, false); - - let mut custody_bitfield = Bitfield::new(); - custody_bitfield.set(committee_size, false); - - let signature = { - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .tree_hash_root(); - - let domain = - spec.get_domain(data.target_epoch, Domain::Attestation, fork); - - let mut agg_sig = AggregateSignature::new(); - agg_sig.add(&Signature::new( - &message, - domain, - self.get_sk(validator_index), - )); - - agg_sig - }; - - Attestation { - aggregation_bitfield, - data, - custody_bitfield, - signature, - } - } - - /// Returns the secret key for the given validator index. 
fn get_sk(&self, validator_index: usize) -> &SecretKey { &self.keypairs[validator_index].sk From f4b169ce80b8b5acbc1b32b9ab488acabdb0bc84 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Mon, 29 Jul 2019 22:51:42 +0200 Subject: [PATCH 019/186] Integration tests for free attestation processing --- beacon_node/beacon_chain/src/fork_choice.rs | 7 +- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/tests.rs | 92 ++++++++++++++++++++- 3 files changed, 99 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 0f98ac9ce9..7d1830afec 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -3,7 +3,7 @@ use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices_unsorted; use std::sync::Arc; use store::{Error as StoreError, Store}; -use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256}; +use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; use state_processing::common; type Result = std::result::Result; @@ -186,6 +186,11 @@ impl ForkChoice { }).is_some()) } + // Returns the latest message for a given validator + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { + self.backend.latest_message(validator_index) + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. 
/// diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 991d29418e..9a440b8877 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -171,7 +171,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_op_pool( + self.add_free_attestations( &attestation_strategy, &new_state, block_root, @@ -256,10 +256,10 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. + /// Adds attestations to the `BeaconChain` operations pool and fork choice. /// /// The `attestation_strategy` dictates which validators should attest. - fn add_attestations_to_op_pool( + fn add_free_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 882d9f2355..2f4e5badeb 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,4 +1,3 @@ -#![cfg(not(debug_assertions))] use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, @@ -8,7 +7,7 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot}; +use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot, RelativeEpoch}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -265,3 +264,92 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } + +#[test] +fn free_attestations_added_to_fork_choice_some_none() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; + + let harness = get_harness(VALIDATOR_COUNT); + + harness.extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &harness.chain.head().beacon_state; + let fork_choice = &harness.chain.fork_choice; + + let validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots: Vec = validators + .iter() + .map(|&v| + state.get_attestation_duties(v, RelativeEpoch::Current) + .expect("should get attester duties") + .unwrap() + .slot + ).collect(); + let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); + + for (validator, slot) in validator_slots.clone() { + let latest_message = fork_choice.latest_message(*validator); + + if slot <= num_blocks_produced && slot != 0{ + assert_eq!( + latest_message.unwrap().1, slot, + "Latest message slot should be equal to attester duty." + ) + } else { + assert!( + latest_message.is_none(), + "Latest message slot should be None." 
+ ) + } + } +} + +#[test] +fn free_attestations_added_to_fork_choice_all_updated() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; + + let harness = get_harness(VALIDATOR_COUNT); + + harness.extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &harness.chain.head().beacon_state; + let fork_choice = &harness.chain.fork_choice; + + let validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots: Vec = validators + .iter() + .map(|&v| + state.get_attestation_duties(v, RelativeEpoch::Current) + .expect("should get attester duties") + .unwrap() + .slot + ).collect(); + let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); + + for (validator, slot) in validator_slots { + let latest_message = fork_choice.latest_message(*validator); + + assert_eq!( + latest_message.unwrap().1, slot, + "Latest message slot should be equal to attester duty." + ); + + if slot != num_blocks_produced { + let block_root = state.get_block_root(slot) + .expect("Should get block root at slot"); + + assert_eq!( + latest_message.unwrap().0, *block_root, + "Latest message block root should be equal to block at slot." + ); + } + } +} \ No newline at end of file From c431bd993e9ed0d7aeffd37574e30a416955ea9c Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 6 Aug 2019 14:56:13 +0200 Subject: [PATCH 020/186] Implicit conflicts resolved. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++----- beacon_node/beacon_chain/src/fork_choice.rs | 12 ++++-------- beacon_node/beacon_chain/tests/tests.rs | 1 + 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3e8467a492..49e2cec838 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -520,8 +520,8 @@ impl BeaconChain { if let Some(state) = self.get_attestation_state(&attestation) { if self.fork_choice.should_process_attestation(&state, &attestation)? { - let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; - per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; + per_block_processing::is_valid_indexed_attestation(&state, &indexed_attestation, &self.spec)?; self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -540,14 +540,14 @@ impl BeaconChain { } /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + fn get_attestation_state(&self, attestation: &Attestation) -> Option> { // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. let slots_per_epoch = T::EthSpec::slots_per_epoch(); let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); for (root, slot) in blocks { - if root == attestation.data.target_root { + if root == attestation.data.target.root { return Some(self.state.read().clone()); } @@ -557,7 +557,7 @@ impl BeaconChain { }; // A different state is retrieved from the database. 
- match self.store.get::(&attestation.data.target_root) { + match self.store.get::>(&attestation.data.target.root) { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, _ => None diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 29b3664f18..3900575aee 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -4,7 +4,6 @@ use state_processing::common::get_attesting_indices; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; -use state_processing::common; type Result = std::result::Result; @@ -172,14 +171,11 @@ impl ForkChoice { } /// Determines whether or not the given attestation contains a latest message. - pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { - let validator_indices = common::get_attesting_indices_unsorted( - state, - &attestation.data, - &attestation.aggregation_bitfield, - )?; + pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { + let validator_indices = + get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let block_slot = state.get_attestation_slot(&attestation.data)?; + let block_slot = state.get_attestation_data_slot(&attestation.data)?; Ok(validator_indices .iter() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 730b8ec67e..cc1a84973e 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,3 +1,4 @@ +#![cfg(not(debug_assertions))] use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, From ce73705498f3c39504b2822f67f422da7a3bfdb2 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 6 Aug 2019 
19:17:15 +0200 Subject: [PATCH 021/186] formatting --- beacon_node/beacon_chain/src/beacon_chain.rs | 52 +++++++++++++------- beacon_node/beacon_chain/src/errors.rs | 6 ++- beacon_node/beacon_chain/src/fork_choice.rs | 22 +++++---- beacon_node/beacon_chain/src/test_utils.rs | 7 +-- beacon_node/beacon_chain/tests/tests.rs | 34 ++++++++----- beacon_node/rpc/src/attestation.rs | 14 ++++-- eth2/lmd_ghost/src/reduced_tree.rs | 6 +-- 7 files changed, 85 insertions(+), 56 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 49e2cec838..0becbf2c9c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,6 +3,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +use crate::BeaconChainError; use lmd_ghost::LmdGhost; use log::trace; use operation_pool::DepositInsertStatus; @@ -11,19 +12,18 @@ use parking_lot::{RwLock, RwLockReadGuard}; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::per_block_processing::errors::{ - AttesterSlashingValidationError, DepositValidationError, - ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + AttesterSlashingValidationError, DepositValidationError, ExitValidationError, + ProposerSlashingValidationError, TransferValidationError, }; use state_processing::{ - per_block_processing, per_block_processing_without_verifying_block_signature, - per_slot_processing, BlockProcessingError, common + common, per_block_processing, per_block_processing_without_verifying_block_signature, + per_slot_processing, BlockProcessingError, }; use std::sync::Arc; use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; use store::{Error as 
DBError, Store}; use tree_hash::TreeHash; use types::*; -use crate::BeaconChainError; // Text included in blocks. // Must be 32-bytes or panic. @@ -511,17 +511,21 @@ impl BeaconChain { /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. - pub fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), Error> { + pub fn process_attestation(&self, attestation: Attestation) -> Result<(), Error> { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - if self.fork_choice.should_process_attestation(&state, &attestation)? { + if self + .fork_choice + .should_process_attestation(&state, &attestation)? + { let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; - per_block_processing::is_valid_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + per_block_processing::is_valid_indexed_attestation( + &state, + &indexed_attestation, + &self.spec, + )?; self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -540,12 +544,20 @@ impl BeaconChain { } /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + fn get_attestation_state( + &self, + attestation: &Attestation, + ) -> Option> { // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. 
let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); - let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); + let min_slot = + (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); + let blocks = BestBlockRootsIterator::owned( + self.store.clone(), + self.state.read().clone(), + self.state.read().slot.clone(), + ); for (root, slot) in blocks { if root == attestation.data.target.root { return Some(self.state.read().clone()); @@ -554,15 +566,18 @@ impl BeaconChain { if slot == min_slot { break; } - }; + } // A different state is retrieved from the database. - match self.store.get::>(&attestation.data.target.root) { + match self + .store + .get::>(&attestation.data.target.root) + { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, - _ => None + _ => None, }, - _ => None + _ => None, } } @@ -1031,4 +1046,3 @@ impl From for Error { Error::BeaconStateError(e) } } - diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 4e2170ca84..266c598ac8 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,9 +1,11 @@ use crate::fork_choice::Error as ForkChoiceError; use crate::metrics::Error as MetricsError; +use state_processing::per_block_processing::errors::{ + AttestationValidationError, IndexedAttestationValidationError, +}; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; use types::*; -use state_processing::per_block_processing::errors::{AttestationValidationError, IndexedAttestationValidationError}; macro_rules! 
easy_from_to { ($from: ident, $to: ident) => { @@ -33,7 +35,7 @@ pub enum BeaconChainError { SlotProcessingError(SlotProcessingError), MetricsError(String), AttestationValidationError(AttestationValidationError), - IndexedAttestationValidationError(IndexedAttestationValidationError) + IndexedAttestationValidationError(IndexedAttestationValidationError), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 3900575aee..d16a8f9a83 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -3,7 +3,9 @@ use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices; use std::sync::Arc; use store::{Error as StoreError, Store}; -use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; +use types::{ + Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot, +}; type Result = std::result::Result; @@ -171,7 +173,11 @@ impl ForkChoice { } /// Determines whether or not the given attestation contains a latest message. 
- pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { + pub fn should_process_attestation( + &self, + state: &BeaconState, + attestation: &Attestation, + ) -> Result { let validator_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; @@ -179,12 +185,11 @@ impl ForkChoice { Ok(validator_indices .iter() - .find(|&&v| { - match self.backend.latest_message(v) { - Some((_, slot)) => block_slot > slot, - None => true - } - }).is_some()) + .find(|&&v| match self.backend.latest_message(v) { + Some((_, slot)) => block_slot > slot, + None => true, + }) + .is_some()) } // Returns the latest message for a given validator @@ -224,4 +229,3 @@ impl From for Error { Error::BackendError(e) } } - diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8f0d4c8ee4..ab1a31690d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -178,12 +178,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations( - &attestation_strategy, - &new_state, - block_root, - slot, - ); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); } else { panic!("block should be successfully processed: {:?}", outcome); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index cc1a84973e..5b8a09fafc 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,7 +8,7 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot, RelativeEpoch}; +use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, RelativeEpoch, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -270,20 +270,23 @@ fn free_attestations_added_to_fork_choice_some_none() { let validators: Vec = (0..VALIDATOR_COUNT).collect(); let slots: Vec = validators .iter() - .map(|&v| - state.get_attestation_duties(v, RelativeEpoch::Current) + .map(|&v| { + state + .get_attestation_duties(v, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() .slot - ).collect(); + }) + .collect(); let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots.clone() { let latest_message = fork_choice.latest_message(*validator); - if slot <= num_blocks_produced && slot != 0{ + if slot <= num_blocks_produced && slot != 0 { assert_eq!( - latest_message.unwrap().1, slot, + latest_message.unwrap().1, + slot, "Latest message slot should be equal to attester duty." ) } else { @@ -313,30 +316,35 @@ fn free_attestations_added_to_fork_choice_all_updated() { let validators: Vec = (0..VALIDATOR_COUNT).collect(); let slots: Vec = validators .iter() - .map(|&v| - state.get_attestation_duties(v, RelativeEpoch::Current) + .map(|&v| { + state + .get_attestation_duties(v, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() .slot - ).collect(); + }) + .collect(); let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots { let latest_message = fork_choice.latest_message(*validator); assert_eq!( - latest_message.unwrap().1, slot, + latest_message.unwrap().1, + slot, "Latest message slot should be equal to attester duty." ); if slot != num_blocks_produced { - let block_root = state.get_block_root(slot) + let block_root = state + .get_block_root(slot) .expect("Should get block root at slot"); assert_eq!( - latest_message.unwrap().0, *block_root, + latest_message.unwrap().0, + *block_root, "Latest message block root should be equal to block at slot." 
); } } -} \ No newline at end of file +} diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index c7b3a5711f..00a6431519 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,4 +1,4 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconChainError}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::TopicBuilder; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; @@ -179,7 +179,11 @@ impl AttestationService for AttestationServiceInstance { "error" => format!("{:?}", e), ); resp.set_success(false); - resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); + resp.set_msg( + format!("InvalidIndexedAttestation: {:?}", e) + .as_bytes() + .to_vec(), + ); } Err(e) => { // Some other error @@ -190,7 +194,11 @@ impl AttestationService for AttestationServiceInstance { "error" => format!("{:?}", e), ); resp.set_success(false); - resp.set_msg(format!("There was a beacon chain error: {:?}", e).as_bytes().to_vec()); + resp.set_msg( + format!("There was a beacon chain error: {:?}", e) + .as_bytes() + .to_vec(), + ); } }; diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 0ef78c37e9..5d70748042 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -111,9 +111,7 @@ where } fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - self.core - .write() - .latest_message(validator_index) + self.core.write().latest_message(validator_index) } } @@ -263,7 +261,7 @@ where pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { match self.latest_votes.get(validator_index) { Some(v) => Some((v.hash.clone(), v.slot.clone())), - None => None + None => None, } } From 2c3fc318bafcd230ff6b9c3c44519d2f6197018e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 13:20:15 +1000 Subject: [PATCH 
022/186] Do first pass on Grants code --- beacon_node/beacon_chain/src/beacon_chain.rs | 143 ++++++++++++++++--- beacon_node/beacon_chain/src/errors.rs | 3 + beacon_node/network/src/sync/simple_sync.rs | 7 +- eth2/types/src/beacon_block.rs | 5 + 4 files changed, 141 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0becbf2c9c..fed48036d9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,6 +54,12 @@ pub enum BlockProcessingOutcome { PerBlockProcessingError(BlockProcessingError), } +#[derive(Debug, PartialEq)] +pub enum AttestationProcessingOutcome { + Processed, + UnknownHeadBlock { beacon_block_root: Hash256 }, +} + pub trait BeaconChainTypes { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -511,28 +517,114 @@ impl BeaconChain { /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. - pub fn process_attestation(&self, attestation: Attestation) -> Result<(), Error> { + pub fn process_attestation( + &self, + attestation: Attestation, + ) -> Result { + // From the store, load the attestation's "head block". + // + // An honest validator would have set this block to be the head of the chain (i.e., the + // result of running fork choice). + if let Some(attestation_head_block) = self + .store + .get::>(&attestation.data.beacon_block_root)? + { + // Attempt to process the attestation using the `self.head()` state. + // + // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. + let outcome: Option> = { + // Take a read lock on the head beacon state. + // + // The purpose of this whole `let processed ...` block is to ensure that the read + // lock is dropped if we don't end up using the head beacon state. 
+ let state = &self.head().beacon_state; + + // If it turns out that the attestation was made using the head state, then there + // is no need to load a state from the database to process the attestation. + if state.current_epoch() == attestation_head_block.epoch() + && state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false) + { + // The head state is able to be used to validate this attestation. No need to load + // anything from the database. + Some(self.process_attestation_for_state_and_block( + attestation.clone(), + state, + &attestation_head_block, + )) + } else { + None + } + }; + + // TODO: we could try and see if the "speculative state" (e.g., self.state) can support + // this, without needing to load it from the db. + + if let Some(result) = outcome { + result + } else { + // The state required to verify this attestation must be loaded from the database. + let mut state: BeaconState = self + .store + .get(&attestation_head_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; + + // Ensure the state loaded from the database matches the state of the attestation + // head block. + for _ in state.slot.as_u64()..attestation_head_block.slot.as_u64() { + per_slot_processing(&mut state, &self.spec)?; + } + + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) + } + } else { + // Reject any block where we have not processed `attestation.data.beacon_block_root`. + // + // This is likely overly restrictive, we could store the attestation for later + // processing. 
+ warn!( + self.log, + "Dropping attestation for unknown block"; + "block" => format!("{}", attestation.data.beacon_block_root) + ); + Ok(AttestationProcessingOutcome::UnknownHeadBlock { + beacon_block_root: attestation.data.beacon_block_root, + }) + } + } + + fn process_attestation_for_state_and_block( + &self, + attestation: Attestation, + state: &BeaconState, + head_block: &BeaconBlock, + ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - if let Some(state) = self.get_attestation_state(&attestation) { - if self - .fork_choice - .should_process_attestation(&state, &attestation)? - { - let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; - per_block_processing::is_valid_indexed_attestation( - &state, - &indexed_attestation, - &self.spec, - )?; - self.fork_choice.process_attestation(&state, &attestation)?; - } + if self + .fork_choice + .should_process_attestation(state, &attestation)? + { + // TODO: check validation. 
+ let indexed_attestation = common::get_indexed_attestation(state, &attestation)?; + per_block_processing::is_valid_indexed_attestation( + state, + &indexed_attestation, + &self.spec, + )?; + self.fork_choice.process_attestation(&state, &attestation)?; } let result = self .op_pool - .insert_attestation(attestation, &*self.state.read(), &self.spec); + .insert_attestation(attestation, state, &self.spec); timer.observe_duration(); @@ -540,14 +632,32 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - result.map_err(|e| BeaconChainError::AttestationValidationError(e)) + result + .map(|_| AttestationProcessingOutcome::Processed) + .map_err(|e| Error::AttestationValidationError(e)) } + fn state_can_process_attestation( + state: &BeaconState, + data: &AttestationData, + head_block: &BeaconBlock, + ) -> bool { + (state.current_epoch() - 1 <= data.target.epoch) + && (data.target.epoch <= state.current_epoch() + 1) + && state + .get_block_root(head_block.slot) + .map(|root| *root == data.beacon_block_root) + .unwrap_or_else(|_| false) + } + + /* /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state( &self, attestation: &Attestation, ) -> Option> { + let state = &self.head().beacon_state; + // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. let slots_per_epoch = T::EthSpec::slots_per_epoch(); @@ -580,6 +690,7 @@ impl BeaconChain { _ => None, } } + */ /// Accept some deposit and queue it for inclusion in an appropriate block. 
pub fn process_deposit( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 266c598ac8..0b8fae7bf6 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -34,6 +34,9 @@ pub enum BeaconChainError { MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), MetricsError(String), + NoStateForAttestation { + beacon_block_root: Hash256, + }, AttestationValidationError(AttestationValidationError), IndexedAttestationValidationError(IndexedAttestationValidationError), } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ac001415cd..13e9203dd3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -630,7 +630,12 @@ impl SimpleSync { _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { - Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), + Ok(outcome) => info!( + self.log, + "Processed attestation"; + "source" => "gossip", + "outcome" => format!("{:?}", outcome) + ), Err(e) => { warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e)) } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 772ef0c46d..ecf8797992 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -62,6 +62,11 @@ impl BeaconBlock { } } + /// Returns the epoch corresponding to `self.slot`. + pub fn epoch(&self) -> Epoch { + self.slot.epoch(T::slots_per_epoch()) + } + /// Returns the `signed_root` of the block. 
/// /// Spec v0.8.1 From 907a4e5a4b7f8e4a70a2c790d6f85daa48fbd45e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 14:54:08 +1000 Subject: [PATCH 023/186] Configuration updates allow for verbosity CLI flag and spec constants --- beacon_node/client/src/config.rs | 12 +++++- beacon_node/src/main.rs | 61 ++++++++++++++++++++----------- eth2/utils/eth2_config/src/lib.rs | 7 ++++ validator_client/eth2_config.toml | 47 ------------------------ validator_client/src/main.rs | 59 +++++++++++++++++++----------- 5 files changed, 94 insertions(+), 92 deletions(-) delete mode 100644 validator_client/eth2_config.toml diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1a27de406b..176625d772 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use crate::Eth2Config; use clap::ArgMatches; use http_server::HttpServerConfig; use network::NetworkConfig; @@ -56,8 +57,6 @@ impl Default for Config { log_file: PathBuf::from(""), db_type: "disk".to_string(), db_name: "chain_db".to_string(), - // Note: there are no default bootnodes specified. - // Once bootnodes are established, add them here. 
network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), http: HttpServerConfig::default(), @@ -129,6 +128,15 @@ impl Config { self.data_dir = PathBuf::from(dir); }; + if let Some(default_spec) = args.value_of("default-spec") { + match default_spec { + "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, + "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, + "interop" => self.spec_constants = Eth2Config::interop().spec_constants, + _ => {} // not supported + } + } + if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c85eeedace..be57c6c9de 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -193,12 +193,9 @@ fn main() { .long("default-spec") .value_name("TITLE") .short("default-spec") - .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded - from disk. A spec will be written to disk after this flag is used, so it is - primarily used for creating eth2 spec files.") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("recent-genesis") @@ -217,7 +214,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("info"), + .default_value("trace"), ) .arg( Arg::with_name("verbosity") @@ -316,26 +313,42 @@ fn main() { let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. 
- let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("default-spec") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. - }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; @@ -348,6 +361,12 @@ fn main() { } }; + // check to ensure the spec constants between the client and eth2_config match + if eth2_config.spec_constants != client_config.spec_constants { + crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 
Config" => format!("{}", eth2_config.spec_constants)); + return; + } + // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs index 17cbc4211a..794a27e4e6 100644 --- a/eth2/utils/eth2_config/src/lib.rs +++ b/eth2/utils/eth2_config/src/lib.rs @@ -37,6 +37,13 @@ impl Eth2Config { spec: ChainSpec::minimal(), } } + + pub fn interop() -> Self { + Self { + spec_constants: "interop".to_string(), + spec: ChainSpec::interop(), + } + } } impl Eth2Config { diff --git a/validator_client/eth2_config.toml b/validator_client/eth2_config.toml deleted file mode 100644 index 1e0781378d..0000000000 --- a/validator_client/eth2_config.toml +++ /dev/null @@ -1,47 +0,0 @@ -spec_constants = "minimal" - -[spec] -target_committee_size = 4 -max_indices_per_attestation = 4096 -min_per_epoch_churn_limit = 4 -churn_limit_quotient = 65536 -base_rewards_per_epoch = 5 -shuffle_round_count = 10 -deposit_contract_tree_depth = 32 -min_deposit_amount = 1000000000 -max_effective_balance = 32000000000 -ejection_balance = 16000000000 -effective_balance_increment = 1000000000 -genesis_slot = 0 -zero_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" -bls_withdrawal_prefix_byte = "0x00" -genesis_time = 4294967295 -seconds_per_slot = 6 -min_attestation_inclusion_delay = 2 -min_seed_lookahead = 1 -activation_exit_delay = 4 -slots_per_eth1_voting_period = 16 -slots_per_historical_root = 8192 -min_validator_withdrawability_delay = 256 -persistent_committee_period = 2048 -max_crosslink_epochs = 64 -min_epochs_to_inactivity_penalty = 4 -base_reward_quotient = 32 -whistleblowing_reward_quotient = 512 -proposer_reward_quotient = 8 -inactivity_penalty_quotient = 33554432 -min_slashing_penalty_quotient = 32 -max_proposer_slashings = 16 -max_attester_slashings = 1 -max_attestations = 128 -max_deposits = 16 -max_voluntary_exits = 16 -max_transfers = 
0 -domain_beacon_proposer = 0 -domain_randao = 1 -domain_attestation = 2 -domain_deposit = 3 -domain_voluntary_exit = 4 -domain_transfer = 5 -boot_nodes = ["/ip4/127.0.0.1/tcp/9000"] -chain_id = 2 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 76acb2f1a1..0782df3236 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -64,14 +64,13 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("spec-constants") - .long("spec-constants") + Arg::with_name("default-spec") + .long("default-spec") .value_name("TITLE") - .short("s") - .help("The title of the spec constants for chain config.") + .short("default-spec") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("debug-level") @@ -126,7 +125,7 @@ fn main() { let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - // Attempt to lead the `ClientConfig` from disk. + // Attempt to load the `ClientConfig` from disk. // // If file doesn't exist, create a new, default one. let mut client_config = match read_from_file::( @@ -164,26 +163,42 @@ fn main() { .and_then(|s| Some(PathBuf::from(s))) .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. - let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("spec-constants") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. 
- }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; From dba7bfc4e14d6bd57a7617d5464dfaa1d0f46581 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:17:21 +1000 Subject: [PATCH 024/186] Update submodules to master --- tests/ef_tests/eth2.0-spec-tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index d405782646..aaa1673f50 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit d405782646190595927cc0a59f504f7b00a760f3 +Subproject commit aaa1673f508103e11304833e0456e4149f880065 From 
b3e0aad7bfa3a3ebfd69f61163b18048438924e8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:55:09 +1000 Subject: [PATCH 025/186] Correct minimal chainspec modifications --- eth2/types/src/chain_spec.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index d6eaa123de..9dec626d44 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -202,15 +202,12 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; - let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 2, // lighthouse testnet network id boot_nodes, ..ChainSpec::mainnet() @@ -221,15 +218,12 @@ impl ChainSpec { /// /// This allows us to customize a chain spec for interop testing. pub fn interop() -> Self { - let genesis_slot = Slot::new(0); let boot_nodes = vec![]; Self { seconds_per_slot: 12, target_committee_size: 4, shuffle_round_count: 10, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 13, boot_nodes, ..ChainSpec::mainnet() From fe2402b361b9e7d8bc6f23cb022da875c32c050c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:02:30 +1000 Subject: [PATCH 026/186] Add another attestation processing test --- beacon_node/beacon_chain/src/beacon_chain.rs | 9 ++--- beacon_node/beacon_chain/src/lib.rs | 4 ++- beacon_node/beacon_chain/src/test_utils.rs | 30 +++++++++++++++-- beacon_node/beacon_chain/tests/tests.rs | 35 ++++++++++++++++++++ 4 files changed, 70 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c58e619bc1..8d5922850e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -543,7 
+543,7 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. - let outcome: Option> = { + let optional_outcome: Option> = { // Take a read lock on the head beacon state. // // The purpose of this whole `let processed ...` block is to ensure that the read @@ -553,10 +553,11 @@ impl BeaconChain { // If it turns out that the attestation was made using the head state, then there // is no need to load a state from the database to process the attestation. if state.current_epoch() == attestation_head_block.epoch() - && state + && (state .get_block_root(attestation_head_block.slot) .map(|root| *root == attestation.data.beacon_block_root) .unwrap_or_else(|_| false) + || attestation.data.beacon_block_root == self.head().beacon_block_root) { // The head state is able to be used to validate this attestation. No need to load // anything from the database. @@ -573,8 +574,8 @@ impl BeaconChain { // TODO: we could try and see if the "speculative state" (e.g., self.state) can support // this, without needing to load it from the db. - if let Some(result) = outcome { - result + if let Some(outcome) = optional_outcome { + outcome } else { // The state required to verify this attestation must be loaded from the database. 
let mut state: BeaconState = self diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c2efcad130..3188760a42 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,7 +7,9 @@ mod metrics; mod persisted_beacon_chain; pub mod test_utils; -pub use self::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +pub use self::beacon_chain::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, +}; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1049c66ad3..ce6d4d20ce 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -268,6 +268,28 @@ where head_block_root: Hash256, head_block_slot: Slot, ) { + self.get_free_attestations( + attestation_strategy, + state, + head_block_root, + head_block_slot, + ) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + .expect("should process attestation"); + }); + } + + /// Generates a `Vec` for some attestation strategy and head_block. 
+ pub fn get_free_attestations( + &self, + attestation_strategy: &AttestationStrategy, + state: &BeaconState, + head_block_root: Hash256, + head_block_slot: Slot, + ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -276,6 +298,8 @@ where AttestationStrategy::SomeValidators(vec) => vec.clone(), }; + let mut vec = vec![]; + state .get_crosslink_committees_at_slot(state.slot) .expect("should get committees") @@ -328,12 +352,12 @@ where signature, }; - self.chain - .process_attestation(attestation) - .expect("should process attestation"); + vec.push(attestation) } } }); + + vec } /// Creates two forks: diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 5b8a09fafc..1f84008496 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -4,6 +4,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, BEACON_CHAIN_DB_KEY, }; +use beacon_chain::AttestationProcessingOutcome; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -298,6 +299,40 @@ fn free_attestations_added_to_fork_choice_some_none() { } } +#[test] +fn free_attestations_over_slots() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; + + let harness = get_harness(VALIDATOR_COUNT); + + let mut attestations = vec![]; + + for _ in 0..num_blocks_produced { + harness.extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + attestations.append(&mut harness.get_free_attestations( + &AttestationStrategy::AllValidators, + &harness.chain.head().beacon_state, + harness.chain.head().beacon_block_root, + harness.chain.head().beacon_block.slot, + )); + + harness.advance_slot(); + } + + for attestation in attestations { + assert_eq!( + harness.chain.process_attestation(attestation), + Ok(AttestationProcessingOutcome::Processed) + ) + } +} + #[test] fn free_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; From 107f32642f2b82db7becce53bce7638f635834fa Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 16:33:21 +1000 Subject: [PATCH 027/186] Duplication of validator polls are no longer fatal --- validator_client/src/service.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index c4ccbc2042..3ddb96e4c2 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -23,7 +23,7 @@ use protos::services_grpc::{ AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient, ValidatorServiceClient, }; -use slog::{error, info, warn}; +use slog::{crit, error, info, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; @@ -37,7 +37,7 @@ use types::{ChainSpec, Epoch, EthSpec, Fork, Slot}; /// A fixed amount of time after a slot to perform operations. This gives the node time to complete /// per-slot processes. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(200); +const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); /// The validator service. This is the main thread that executes and maintains validator /// duties. @@ -106,7 +106,7 @@ impl Service Service self.current_slot, - "The Timer should poll a new slot" - ); + // this is a non-fatal error. 
If the slot clock repeats, the node could + // have been slow to process the previous slot and is now duplicating tasks. + // We ignore duplicated but raise a critical error. + if current_slot <= self.current_slot { + crit!( + self.log, + "The validator tried to duplicate a slot. Likely missed the previous slot" + ); + return Err("Duplicate slot".into()); + } self.current_slot = current_slot; info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64()); Ok(()) From 378fe05c895a19a960c51ec91d1f89084fc561ce Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:40:49 +1000 Subject: [PATCH 028/186] Tidy attestation processing --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +------------ beacon_node/beacon_chain/src/test_utils.rs | 24 ++++++++++++++++---- beacon_node/beacon_chain/tests/tests.rs | 15 ++++++++---- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index af6736edec..89260cf51b 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -24,3 +24,4 @@ lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] rand = "0.5.5" +lazy_static = "1.3.0" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8d5922850e..5fc59ba663 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,7 +4,6 @@ use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; -use crate::BeaconChainError; use lmd_ghost::LmdGhost; use log::trace; use operation_pool::DepositInsertStatus; @@ -615,7 +614,7 @@ impl BeaconChain { &self, attestation: Attestation, state: 
&BeaconState, - head_block: &BeaconBlock, + _head_block: &BeaconBlock, ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); @@ -649,19 +648,6 @@ impl BeaconChain { .map_err(|e| Error::AttestationValidationError(e)) } - fn state_can_process_attestation( - state: &BeaconState, - data: &AttestationData, - head_block: &BeaconBlock, - ) -> bool { - (state.current_epoch() - 1 <= data.target.epoch) - && (data.target.epoch <= state.current_epoch() + 1) - && state - .get_block_root(head_block.slot) - .map(|root| *root == data.beacon_block_root) - .unwrap_or_else(|_| false) - } - /* /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ce6d4d20ce..293d3b9b91 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -84,14 +84,30 @@ where { /// Instantiate a new harness with `validator_count` initial validators. pub fn new(validator_count: usize) -> Self { + let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( + validator_count, + &E::default_spec(), + ); + let (genesis_state, keypairs) = state_builder.build(); + + Self::from_state_and_keypairs(genesis_state, keypairs) + } + + /// Instantiate a new harness with an initial validator for each key supplied. + pub fn from_keypairs(keypairs: Vec) -> Self { + let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); + let (genesis_state, keypairs) = state_builder.build(); + + Self::from_state_and_keypairs(genesis_state, keypairs) + } + + /// Instantiate a new harness with the given genesis state and a keypair for each of the + /// initial validators in the given state. 
+ pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { let spec = E::default_spec(); let store = Arc::new(MemoryStore::open()); - let state_builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); - let (genesis_state, keypairs) = state_builder.build(); - let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 1f84008496..d286aaec0c 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,5 +1,8 @@ #![cfg(not(debug_assertions))] +#[macro_use] +extern crate lazy_static; + use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, BEACON_CHAIN_DB_KEY, @@ -9,17 +12,21 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, RelativeEpoch, Slot}; +use types::{Deposit, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 24; +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); +} + type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::new(validator_count); + let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); - // Move past the zero slot. 
harness.advance_slot(); harness @@ -300,7 +307,7 @@ fn free_attestations_added_to_fork_choice_some_none() { } #[test] -fn free_attestations_over_slots() { +fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); From 65ce94b2efc5acd97acfb742dd72380626fa210e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:54:35 +1000 Subject: [PATCH 029/186] Remove old code fragment --- beacon_node/beacon_chain/src/beacon_chain.rs | 42 -------------------- 1 file changed, 42 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5fc59ba663..60b65c95b4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -648,48 +648,6 @@ impl BeaconChain { .map_err(|e| Error::AttestationValidationError(e)) } - /* - /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state( - &self, - attestation: &Attestation, - ) -> Option> { - let state = &self.head().beacon_state; - - // Current state is used if the attestation targets a historic block and a slot within an - // equal or adjacent epoch. - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let min_slot = - (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); - let blocks = BestBlockRootsIterator::owned( - self.store.clone(), - self.state.read().clone(), - self.state.read().slot.clone(), - ); - for (root, slot) in blocks { - if root == attestation.data.target.root { - return Some(self.state.read().clone()); - } - - if slot == min_slot { - break; - } - } - - // A different state is retrieved from the database. 
- match self - .store - .get::>(&attestation.data.target.root) - { - Ok(Some(block)) => match self.store.get::>(&block.state_root) { - Ok(state) => state, - _ => None, - }, - _ => None, - } - } - */ - /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn process_deposit( &self, From 9f9af746eaa255d11bca18e17368cbacb3666d22 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 10:29:27 +1000 Subject: [PATCH 030/186] Add non-compiling half finished changes --- .../src/per_block_processing.rs | 14 ++- .../verify_attestation.rs | 113 +++++++----------- 2 files changed, 54 insertions(+), 73 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 3c89215550..3acadfde26 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -14,10 +14,7 @@ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use is_valid_indexed_attestation::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, }; -pub use verify_attestation::{ - verify_attestation, verify_attestation_time_independent_only, - verify_attestation_without_signature, -}; +pub use verify_attestation::{verify_attestation_for_block, verify_attestation_for_state}; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -37,6 +34,12 @@ mod verify_exit; mod verify_proposer_slashing; mod verify_transfer; +#[derive(PartialEq)] +pub enum VerifySignatures { + True, + False, +} + /// Updates the state for a new block, whilst validating that the block is valid. /// /// Returns `Ok(())` if the block is valid and the state was successfully updated. 
Otherwise @@ -312,7 +315,8 @@ pub fn process_attestations( .par_iter() .enumerate() .try_for_each(|(i, attestation)| { - verify_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) + verify_attestation_for_block(state, attestation, spec, VerifySignatures::True) + .map_err(|e| e.into_with_index(i)) })?; // Update the state in series. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index af25300457..bca6a90854 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,4 +1,5 @@ use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; +use super::VerifySignatures; use crate::common::get_indexed_attestation; use crate::per_block_processing::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, @@ -6,67 +7,23 @@ use crate::per_block_processing::{ use tree_hash::TreeHash; use types::*; -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state. -/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.8.0 -pub fn verify_attestation( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, true, false) -} - -/// Like `verify_attestation` but doesn't run checks which may become true in future states. -pub fn verify_attestation_time_independent_only( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, true, true) -} - -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, without validating the aggregate signature. 
-/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.8.0 -pub fn verify_attestation_without_signature( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, false, false) -} - /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the /// given state, optionally validating the aggregate signature. /// -/// /// Spec v0.8.0 -fn verify_attestation_parametric( +pub fn verify_attestation_for_block( state: &BeaconState, attestation: &Attestation, spec: &ChainSpec, - verify_signature: bool, - time_independent_only: bool, + verify_signatures: VerifySignatures, ) -> Result<(), Error> { let data = &attestation.data; - verify!( - data.crosslink.shard < T::ShardCount::to_u64(), - Invalid::BadShard - ); // Check attestation slot. let attestation_slot = state.get_attestation_data_slot(&data)?; verify!( - time_independent_only - || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, + attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, Invalid::IncludedTooEarly { state: state.slot, delay: spec.min_attestation_inclusion_delay, @@ -81,27 +38,47 @@ fn verify_attestation_parametric( } ); - // Verify the Casper FFG vote and crosslink data. 
- if !time_independent_only { - let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; + verify_attestation_for_state(state, attestation, spec, verify_signatures) +} - verify!( - data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), - Invalid::BadParentCrosslinkHash - ); - verify!( - data.crosslink.start_epoch == parent_crosslink.end_epoch, - Invalid::BadParentCrosslinkStartEpoch - ); - verify!( - data.crosslink.end_epoch - == std::cmp::min( - data.target.epoch, - parent_crosslink.end_epoch + spec.max_epochs_per_crosslink - ), - Invalid::BadParentCrosslinkEndEpoch - ); - } +/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that preceeds the given +/// `state`. +/// +/// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the +/// prior blocks in `state`. +/// +/// Spec v0.8.0 +pub fn verify_attestation_for_state( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, + verify_signature: VerifySignatures, +) -> Result<(), Error> { + let data = &attestation.data; + verify!( + data.crosslink.shard < T::ShardCount::to_u64(), + Invalid::BadShard + ); + + // Verify the Casper FFG vote and crosslink data. + let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; + + verify!( + data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), + Invalid::BadParentCrosslinkHash + ); + verify!( + data.crosslink.start_epoch == parent_crosslink.end_epoch, + Invalid::BadParentCrosslinkStartEpoch + ); + verify!( + data.crosslink.end_epoch + == std::cmp::min( + data.target.epoch, + parent_crosslink.end_epoch + spec.max_epochs_per_crosslink + ), + Invalid::BadParentCrosslinkEndEpoch + ); // Crosslink data root is zero (to be removed in phase 1). 
verify!( @@ -111,7 +88,7 @@ fn verify_attestation_parametric( // Check signature and bitfields let indexed_attestation = get_indexed_attestation(state, attestation)?; - if verify_signature { + if verify_signature == VerifySignatures::True { is_valid_indexed_attestation(state, &indexed_attestation, spec)?; } else { is_valid_indexed_attestation_without_signature(state, &indexed_attestation, spec)?; From 7c134a7504d2e8ff8b8cdd7d20459f96abde04a9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 16:47:24 +1000 Subject: [PATCH 031/186] Simplify, fix bugs, add tests for chain iters --- beacon_node/beacon_chain/src/beacon_chain.rs | 53 ++++----------- beacon_node/beacon_chain/src/test_utils.rs | 32 +++------ beacon_node/beacon_chain/tests/tests.rs | 68 +++++++++++++++++++- beacon_node/store/src/iter.rs | 30 ++++----- eth2/lmd_ghost/src/reduced_tree.rs | 6 +- 5 files changed, 106 insertions(+), 83 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 60b65c95b4..e8dcd50abf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -244,15 +244,12 @@ impl BeaconChain { /// /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. 
- pub fn rev_iter_block_roots( - &self, - slot: Slot, - ) -> ReverseBlockRootIterator { + pub fn rev_iter_block_roots(&self) -> ReverseBlockRootIterator { let state = &self.head().beacon_state; let block_root = self.head().beacon_block_root; let block_slot = state.slot; - let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); + let iter = BlockRootsIterator::owned(self.store.clone(), state.clone()); ReverseBlockRootIterator::new((block_root, block_slot), iter) } @@ -267,15 +264,12 @@ impl BeaconChain { /// /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. - pub fn rev_iter_state_roots( - &self, - slot: Slot, - ) -> ReverseStateRootIterator { + pub fn rev_iter_state_roots(&self) -> ReverseStateRootIterator { let state = &self.head().beacon_state; let state_root = self.head().beacon_state_root; let state_slot = state.slot; - let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); + let iter = StateRootsIterator::owned(self.store.clone(), state.clone()); ReverseStateRootIterator::new((state_root, state_slot), iter) } @@ -448,9 +442,8 @@ impl BeaconChain { pub fn produce_attestation_data(&self, shard: u64) -> Result { let state = self.state.read(); let head_block_root = self.head().beacon_block_root; - let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) + self.produce_attestation_data_for_block(shard, head_block_root, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -461,39 +454,19 @@ impl BeaconChain { &self, shard: u64, head_block_root: Hash256, - head_block_slot: Slot, state: &BeaconState, ) -> Result { // Collect some metrics. 
self.metrics.attestation_production_requests.inc(); let timer = self.metrics.attestation_production_times.start_timer(); - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); - // The `target_root` is the root of the first block of the current epoch. - // - // The `state` does not know the root of the block for it's current slot (it only knows - // about blocks from prior slots). This creates an edge-case when the state is on the first - // slot of the epoch -- we're unable to obtain the `target_root` because it is not a prior - // root. - // - // This edge case is handled in two ways: - // - // - If the head block is on the same slot as the state, we use it's root. - // - Otherwise, assume the current slot has been skipped and use the block root from the - // prior slot. - // - // For all other cases, we simply read the `target_root` from `state.latest_block_roots`. - let target_root = if state.slot == current_epoch_start_slot { - if head_block_slot == current_epoch_start_slot { - head_block_root - } else { - *state.get_block_root(current_epoch_start_slot - 1)? - } - } else { - *state.get_block_root(current_epoch_start_slot)? - }; + let target_root = self + .rev_iter_block_roots() + .find(|(_root, slot)| *slot % T::EthSpec::slots_per_epoch() == 0) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::UnableToFindTargetRoot(self.head().beacon_state.slot))?; + let target = Checkpoint { epoch: state.current_epoch(), root: target_root, @@ -523,7 +496,7 @@ impl BeaconChain { }) } - /// Accept a new attestation from the network. + /// Accept a new, potentially invalid attestation from the network. /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. 
@@ -614,7 +587,7 @@ impl BeaconChain { &self, attestation: Attestation, state: &BeaconState, - _head_block: &BeaconBlock, + block: &BeaconBlock, ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 293d3b9b91..f2ec5a0fd6 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -194,7 +194,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); + self.add_free_attestations(&attestation_strategy, &new_state, block_root); } else { panic!("block should be successfully processed: {:?}", outcome); } @@ -209,7 +209,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) + .rev_iter_state_roots() .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); @@ -282,20 +282,14 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, - head_block_slot: Slot, ) { - self.get_free_attestations( - attestation_strategy, - state, - head_block_root, - head_block_slot, - ) - .into_iter() - .for_each(|attestation| { - self.chain - .process_attestation(attestation) - .expect("should process attestation"); - }); + self.get_free_attestations(attestation_strategy, state, head_block_root) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + .expect("should process attestation"); + }); } /// Generates a `Vec` for some attestation strategy and head_block. 
@@ -304,7 +298,6 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, - head_block_slot: Slot, ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -329,12 +322,7 @@ where if attesting_validators.contains(validator_index) { let data = self .chain - .produce_attestation_data_for_block( - cc.shard, - head_block_root, - head_block_slot, - state, - ) + .produce_attestation_data_for_block(cc.shard, head_block_root, state) .expect("should produce attestation data"); let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d286aaec0c..8dc4ae6ec8 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -32,6 +32,73 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness.chain.rev_iter_block_roots().collect(); + let state_roots: Vec<(Hash256, Slot)> = harness.chain.rev_iter_state_roots().collect(); + + assert_eq!( + block_roots.len(), + state_roots.len(), + "should be an equal amount of block and state roots" + ); + + assert!( + block_roots.iter().any(|(_root, slot)| *slot == 0), + "should contain genesis block root" + ); + assert!( + state_roots.iter().any(|(_root, slot)| *slot == 0), + "should contain genesis state root" + ); + + assert_eq!( + block_roots.len(), + num_blocks_produced as usize + 1, + "should contain all produced blocks, plus the genesis block" + ); + + block_roots.windows(2).for_each(|x| { + assert_eq!( + x[1].1, + x[0].1 - 1, + "block root slots should be decreasing by one" + ) + }); + state_roots.windows(2).for_each(|x| { + assert_eq!( + x[1].1, + x[0].1 - 1, + "state root slots should be decreasing by one" + ) + }); + + let head = &harness.chain.head(); + + assert_eq!( + *block_roots.first().expect("should have some block roots"), + (head.beacon_block_root, head.beacon_block.slot), + "first block root and slot should 
be for the head block" + ); + + assert_eq!( + *state_roots.first().expect("should have some state roots"), + (head.beacon_state_root, head.beacon_state.slot), + "first state root and slot should be for the head state" + ); +} + #[test] fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); @@ -326,7 +393,6 @@ fn attestations_with_increasing_slots() { &AttestationStrategy::AllValidators, &harness.chain.head().beacon_state, harness.chain.head().beacon_block_root, - harness.chain.head().beacon_block.slot, )); harness.advance_slot(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index c4e557b2de..84bf3759fb 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -20,7 +20,7 @@ impl<'a, U: Store, E: EthSpec> AncestorIter> for fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BlockRootsIterator::owned(store, state, self.slot)) + Some(BlockRootsIterator::owned(store, state)) } } @@ -32,19 +32,19 @@ pub struct StateRootsIterator<'a, T: EthSpec, U> { } impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Borrowed(beacon_state), - slot: start_slot + 1, } } - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Owned(beacon_state), - slot: start_slot + 1, } } } @@ -88,16 +88,16 @@ pub struct BlockIterator<'a, T: EthSpec, U> { impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> { /// Create a new iterator over all blocks in the given `beacon_state` and prior states. 
- pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { - roots: BlockRootsIterator::new(store, beacon_state, start_slot), + roots: BlockRootsIterator::new(store, beacon_state), } } /// Create a new iterator over all blocks in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { - roots: BlockRootsIterator::owned(store, beacon_state, start_slot), + roots: BlockRootsIterator::owned(store, beacon_state), } } } @@ -128,20 +128,20 @@ pub struct BlockRootsIterator<'a, T: EthSpec, U> { impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Borrowed(beacon_state), - slot: start_slot + 1, } } /// Create a new iterator over all block roots in the given `beacon_state` and prior states. 
- pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Owned(beacon_state), - slot: start_slot + 1, } } } @@ -218,7 +218,7 @@ mod test { state_b.state_roots[0] = state_a_root; store.put(&state_a_root, &state_a).unwrap(); - let iter = BlockRootsIterator::new(store.clone(), &state_b, state_b.slot - 1); + let iter = BlockRootsIterator::new(store.clone(), &state_b); assert!( iter.clone().find(|(_root, slot)| *slot == 0).is_some(), @@ -267,7 +267,7 @@ mod test { store.put(&state_a_root, &state_a).unwrap(); store.put(&state_b_root, &state_b).unwrap(); - let iter = StateRootsIterator::new(store.clone(), &state_b, state_b.slot - 1); + let iter = StateRootsIterator::new(store.clone(), &state_b); assert!( iter.clone().find(|(_root, slot)| *slot == 0).is_some(), diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 5d70748042..9668620b79 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -611,11 +611,7 @@ where let block = self.get_block(child)?; let state = self.get_state(block.state_root)?; - Ok(BlockRootsIterator::owned( - self.store.clone(), - state, - block.slot - 1, - )) + Ok(BlockRootsIterator::owned(self.store.clone(), state)) } /// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`. 
From b1591c3c12d777377524fcd37809ad4508e4e7c9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 16:49:27 +1000 Subject: [PATCH 032/186] Remove attestation processing from op pool --- beacon_node/beacon_chain/src/beacon_chain.rs | 68 +++++++++++++++++-- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/fork_choice.rs | 18 ++--- eth2/operation_pool/src/lib.rs | 20 ++++-- .../src/per_block_processing.rs | 6 +- .../verify_attestation.rs | 10 +-- 6 files changed, 92 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e8dcd50abf..8982cdf796 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -11,12 +11,15 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; -use state_processing::per_block_processing::errors::{ - AttesterSlashingValidationError, DepositValidationError, ExitValidationError, - ProposerSlashingValidationError, TransferValidationError, +use state_processing::per_block_processing::{ + errors::{ + AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + }, + verify_attestation_for_state, VerifySignatures, }; use state_processing::{ - common, per_block_processing, per_block_processing_without_verifying_block_signature, + per_block_processing, per_block_processing_without_verifying_block_signature, per_slot_processing, BlockProcessingError, }; use std::sync::Arc; @@ -58,6 +61,7 @@ pub enum BlockProcessingOutcome { pub enum AttestationProcessingOutcome { Processed, UnknownHeadBlock { beacon_block_root: Hash256 }, + Invalid(AttestationValidationError), } pub trait BeaconChainTypes { @@ -543,9 +547,6 @@ impl BeaconChain { } }; - // TODO: we could try 
and see if the "speculative state" (e.g., self.state) can support - // this, without needing to load it from the db. - if let Some(outcome) = optional_outcome { outcome } else { @@ -583,6 +584,25 @@ impl BeaconChain { } } + /// Verifies the `attestation` against the `state` to which it is attesting. + /// + /// Updates fork choice with any new latest messages, but _does not_ find or update the head. + /// + /// ## Notes + /// + /// The given `state` must fulfil one of the following conditions: + /// + /// - `state` corresponds to the `block.state_root` identified by + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. + /// - `state.slot` is in the same epoch as `block.slot` and + /// `attestation.data.beacon_block_root` is in `state.block_roots`. (Viz., the attestation was + /// attesting to an ancestor of `state` from the same epoch as `state`. + /// + /// Additionally, `attestation.data.beacon_block_root` **must** be available to read in + /// `self.store` _and_ be the root of the given `block`. + /// + /// If the given conditions are not fulfilled, the function may error or provide a false + /// negative (indicating that a given `attestation` is invalid when it is was validly formed). fn process_attestation_for_state_and_block( &self, attestation: Attestation, @@ -592,6 +612,39 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); + let result = if let Err(e) = + verify_attestation_for_state(state, &attestation, &self.spec, VerifySignatures::True) + { + warn!( + self.log, + "Invalid attestation"; + "state_epoch" => state.current_epoch(), + "error" => format!("{:?}", e), + ); + + Ok(AttestationProcessingOutcome::Invalid(e)) + } else { + // Provide the attestation to fork choice, updating the validator latest messages but + // _without_ finding and updating the head. 
+ self.fork_choice + .process_attestation(&state, &attestation, block)?; + + // Provide the valid attestation to op pool, which may choose to retain the + // attestation for inclusion in a future block. + self.op_pool + .insert_attestation(attestation, state, &self.spec)?; + + // Update the metrics. + self.metrics.attestation_processing_successes.inc(); + + Ok(AttestationProcessingOutcome::Processed) + }; + + timer.observe_duration(); + + result + + /* if self .fork_choice .should_process_attestation(state, &attestation)? @@ -619,6 +672,7 @@ impl BeaconChain { result .map(|_| AttestationProcessingOutcome::Processed) .map_err(|e| Error::AttestationValidationError(e)) + */ } /// Accept some deposit and queue it for inclusion in an appropriate block. diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0b8fae7bf6..7a51fc4258 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -26,6 +26,7 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 71415d1915..83d6c335f1 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -20,7 +20,6 @@ pub enum Error { pub struct ForkChoice { backend: T::LmdGhost, - store: Arc, /// Used for resolving the `0x00..00` alias back to genesis. 
/// /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root @@ -39,7 +38,6 @@ impl ForkChoice { genesis_block_root: Hash256, ) -> Self { Self { - store: store.clone(), backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), genesis_block_root, } @@ -119,7 +117,7 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation(state, attestation)?; + self.process_attestation(state, attestation, block)?; } self.backend.process_block(block, block_root)?; @@ -127,13 +125,14 @@ impl ForkChoice { Ok(()) } - /// Process an attestation. + /// Process an attestation which references `block` in `attestation.data.beacon_block_root`. /// /// Assumes the attestation is valid. pub fn process_attestation( &self, state: &BeaconState, attestation: &Attestation, + block: &BeaconBlock, ) -> Result<()> { let block_hash = attestation.data.beacon_block_root; @@ -152,20 +151,13 @@ impl ForkChoice { // to genesis just by being present in the chain. // // Additionally, don't add any block hash to fork choice unless we have imported the block. 
- if block_hash != Hash256::zero() - && self - .store - .exists::>(&block_hash) - .unwrap_or(false) - { + if block_hash != Hash256::zero() { let validator_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let block_slot = state.get_attestation_data_slot(&attestation.data)?; - for validator_index in validator_indices { self.backend - .process_attestation(validator_index, block_hash, block_slot)?; + .process_attestation(validator_index, block_hash, block.slot)?; } } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 92d5fb1683..ba9ca81c07 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -15,9 +15,10 @@ use state_processing::per_block_processing::errors::{ ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; use state_processing::per_block_processing::{ - get_slashable_indices_modular, verify_attestation, verify_attestation_time_independent_only, + get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_attester_slashing, verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; @@ -64,15 +65,16 @@ impl OperationPool { } /// Insert an attestation into the pool, aggregating it with existing attestations if possible. + /// + /// ## Note + /// + /// This function assumes the given `attestation` is valid. pub fn insert_attestation( &self, attestation: Attestation, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), AttestationValidationError> { - // Check that attestation signatures are valid. - verify_attestation_time_independent_only(state, &attestation, spec)?; - let id = AttestationId::from_data(&attestation.data, state, spec); // Take a write lock on the attestations map. 
@@ -128,7 +130,15 @@ impl OperationPool { }) .flat_map(|(_, attestations)| attestations) // That are valid... - .filter(|attestation| verify_attestation(state, attestation, spec).is_ok()) + .filter(|attestation| { + verify_attestation_for_block_inclusion( + state, + attestation, + spec, + VerifySignatures::True, + ) + .is_ok() + }) .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state))); maximum_cover(valid_attestations, T::MaxAttestations::to_usize()) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 3acadfde26..a64158ac98 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -14,7 +14,9 @@ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use is_valid_indexed_attestation::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, }; -pub use verify_attestation::{verify_attestation_for_block, verify_attestation_for_state}; +pub use verify_attestation::{ + verify_attestation_for_block_inclusion, verify_attestation_for_state, +}; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -315,7 +317,7 @@ pub fn process_attestations( .par_iter() .enumerate() .try_for_each(|(i, attestation)| { - verify_attestation_for_block(state, attestation, spec, VerifySignatures::True) + verify_attestation_for_block_inclusion(state, attestation, spec, VerifySignatures::True) .map_err(|e| e.into_with_index(i)) })?; diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index bca6a90854..74dbefa232 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -7,11 +7,13 @@ use crate::per_block_processing::{ use tree_hash::TreeHash; 
use types::*; -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, optionally validating the aggregate signature. +/// Returns `Ok(())` if the given `attestation` is valid to be included in a block that is applied +/// to `state`. Otherwise, returns a descriptive `Err`. +/// +/// Optionally verifies the aggregate signature, depending on `verify_signatures`. /// /// Spec v0.8.0 -pub fn verify_attestation_for_block( +pub fn verify_attestation_for_block_inclusion( state: &BeaconState, attestation: &Attestation, spec: &ChainSpec, @@ -41,7 +43,7 @@ pub fn verify_attestation_for_block( verify_attestation_for_state(state, attestation, spec, verify_signatures) } -/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that preceeds the given +/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given /// `state`. /// /// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the From 76bb6710844d4f683d1681ef738efe9e5880b137 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 11:54:35 +1000 Subject: [PATCH 033/186] Fix bug with fork choice, tidy --- beacon_node/beacon_chain/src/beacon_chain.rs | 63 +++++++++---------- beacon_node/beacon_chain/src/fork_choice.rs | 14 ++++- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/tests.rs | 22 ++++--- .../src/per_block_processing/errors.rs | 3 + .../verify_attestation.rs | 11 ++++ 6 files changed, 70 insertions(+), 49 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3d50d701c4..81e5bdd65a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -524,7 +524,13 @@ impl BeaconChain { // If it turns out that the attestation was made using the head state, then there // is no need to load a state from the database 
to process the attestation. - if state.current_epoch() == attestation_head_block.epoch() + // + // Note: use the epoch of the target because it indicates which epoch the + // attestation was created in. You cannot use the epoch of the head block, because + // the block doesn't necessarily need to be in the same epoch as the attestation + // (e.g., if there are skip slots between the epoch the block was created in and + // the epoch for the attestation). + if state.current_epoch() == attestation.data.target.epoch && (state .get_block_root(attestation_head_block.slot) .map(|root| *root == attestation.data.beacon_block_root) @@ -546,7 +552,11 @@ impl BeaconChain { if let Some(outcome) = optional_outcome { outcome } else { - // The state required to verify this attestation must be loaded from the database. + // Use the `data.beacon_block_root` to load the state from the latest non-skipped + // slot preceding the attestations creation. + // + // This state is guaranteed to be in the same chain as the attestation, but it's + // not guaranteed to be from the same slot or epoch as the attestation. let mut state: BeaconState = self .store .get(&attestation_head_block.state_root)? @@ -554,7 +564,21 @@ impl BeaconChain { // Ensure the state loaded from the database matches the state of the attestation // head block. - for _ in state.slot.as_u64()..attestation_head_block.slot.as_u64() { + // + // The state needs to be advanced from the current slot through to the epoch in + // which the attestation was created in. It would be an error to try and use + // `state.get_attestation_data_slot(..)` because the state matching the + // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation + // (e.g., if there were lots of skip slots since the head of the chain and the + // epoch creation epoch). 
+ for _ in state.slot.as_u64() + ..attestation + .data + .target + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + .as_u64() + { per_slot_processing(&mut state, &self.spec)?; } @@ -639,36 +663,6 @@ impl BeaconChain { timer.observe_duration(); result - - /* - if self - .fork_choice - .should_process_attestation(state, &attestation)? - { - // TODO: check validation. - let indexed_attestation = common::get_indexed_attestation(state, &attestation)?; - per_block_processing::is_valid_indexed_attestation( - state, - &indexed_attestation, - &self.spec, - )?; - self.fork_choice.process_attestation(&state, &attestation)?; - } - - let result = self - .op_pool - .insert_attestation(attestation, state, &self.spec); - - timer.observe_duration(); - - if result.is_ok() { - self.metrics.attestation_processing_successes.inc(); - } - - result - .map(|_| AttestationProcessingOutcome::Processed) - .map_err(|e| Error::AttestationValidationError(e)) - */ } /// Accept some deposit and queue it for inclusion in an appropriate block. 
@@ -735,7 +729,7 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } - let block_root = block.block_header().canonical_root(); + let block_root = block.canonical_root(); if block_root == self.genesis_block_root { return Ok(BlockProcessingOutcome::GenesisBlock); @@ -781,6 +775,7 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; // Apply the received block to its parent state (which has been transitioned into this diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 83d6c335f1..6800f61d8c 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -19,6 +19,7 @@ pub enum Error { } pub struct ForkChoice { + store: Arc, backend: T::LmdGhost, /// Used for resolving the `0x00..00` alias back to genesis. /// @@ -38,6 +39,7 @@ impl ForkChoice { genesis_block_root: Hash256, ) -> Self { Self { + store: store.clone(), backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), genesis_block_root, } @@ -117,9 +119,19 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation(state, attestation, block)?; + let block = self + .store + .get::>(&attestation.data.beacon_block_root)? + .ok_or_else(|| Error::MissingBlock(attestation.data.beacon_block_root))?; + + self.process_attestation(state, attestation, &block)?; } + // This does not apply a vote to the block, it just makes fork choice aware of the block so + // it can still be identified as the head even if it doesn't have any votes. + // + // A case where a block without any votes can be the head is where it is the only child of + // a block that has the majority of votes applied to it. 
self.backend.process_block(block, block_root)?; Ok(()) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index f2ec5a0fd6..6997f52aec 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -349,14 +349,12 @@ where agg_sig }; - let attestation = Attestation { + vec.push(Attestation { aggregation_bits, data, custody_bits, signature, - }; - - vec.push(attestation) + }) } } }); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 8dc4ae6ec8..c22f025639 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -342,27 +342,29 @@ fn free_attestations_added_to_fork_choice_some_none() { let state = &harness.chain.head().beacon_state; let fork_choice = &harness.chain.fork_choice; - let validators: Vec = (0..VALIDATOR_COUNT).collect(); - let slots: Vec = validators - .iter() - .map(|&v| { - state - .get_attestation_duties(v, RelativeEpoch::Current) + let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) + .into_iter() + .map(|validator_index| { + let slot = state + .get_attestation_duties(validator_index, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() - .slot + .slot; + + (validator_index, slot) }) .collect(); - let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots.clone() { - let latest_message = fork_choice.latest_message(*validator); + let latest_message = fork_choice.latest_message(validator); if slot <= num_blocks_produced && slot != 0 { assert_eq!( latest_message.unwrap().1, slot, - "Latest message slot should be equal to attester duty." 
+ "Latest message slot for {} should be equal to slot {}.", + validator, + slot ) } else { assert!( diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 65179167c1..436ec96cee 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -136,6 +136,9 @@ pub enum AttestationInvalid { delay: u64, attestation: Slot, }, + /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the + /// future). + AttestsToFutureState { state: Slot, attestation: Slot }, /// Attestation slot is too far in the past to be included in a block. IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation target epoch does not match the current or previous epoch. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 74dbefa232..127d251dea 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -62,6 +62,17 @@ pub fn verify_attestation_for_state( Invalid::BadShard ); + let attestation_slot = state.get_attestation_data_slot(&data)?; + + // An attestation cannot attest to a state that is later than itself. + verify!( + attestation_slot <= state.slot, + Invalid::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot + } + ); + // Verify the Casper FFG vote and crosslink data. let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; From d191812d4bfbed364f8f9157a708ff516011a026 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:23:10 +1000 Subject: [PATCH 034/186] Fix overly restrictive check in fork choice. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 33 +++++++++++++++---- .../src/per_block_processing/errors.rs | 3 -- .../verify_attestation.rs | 11 ------- 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 81e5bdd65a..7af64924e4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -60,7 +60,15 @@ pub enum BlockProcessingOutcome { #[derive(Debug, PartialEq)] pub enum AttestationProcessingOutcome { Processed, - UnknownHeadBlock { beacon_block_root: Hash256 }, + UnknownHeadBlock { + beacon_block_root: Hash256, + }, + /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the + /// future). + AttestsToFutureState { + state: Slot, + attestation: Slot, + }, Invalid(AttestationValidationError), } @@ -582,11 +590,24 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } - self.process_attestation_for_state_and_block( - attestation, - &state, - &attestation_head_block, - ) + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; + + // Reject any attestation where the `state` loaded from `data.beacon_block_root` + // has a higher slot than the attestation. + // + // Permitting this would allow for attesters to vote on _future_ slots. + if attestation_slot > state.slot { + Ok(AttestationProcessingOutcome::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot, + }) + } else { + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) + } } } else { // Reject any block where we have not processed `attestation.data.beacon_block_root`. 
diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 436ec96cee..65179167c1 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -136,9 +136,6 @@ pub enum AttestationInvalid { delay: u64, attestation: Slot, }, - /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the - /// future). - AttestsToFutureState { state: Slot, attestation: Slot }, /// Attestation slot is too far in the past to be included in a block. IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation target epoch does not match the current or previous epoch. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 127d251dea..74dbefa232 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -62,17 +62,6 @@ pub fn verify_attestation_for_state( Invalid::BadShard ); - let attestation_slot = state.get_attestation_data_slot(&data)?; - - // An attestation cannot attest to a state that is later than itself. - verify!( - attestation_slot <= state.slot, - Invalid::AttestsToFutureState { - state: state.slot, - attestation: attestation_slot - } - ); - // Verify the Casper FFG vote and crosslink data. 
let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; From 67fe21c1c03f550c74d6e0b190f05843770e6fcf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:32:32 +1000 Subject: [PATCH 035/186] Ensure committee cache is build during attn proc --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7af64924e4..834b04582d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -590,6 +590,8 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; // Reject any attestation where the `state` loaded from `data.beacon_block_root` From f4121d9debb4ff235d8d6c74236d00d373be4020 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:34:56 +1000 Subject: [PATCH 036/186] Ignore unknown blocks at fork choice --- beacon_node/beacon_chain/src/fork_choice.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 6800f61d8c..640f5223d5 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -119,12 +119,14 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - let block = self + // If the `data.beacon_block_root` block is not known to us, simply ignore the latest + // vote. + if let Some(block) = self .store .get::>(&attestation.data.beacon_block_root)? 
- .ok_or_else(|| Error::MissingBlock(attestation.data.beacon_block_root))?; - - self.process_attestation(state, attestation, &block)?; + { + self.process_attestation(state, attestation, &block)?; + } } // This does not apply a vote to the block, it just makes fork choice aware of the block so From 3210489a36892260799acfc2094b7d17e33c619a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 13:23:47 +1000 Subject: [PATCH 037/186] Apply PR suggestions --- beacon_node/eth2-libp2p/src/behaviour.rs | 58 ++-------------------- beacon_node/eth2-libp2p/src/rpc/handler.rs | 5 +- beacon_node/src/main.rs | 41 +++++++++------ validator_client/src/main.rs | 39 ++++++++++----- 4 files changed, 61 insertions(+), 82 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fc224e91a3..b87f8a0613 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -19,6 +19,8 @@ use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; +const MAX_IDENTIFY_ADDRESSES: usize = 20; + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
@@ -148,12 +150,12 @@ impl NetworkBehaviourEventProcess { - if info.listen_addrs.len() > 20 { + if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, "More than 20 addresses have been identified, truncating" ); - info.listen_addrs.truncate(20); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), "Protocol Version" => info.protocol_version, @@ -264,55 +266,3 @@ impl Encode for PubsubMessage { } } } - -/* -impl Decode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = ssz::SszDecoderBuilder::new(&bytes); - - builder.register_type::()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let id: u32 = decoder.decode_next()?; - let body: Vec = decoder.decode_next()?; - - match id { - 0 => Ok(PubsubMessage::Block(BeaconBlock::from_ssz_bytes(&body)?)), - 1 => Ok(PubsubMessage::Attestation(Attestation::from_ssz_bytes( - &body, - )?)), - _ => Err(DecodeError::BytesInvalid( - "Invalid PubsubMessage id".to_string(), - )), - } - } -} -*/ - -/* -#[cfg(test)] -mod test { - use super::*; - use types::*; - - #[test] - fn ssz_encoding() { - let original = PubsubMessage::Block(BeaconBlock::::empty( - &MainnetEthSpec::default_spec(), - )); - - let encoded = ssz_encode(&original); - - let decoded = PubsubMessage::from_ssz_bytes(&encoded).unwrap(); - - assert_eq!(original, decoded); - } - -} -*/ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 355cc52ee4..dbc32c5a48 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -268,8 +268,11 @@ where Self::Error, > { if let Some(err) = self.pending_error.take() { + // Returning an error here will result in dropping any peer that doesn't support any of + // the RPC protocols. 
For our immediate purposes we permit this and simply log that an + // upgrade was not supported. + // TODO: Add a logger to the handler for trace output. dbg!(&err); - //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index be57c6c9de..b34259f5a4 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -4,7 +4,7 @@ use clap::{App, Arg}; use client::{ClientConfig, Eth2Config}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; -use slog::{crit, o, Drain, Level}; +use slog::{crit, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; @@ -323,19 +323,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user that the file will not be written. + let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. 
Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -344,10 +361,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; @@ -363,7 +376,7 @@ fn main() { // check to ensure the spec constants between the client and eth2_config match if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 Config" => format!("{}", eth2_config.spec_constants)); + crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); return; } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 0782df3236..83a874df7d 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -11,7 +11,7 @@ use crate::service::Service as ValidatorService; use clap::{App, Arg}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, Drain, Level}; +use slog::{crit, error, info, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -173,19 +173,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user 
that the file will not be written. + let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -194,10 +211,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; From ce5061603250b10f2e18a1090c5751f028460c32 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 11:31:36 +1000 Subject: [PATCH 038/186] Improve logging --- beacon_node/client/src/lib.rs | 11 ++------ beacon_node/client/src/notifier.rs | 2 +- beacon_node/eth2-libp2p/src/discovery.rs | 36 +++++++++++------------- beacon_node/eth2-libp2p/src/service.rs | 19 ++++++------- beacon_node/network/src/service.rs | 17 +++++------ 5 files changed, 38 insertions(+), 47 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 
65ba071fa1..4b64c10705 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -100,16 +100,9 @@ where } do_state_catchup(&beacon_chain, &log); - // Start the network service, libp2p and syncing threads - // TODO: Add beacon_chain reference to network parameters let network_config = &client_config.network; - let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new( - beacon_chain.clone(), - network_config, - executor, - network_logger, - )?; + let (network, network_send) = + NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; // spawn the RPC server let rpc_exit_signal = if client_config.rpc.enabled { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf38670..a763196c94 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -38,7 +38,7 @@ pub fn run( // Panics if libp2p is poisoned. let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "libp2p"; "peer_count" => connected_peer_count); + debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4c1794945d..3e34b9b037 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -9,7 +9,7 @@ use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{debug, info, o, warn}; +use slog::{debug, info, warn}; use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; @@ -63,7 +63,7 @@ impl Discovery { config: &NetworkConfig, log: &slog::Logger, 
) -> error::Result { - let log = log.new(o!("Service" => "Libp2p-Discovery")); + let log = log.clone(); // checks if current ENR matches that found on disk let local_enr = load_enr(local_key, config, &log)?; @@ -73,19 +73,19 @@ impl Discovery { None => String::from(""), }; - info!(log, "Local ENR: {}", local_enr.to_base64()); - debug!(log, "Local Node Id: {}", local_enr.node_id()); - debug!(log, "Local ENR seq: {}", local_enr.seq()); + info!(log, "ENR Initialised"; "ENR" => local_enr.to_base64(), "Seq" => local_enr.seq()); + debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id())); let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address) - .map_err(|e| format!("Discv5 service failed: {:?}", e))?; + .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; // Add bootnodes to routing table for bootnode_enr in config.boot_nodes.clone() { debug!( log, - "Adding node to routing table: {}", - bootnode_enr.node_id() + "Adding node to routing table"; + "Node ID" => format!("{}", + bootnode_enr.node_id()) ); discovery.add_enr(bootnode_enr); } @@ -123,7 +123,7 @@ impl Discovery { fn find_peers(&mut self) { // pick a random NodeId let random_node = NodeId::random(); - debug!(self.log, "Searching for peers..."); + debug!(self.log, "Searching for peers"); self.discovery.find_node(random_node); // update the time until next discovery @@ -201,7 +201,7 @@ where } Ok(Async::NotReady) => break, Err(e) => { - warn!(self.log, "Discovery peer search failed: {:?}", e); + warn!(self.log, "Discovery peer search failed"; "Error" => format!("{:?}", e)); } } } @@ -227,16 +227,16 @@ where }); } Discv5Event::FindNodeResult { closer_peers, .. 
} => { - debug!(self.log, "Discv5 query found {} peers", closer_peers.len()); + debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); if closer_peers.is_empty() { - debug!(self.log, "Discv5 random query yielded empty results"); + debug!(self.log, "Discovery random query found no peers"); } for peer_id in closer_peers { // if we need more peers, attempt a connection if self.connected_peers.len() < self.max_peers && self.connected_peers.get(&peer_id).is_none() { - debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id)); + debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); return Async::Ready(NetworkBehaviourAction::DialPeer { peer_id, }); @@ -283,14 +283,12 @@ fn load_enr( Ok(_) => { match Enr::from_str(&enr_string) { Ok(enr) => { - debug!(log, "ENR found in file: {:?}", enr_f); - if enr.node_id() == local_enr.node_id() { if enr.ip() == config.discovery_address.into() && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { - debug!(log, "ENR loaded from file"); + debug!(log, "ENR loaded from file"; "File" => format!("{:?}", enr_f)); // the stored ENR has the same configuration, use it return Ok(enr); } @@ -300,11 +298,11 @@ fn load_enr( local_enr.set_seq(new_seq_no, local_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; - debug!(log, "ENR sequence number increased to: {}", new_seq_no); + debug!(log, "ENR sequence number increased"; "Seq" => new_seq_no); } } Err(e) => { - warn!(log, "ENR from file could not be decoded: {:?}", e); + warn!(log, "ENR from file could not be decoded"; "Error" => format!("{:?}", e)); } } } @@ -327,7 +325,7 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { Err(e) => { warn!( log, - "Could not write ENR to file: {:?}{:?}. 
Error: {}", dir, ENR_FILENAME, e + "Could not write ENR to file"; "File" => format!("{:?}{:?}",dir, ENR_FILENAME), "Error" => format!("{}", e) ); } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa05798..e0867e87f2 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -40,13 +40,12 @@ pub struct Service { impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { - debug!(log, "Network-libp2p Service starting"); + trace!(log, "Libp2p Service starting"); // load the private key from CLI flag, disk or generate a new one let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); - info!(log, "Local peer id: {:?}", local_peer_id); + info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux @@ -67,21 +66,21 @@ impl Service { Ok(_) => { let mut log_address = listen_multiaddr; log_address.push(Protocol::P2p(local_peer_id.clone().into())); - info!(log, "Listening on: {}", log_address); + info!(log, "Listening established"; "Address" => format!("{}", log_address)); } Err(err) => warn!( log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err + "Failed to listen on address"; "Address" => format!("{}", listen_multiaddr), "Error" => format!("{:?}", err) ), }; // attempt to connect to user-input libp2p nodes for multiaddr in config.libp2p_nodes { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Ok(()) => debug!(log, "Dialing libp2p peer"; "Address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to node: {} error: {:?}", multiaddr, err + "Could not connect to peer"; "Address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) ), }; } @@ -104,13 +103,13 @@ impl Service { let mut 
subscribed_topics = vec![]; for topic in topics { if swarm.subscribe(topic.clone()) { - trace!(log, "Subscribed to topic: {:?}", topic); + trace!(log, "Subscribed to topic"; "Topic" => format!("{}", topic)); subscribed_topics.push(topic); } else { - warn!(log, "Could not subscribe to topic: {:?}", topic) + warn!(log, "Could not subscribe to topic"; "Topic" => format!("{}", topic)); } } - info!(log, "Subscribed to topics: {:?}", subscribed_topics); + info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); Ok(Service { _local_peer_id: local_peer_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a9175..df0404cfaa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -20,8 +20,7 @@ pub struct Service { libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, - _phantom: PhantomData, //message_handler: MessageHandler, - //message_handler_send: Sender + _phantom: PhantomData, } impl Service { @@ -42,17 +41,19 @@ impl Service { message_handler_log, )?; + let network_log = log.new(o!("Service" => "Network")); // launch libp2p service - let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(config.clone(), libp2p_log)?)); + let libp2p_service = Arc::new(Mutex::new(LibP2PService::new( + config.clone(), + network_log.clone(), + )?)); - // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. 
let libp2p_exit = spawn_service( libp2p_service.clone(), network_recv, message_handler_send, executor, - log, + network_log, )?; let network_service = Service { libp2p_service, @@ -142,13 +143,13 @@ fn network_service( .map_err(|_| "Failed to send RPC to handler")?; } Libp2pEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed: {:?}", peer_id); + debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDialed(peer_id)) .map_err(|_| "Failed to send PeerDialed to handler")?; } Libp2pEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected: {:?}", peer_id); + debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDisconnected(peer_id)) .map_err(|_| "Failed to send PeerDisconnected to handler")?; From d83fa670681f96d705da89300e7c4ad126049bff Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:06:46 +1000 Subject: [PATCH 039/186] Subscribe to all required gossipsub topics --- beacon_node/eth2-libp2p/src/config.rs | 13 +++++++++---- beacon_node/eth2-libp2p/src/lib.rs | 4 +--- beacon_node/eth2-libp2p/src/service.rs | 24 +++++++++++++++++++----- 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index ddf14cc047..d7648ec3fd 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -6,9 +6,14 @@ use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -/// The beacon node topic string to subscribe to. +/// The gossipsub topic names. 
+pub const TOPIC_PREFIX: &str = "eth2"; +pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; +pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; +pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; +pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -63,10 +68,10 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, - //TODO: Set realistic values for production - // Note: This defaults topics to plain strings. Not hashes + // Note: The topics by default are sent as plain strings. Hashes are an optional + // parameter. gs_config: GossipsubConfigBuilder::new() - .max_transmit_size(1_000_000) + .max_transmit_size(1_048_576) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 54a4f2a998..7c3a93d61a 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -10,9 +10,7 @@ pub mod rpc; mod service; pub use behaviour::PubsubMessage; -pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, -}; +pub use config::{Config as NetworkConfig, *}; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e0867e87f2..98718445b3 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -1,10 +1,10 @@ use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; +use crate::config::*; use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use 
crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -87,10 +87,24 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; - //TODO: Handle multiple shard attestations. For now we simply use a separate topic for - // attestations - topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); + + /* Here we subscribe to all the required gossipsub topics required for interop. + * The topic builder adds the required prefix and postfix to the hardcoded topics that we + * must subscribe to. + */ + let topic_builder = |topic| { + Topic::new(format!( + "/{}/{}/{}", + TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX, + )) + }; + topics.push(topic_builder(BEACON_BLOCK_TOPIC)); + topics.push(topic_builder(BEACON_ATTESTATION_TOPIC)); + topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC)); + topics.push(topic_builder(PROPOSER_SLASHING_TOPIC)); + topics.push(topic_builder(ATTESTER_SLASHING_TOPIC)); + + // Add any topics specified by the user topics.append( &mut config .topics From 80f15f5d700693520ed7ca722e1cd9b0227147c2 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:38:54 +1000 Subject: [PATCH 040/186] Correct gossipsub message encoding. 
Add extended topics --- beacon_node/eth2-libp2p/src/behaviour.rs | 54 ++++++++++++++---------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a0613..749d2e5b44 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -1,8 +1,8 @@ +use crate::config::*; use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -15,7 +15,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,9 +188,9 @@ impl Behaviour { /// Publishes a message on the pubsub (gossipsub) behaviour. pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { - let message_bytes = ssz_encode(&message); + let message_data = message.to_data(); for topic in topics { - self.gossipsub.publish(topic, message_bytes.clone()); + self.gossipsub.publish(topic, message_data.clone()); } } @@ -220,13 +219,20 @@ pub enum BehaviourEvent { }, } -/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. +/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and +/// decoded upstream. #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. Block(Vec), /// Gossipsub message providing notification of a new attestation. Attestation(Vec), + /// Gossipsub message providing notification of a voluntary exit. + VoluntaryExit(Vec), + /// Gossipsub message providing notification of a new proposer slashing. + ProposerSlashing(Vec), + /// Gossipsub message providing notification of a new attester slashing. 
+ AttesterSlashing(Vec), /// Gossipsub message from an unknown topic. Unknown(Vec), } @@ -240,29 +246,33 @@ impl PubsubMessage { */ fn from_topics(topics: &Vec, data: Vec) -> Self { for topic in topics { - match topic.as_str() { - BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), - BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), - _ => {} + // compare the prefix and postfix, then match on the topic + let topic_parts: Vec<&str> = topic.as_str().split('/').collect(); + if topic_parts.len() == 4 + && topic_parts[1] == TOPIC_PREFIX + && topic_parts[3] == TOPIC_ENCODING_POSTFIX + { + match topic_parts[2] { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data), + PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data), + ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data), + _ => {} + } } } PubsubMessage::Unknown(data) } -} -impl Encode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_append(&self, buf: &mut Vec) { + fn to_data(self) -> Vec { match self { - PubsubMessage::Block(inner) - | PubsubMessage::Attestation(inner) - | PubsubMessage::Unknown(inner) => { - // Encode the gossip as a Vec; - buf.append(&mut inner.as_ssz_bytes()); - } + PubsubMessage::Block(data) + | PubsubMessage::Attestation(data) + | PubsubMessage::VoluntaryExit(data) + | PubsubMessage::ProposerSlashing(data) + | PubsubMessage::AttesterSlashing(data) + | PubsubMessage::Unknown(data) => data, } } } From 5a74239ebcf0473120cdfc1acb4bf31fcc338f24 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 14:58:33 +1000 Subject: [PATCH 041/186] Add decoding/encoding for extended gossip topics. 
Correct logging CLI --- beacon_node/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/config.rs | 2 + beacon_node/network/src/message_handler.rs | 86 ++++++++++++++++++---- beacon_node/src/main.rs | 17 +---- 4 files changed, 79 insertions(+), 30 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9124047e45..cba73b8a43 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,7 +11,7 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" ctrlc = { version = "3.1.1", features = ["termination"] } @@ -22,3 +22,5 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } +slog-scope = "4.1.2" +slog-stdlog = "3.0.5" diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d7648ec3fd..7cb501c1f0 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; use std::time::Duration; /// The gossipsub topic names. 
+// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX +// For example /eth2/beacon_block/ssz pub const TOPIC_PREFIX: &str = "eth2"; pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 72a507ad7b..b86dcb9697 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -10,11 +10,13 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, warn}; +use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Attestation, BeaconBlock, BeaconBlockHeader}; +use types::{ + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, +}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -49,7 +51,7 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - debug!(log, "Service starting"); + trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -65,7 +67,6 @@ impl MessageHandler { }; // spawn handler task - // TODO: Handle manual termination of thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -221,43 +222,79 @@ impl MessageHandler { /// Handle various RPC errors fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); } /// Handle RPC messages fn 
handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(block) => { let _should_forward_on = self.sync .on_block_gossip(peer_id, block, &mut self.network_context); } + Err(e) => { + debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(attestation) => { self.sync .on_attestation_gossip(peer_id, attestation, &mut self.network_context) } + Err(e) => { + debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, + PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { + Ok(_exit) => { + // TODO: Handle exits + debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + }, + PubsubMessage::ProposerSlashing(message) => { + match self.decode_gossip_proposer_slashing(message) { + Ok(_slashing) => { + // TODO: Handle proposer slashings + debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } + PubsubMessage::AttesterSlashing(message) => { + match self.decode_gossip_attestation_slashing(message) { + Ok(_slashing) => { + // TODO: Handle attester slashings + debug!(self.log, "Received an attester 
slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } PubsubMessage::Unknown(message) => { // Received a message from an unknown topic. Ignore for now - debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message)); } } } - /* Decoding of blocks and attestations from the network. + /* Decoding of gossipsub objects from the network. + * + * The decoding is done in the message handler as it has access to to a `BeaconChain` and can + * therefore apply more efficient logic in decoding and verification. * * TODO: Apply efficient decoding/verification of these objects */ + /* Gossipsub Domain Decoding */ + // Note: These are not generics as type-specific verification will need to be applied. fn decode_gossip_block( &self, beacon_block: Vec, @@ -274,6 +311,29 @@ impl MessageHandler { Attestation::from_ssz_bytes(&beacon_block) } + fn decode_gossip_exit(&self, voluntary_exit: Vec) -> Result { + //TODO: Apply verification before decoding. + VoluntaryExit::from_ssz_bytes(&voluntary_exit) + } + + fn decode_gossip_proposer_slashing( + &self, + proposer_slashing: Vec, + ) -> Result { + //TODO: Apply verification before decoding. + ProposerSlashing::from_ssz_bytes(&proposer_slashing) + } + + fn decode_gossip_attestation_slashing( + &self, + attester_slashing: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + AttesterSlashing::from_ssz_bytes(&attester_slashing) + } + + /* Req/Resp Domain Decoding */ + /// Verifies and decodes the ssz-encoded block bodies received from peers. 
fn decode_block_bodies( &self, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b34259f5a4..086ccc5beb 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -9,7 +9,6 @@ use std::fs; use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; - pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; @@ -214,14 +213,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), - ) - .arg( - Arg::with_name("verbosity") - .short("v") - .multiple(true) - .help("Sets the verbosity level") - .takes_value(true), + .default_value("info"), ) .get_matches(); @@ -241,13 +233,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let drain = match matches.occurrences_of("verbosity") { - 0 => drain.filter_level(Level::Info), - 1 => drain.filter_level(Level::Debug), - 2 => drain.filter_level(Level::Trace), - _ => drain.filter_level(Level::Trace), - }; - let mut log = slog::Logger::root(drain.fuse(), o!()); let data_dir = match matches From ec73dfe90b0568fcbc22a775b2e2bf509fde6370 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 17:46:39 +1000 Subject: [PATCH 042/186] Starting of req/resp overhaul --- beacon_node/eth2-libp2p/Cargo.toml | 1 + beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 114 ++++---------- beacon_node/eth2-libp2p/src/rpc/methods.rs | 153 +++++-------------- beacon_node/eth2-libp2p/src/rpc/protocol.rs | 55 +++---- 4 files changed, 96 insertions(+), 227 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 794b097128..55081aed58 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -26,3 +26,4 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = 
"0.4.12" +tokio-io-timeout = "0.3.1" diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 8e2bdaa647..f7262118d6 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -41,10 +41,8 @@ impl Encoder for SSZInboundCodec { RPCErrorResponse::Success(resp) => { match resp { RPCResponse::Hello(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockRoots(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockHeaders(res) => res.headers, // already raw bytes - RPCResponse::BeaconBlockBodies(res) => res.block_bodies, // already raw bytes - RPCResponse::BeaconChainState(res) => res.as_ssz_bytes(), + RPCResponse::BeaconBlocks(res) => res, // already raw bytes + RPCResponse::RecentBeaconBlocks(res) => res, // already raw bytes } } RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), @@ -72,52 +70,30 @@ impl Decoder for SSZInboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol( - "Unknown GOODBYE version.as_str()", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockRoots( - BeaconBlockRootsRequest::from_ssz_bytes(&packet)?, + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => 
Ok(Some(RPCRequest::BeaconBlocks( + BeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockHeaders( - BeaconBlockHeadersRequest::from_ssz_bytes(&packet)?, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::RecentBeaconBlocks( + RecentBeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockBodies( - BeaconBlockBodiesRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconChainState( - BeaconChainStateRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown message name.")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -156,10 +132,8 @@ impl Encoder for SSZOutboundCodec { let bytes = match item { RPCRequest::Hello(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockRoots(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockHeaders(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockBodies(req) => req.as_ssz_bytes(), - RPCRequest::BeaconChainState(req) => req.as_ssz_bytes(), + RPCRequest::BeaconBlocks(req) => req.as_ssz_bytes(), + RPCRequest::RecentBeaconBlocks(req) => req.as_ssz_bytes(), }; // length-prefix 
self.inner @@ -168,7 +142,11 @@ impl Encoder for SSZOutboundCodec { } } -// Decoder for outbound +// Decoder for outbound streams +// +// The majority of the decoding has now been pushed upstream due to the changing specification. +// We prefer to decode blocks and attestations with extra knowledge about the chain to perform +// faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZOutboundCodec { type Item = RPCResponse; type Error = RPCError; @@ -177,51 +155,21 @@ impl Decoder for SSZOutboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version.")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockRoots( - BeaconBlockRootsResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockHeaders( - BeaconBlockHeadersResponse { - headers: packet.to_vec(), - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, 
- "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockBodies( - BeaconBlockBodiesResponse { - block_bodies: packet.to_vec(), - // this gets filled in the protocol handler - block_roots: None, - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconChainState( - BeaconChainStateResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown method")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 2e5a9a7ffd..8fef1a75a6 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -2,7 +2,7 @@ use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; -use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; /* Request/Response data structures for RPC methods */ @@ -13,23 +13,20 @@ pub type RequestId = usize; /// The HELLO request/response handshake message. #[derive(Encode, Decode, Clone, Debug)] pub struct HelloMessage { - /// The network ID of the peer. - pub network_id: u8, + /// The fork version of the chain we are broadcasting. + pub fork_version: [u8; 4], - /// The chain id for the HELLO request. - pub chain_id: u64, + /// Latest finalized root. + pub finalized_root: Hash256, - /// The peers last finalized root. - pub latest_finalized_root: Hash256, + /// Latest finalized epoch. + pub finalized_epoch: Epoch, - /// The peers last finalized epoch. - pub latest_finalized_epoch: Epoch, + /// The latest block root. 
+ pub head_root: Hash256, - /// The peers last block root. - pub best_root: Hash256, - - /// The peers last slot. - pub best_slot: Slot, + /// The slot associated with the latest block root. + pub head_slot: Slot, } /// The reason given for a `Goodbye` message. @@ -74,108 +71,42 @@ impl_decode_via_from!(GoodbyeReason, u64); /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsRequest { - /// The starting slot of the requested blocks. - pub start_slot: Slot, +pub struct BeaconBlocksRequest { + /// The hash tree root of a block on the requested chain. + pub head_block_root: Hash256, + + /// The starting slot to request blocks. + pub start_slot: u64, /// The number of blocks from the start slot. - pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers + pub count: u64, + + /// The step increment to receive blocks. + /// + /// A value of 1 returns every block. + /// A value of 2 returns every second block. + /// A value of 3 returns every third block and so on. + pub step: u64, } +// TODO: Currently handle encoding/decoding of blocks in the message handler. Leave this struct +// here in case encoding/decoding of ssz requires an object. +/* /// Response containing a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsResponse { +pub struct BeaconBlocksResponse { /// List of requested blocks and associated slots. - pub roots: Vec, -} - -/// Contains a block root and associated slot. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BlockRootSlot { - /// The block root. - pub block_root: Hash256, - - /// The block slot. - pub slot: Slot, -} - -/// The response of a beacon block roots request. -impl BeaconBlockRootsResponse { - /// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`. 
- pub fn slots_are_ascending(&self) -> bool { - for window in self.roots.windows(2) { - if window[0].slot >= window[1].slot { - return false; - } - } - - true - } -} - -/// Request a number of beacon block headers from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersRequest { - /// The starting header hash of the requested headers. - pub start_root: Hash256, - - /// The starting slot of the requested headers. - pub start_slot: Slot, - - /// The maximum number of headers than can be returned. - pub max_headers: u64, - - /// The maximum number of slots to skip between blocks. - pub skip_slots: u64, -} - -/// Response containing requested block headers. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersResponse { - /// The list of ssz-encoded requested beacon block headers. - pub headers: Vec, + pub beacon_blocks: Vec, } +*/ /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesRequest { +pub struct RecentBeaconBlocksRequest { /// The list of beacon block bodies being requested. pub block_roots: Vec, } -/// Response containing the list of requested beacon block bodies. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesResponse { - /// The list of hashes that were sent in the request and match these roots response. None when - /// sending outbound. - pub block_roots: Option>, - /// The list of ssz-encoded beacon block bodies being requested. - pub block_bodies: Vec, -} - -/// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`. -pub struct DecodedBeaconBlockBodiesResponse { - /// The list of hashes sent in the request to get this response. - pub block_roots: Vec, - /// The valid decoded block bodies. - pub block_bodies: Vec>, -} - -/// Request values for tree hashes which yield a blocks `state_root`. 
-#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateRequest { - /// The tree hashes that a value is requested for. - pub hashes: Vec, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -// Note: TBD -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateResponse { - /// The values corresponding the to the requested tree hashes. - pub values: bool, //TBD - stubbed with encodable bool -} - /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -183,14 +114,10 @@ pub struct BeaconChainStateResponse { pub enum RPCResponse { /// A HELLO message. Hello(HelloMessage), - /// A response to a get BEACON_BLOCK_ROOTS request. - BeaconBlockRoots(BeaconBlockRootsResponse), - /// A response to a get BEACON_BLOCK_HEADERS request. - BeaconBlockHeaders(BeaconBlockHeadersResponse), - /// A response to a get BEACON_BLOCK_BODIES request. - BeaconBlockBodies(BeaconBlockBodiesResponse), - /// A response to a get BEACON_CHAIN_STATE request. - BeaconChainState(BeaconChainStateResponse), + /// A response to a get BEACON_BLOCKS request. + BeaconBlocks(Vec), + /// A response to a get RECENT_BEACON_BLOCKS request. 
+ RecentBeaconBlocks(Vec), } #[derive(Debug)] @@ -206,8 +133,8 @@ impl RPCErrorResponse { pub fn as_u8(&self) -> u8 { match self { RPCErrorResponse::Success(_) => 0, - RPCErrorResponse::InvalidRequest(_) => 2, - RPCErrorResponse::ServerError(_) => 3, + RPCErrorResponse::InvalidRequest(_) => 1, + RPCErrorResponse::ServerError(_) => 2, RPCErrorResponse::Unknown(_) => 255, } } @@ -223,8 +150,8 @@ impl RPCErrorResponse { /// Builds an RPCErrorResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorMessage) -> Self { match response_code { - 2 => RPCErrorResponse::InvalidRequest(err), - 3 => RPCErrorResponse::ServerError(err), + 1 => RPCErrorResponse::InvalidRequest(err), + 2 => RPCErrorResponse::ServerError(err), _ => RPCErrorResponse::Unknown(err), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index b606fc7432..be1efdf5d4 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -16,13 +16,17 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tokio::prelude::*; use tokio::timer::timeout; use tokio::util::FutureExt; +use tokio_io_timeout::TimeoutStream; /// The maximum bytes that can be sent across the RPC. const MAX_RPC_SIZE: usize = 4_194_304; // 4M /// The protocol prefix the RPC protocol id. -const PROTOCOL_PREFIX: &str = "/eth2/beacon_node/rpc"; -/// The number of seconds to wait for a request once a protocol has been established before the stream is terminated. -const REQUEST_TIMEOUT: u64 = 3; +const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; +/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). +const TTFB_TIMEOUT: u64 = 5; +/// The number of seconds to wait for the first bytes of a request once a protocol has been +/// established before the stream is terminated. 
+const REQUEST_TIMEOUT: u64 = 15; #[derive(Debug, Clone)] pub struct RPCProtocol; @@ -33,11 +37,10 @@ impl UpgradeInfo for RPCProtocol { fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), + ProtocolId::new("hello", "1", "ssz"), + ProtocolId::new("goodbye", "1", "ssz"), + ProtocolId::new("beacon_blocks", "1", "ssz"), + ProtocolId::new("recent_beacon_blocks", "1", "ssz"), ] } } @@ -87,7 +90,7 @@ impl ProtocolName for ProtocolId { // handler to respond to once ready. pub type InboundOutput = (RPCRequest, InboundFramed); -pub type InboundFramed = Framed, InboundCodec>; +pub type InboundFramed = Framed>, InboundCodec>; type FnAndThen = fn( (Option, InboundFramed), ) -> FutureResult, RPCError>; @@ -118,7 +121,9 @@ where "ssz" | _ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); - Framed::new(socket, codec) + let mut timed_socket = TimeoutStream::new(socket); + timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + Framed::new(timed_socket, codec) .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) .map_err(RPCError::from as FnMapErr) @@ -144,10 +149,8 @@ where pub enum RPCRequest { Hello(HelloMessage), Goodbye(GoodbyeReason), - BeaconBlockRoots(BeaconBlockRootsRequest), - BeaconBlockHeaders(BeaconBlockHeadersRequest), - BeaconBlockBodies(BeaconBlockBodiesRequest), - BeaconChainState(BeaconChainStateRequest), + BeaconBlocks(BeaconBlocksRequest), + RecentBeaconBlocks(RecentBeaconBlocksRequest), } impl UpgradeInfo for RPCRequest { @@ -165,22 +168,11 @@ impl RPCRequest { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) 
=> vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], - RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] - } - RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] + RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1", "ssz")], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1", "ssz")], + RPCRequest::BeaconBlocks(_) => vec![ProtocolId::new("beacon_blocks", "1", "ssz")], + RPCRequest::RecentBeaconBlocks(_) => { + vec![ProtocolId::new("recent_beacon_blocks", "1", "ssz")] } } } @@ -215,7 +207,8 @@ where ) -> Self::Future { match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); + let ssz_codec = + BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 66419d00eadc4068243364c93d651e473954f34c Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 10:39:32 +1000 Subject: [PATCH 043/186] Remove redundant slog dependencies --- beacon_node/Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cba73b8a43..32b7e92110 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -22,5 +22,3 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } -slog-scope = "4.1.2" -slog-stdlog = "3.0.5" From 64a6e1475c567d9dd137033a93f6ab27291a0dd8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 11:31:31 +1000 Subject: 
[PATCH 044/186] Various minor fixes --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 834b04582d..7fefb7690f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -539,11 +539,11 @@ impl BeaconChain { // (e.g., if there are skip slots between the epoch the block was created in and // the epoch for the attestation). if state.current_epoch() == attestation.data.target.epoch - && (state - .get_block_root(attestation_head_block.slot) - .map(|root| *root == attestation.data.beacon_block_root) - .unwrap_or_else(|_| false) - || attestation.data.beacon_block_root == self.head().beacon_block_root) + && (attestation.data.beacon_block_root == self.head().beacon_block_root + || state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false)) { // The head state is able to be used to validate this attestation. No need to load // anything from the database. @@ -558,6 +558,7 @@ impl BeaconChain { }; if let Some(outcome) = optional_outcome { + // Verification was already completed with an in-memory state. Return that result. outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped @@ -612,13 +613,13 @@ impl BeaconChain { } } } else { - // Reject any block where we have not processed `attestation.data.beacon_block_root`. + // Drop any attestation where we have not processed `attestation.data.beacon_block_root`. // // This is likely overly restrictive, we could store the attestation for later // processing. 
warn!( self.log, - "Dropping attestation for unknown block"; + "Dropped attestation for unknown block"; "block" => format!("{}", attestation.data.beacon_block_root) ); Ok(AttestationProcessingOutcome::UnknownHeadBlock { From 0d4b58978ccd5423f8026a8436215c14279abff4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:19:27 +1000 Subject: [PATCH 045/186] Make fork choice write lock in to read lock --- eth2/lmd_ghost/src/reduced_tree.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 9668620b79..822c388f6a 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -111,7 +111,7 @@ where } fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - self.core.write().latest_message(validator_index) + self.core.read().latest_message(validator_index) } } @@ -258,10 +258,10 @@ where Ok(head_node.block_hash) } - pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { - match self.latest_votes.get(validator_index) { - Some(v) => Some((v.hash.clone(), v.slot.clone())), - None => None, + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { + match self.latest_votes.get_ref(validator_index) { + Some(Some(v)) => Some((v.hash.clone(), v.slot.clone())), + _ => None, } } @@ -776,6 +776,14 @@ where &self.0[i] } + pub fn get_ref(&self, i: usize) -> Option<&T> { + if i < self.0.len() { + Some(&self.0[i]) + } else { + None + } + } + pub fn insert(&mut self, i: usize, element: T) { self.ensure(i); self.0[i] = element; From 1beab66078e41a7ae46b9c304d1dd478de1716e5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:21:54 +1000 Subject: [PATCH 046/186] Remove unused method --- beacon_node/beacon_chain/src/fork_choice.rs | 24 +++------------------ 1 file changed, 3 insertions(+), 21 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 640f5223d5..edd426f296 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -178,27 +178,9 @@ impl ForkChoice { Ok(()) } - /// Determines whether or not the given attestation contains a latest message. - pub fn should_process_attestation( - &self, - state: &BeaconState, - attestation: &Attestation, - ) -> Result { - let validator_indices = - get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - - let block_slot = state.get_attestation_data_slot(&attestation.data)?; - - Ok(validator_indices - .iter() - .find(|&&v| match self.backend.latest_message(v) { - Some((_, slot)) => block_slot > slot, - None => true, - }) - .is_some()) - } - - // Returns the latest message for a given validator + /// Returns the latest message for a given validator, if any. + /// + /// Returns `(block_root, block_slot)`. pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.backend.latest_message(validator_index) } From 963fb7bc87901a00a1bfcdb1c3120cdfd6985f14 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:36:53 +1000 Subject: [PATCH 047/186] Tidy comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7fefb7690f..c92a05a728 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -506,8 +506,16 @@ impl BeaconChain { /// Accept a new, potentially invalid attestation from the network. /// - /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation - /// if possible. + /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. 
+ /// + /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination + /// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an + /// error during this process and no determination was able to be made. + /// + /// ## Notes + /// + /// - Whilst the `attestation` is added to fork choice, the head is not updated. That must be + /// done separately. pub fn process_attestation( &self, attestation: Attestation, @@ -538,6 +546,9 @@ impl BeaconChain { // the block doesn't necessarily need to be in the same epoch as the attestation // (e.g., if there are skip slots between the epoch the block was created in and // the epoch for the attestation). + // + // This check also ensures that the slot for `data.beacon_block_root` is not higher + // than `state.root` by ensuring that the block is in the history of `state`. if state.current_epoch() == attestation.data.target.epoch && (attestation.data.beacon_block_root == self.head().beacon_block_root || state @@ -638,9 +649,8 @@ impl BeaconChain { /// /// - `state` corresponds to the `block.state_root` identified by /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. - /// - `state.slot` is in the same epoch as `block.slot` and - /// `attestation.data.beacon_block_root` is in `state.block_roots`. (Viz., the attestation was - /// attesting to an ancestor of `state` from the same epoch as `state`. + /// - `state.slot` is in the same epoch as `data.target.epoch` and + /// `attestation.data.beacon_block_root` is in the history of `state`. /// /// Additionally, `attestation.data.beacon_block_root` **must** be available to read in /// `self.store` _and_ be the root of the given `block`. From 04bef689e33c0f3f4288a96d90bd06f91e0eacea Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:47:34 +1000 Subject: [PATCH 048/186] Fix attestation prod. 
target roots change --- beacon_node/beacon_chain/src/beacon_chain.rs | 34 ++++++++++++++++---- beacon_node/beacon_chain/src/test_utils.rs | 30 +++++++++++------ beacon_node/beacon_chain/tests/tests.rs | 1 + 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c92a05a728..7488a7795e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -450,8 +450,9 @@ impl BeaconChain { pub fn produce_attestation_data(&self, shard: u64) -> Result { let state = self.state.read(); let head_block_root = self.head().beacon_block_root; + let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, &*state) + self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -462,18 +463,39 @@ impl BeaconChain { &self, shard: u64, head_block_root: Hash256, + head_block_slot: Slot, state: &BeaconState, ) -> Result { // Collect some metrics. self.metrics.attestation_production_requests.inc(); let timer = self.metrics.attestation_production_times.start_timer(); + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); + // The `target_root` is the root of the first block of the current epoch. - let target_root = self - .rev_iter_block_roots() - .find(|(_root, slot)| *slot % T::EthSpec::slots_per_epoch() == 0) - .map(|(root, _slot)| root) - .ok_or_else(|| Error::UnableToFindTargetRoot(self.head().beacon_state.slot))?; + // + // The `state` does not know the root of the block for it's current slot (it only knows + // about blocks from prior slots). 
This creates an edge-case when the state is on the first + // slot of the epoch -- we're unable to obtain the `target_root` because it is not a prior + // root. + // + // This edge case is handled in two ways: + // + // - If the head block is on the same slot as the state, we use it's root. + // - Otherwise, assume the current slot has been skipped and use the block root from the + // prior slot. + // + // For all other cases, we simply read the `target_root` from `state.latest_block_roots`. + let target_root = if state.slot == current_epoch_start_slot { + if head_block_slot == current_epoch_start_slot { + head_block_root + } else { + *state.get_block_root(current_epoch_start_slot - 1)? + } + } else { + *state.get_block_root(current_epoch_start_slot)? + }; let target = Checkpoint { epoch: state.current_epoch(), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6997f52aec..298c637dbd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -194,7 +194,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations(&attestation_strategy, &new_state, block_root); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); } else { panic!("block should be successfully processed: {:?}", outcome); } @@ -282,14 +282,20 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, + head_block_slot: Slot, ) { - self.get_free_attestations(attestation_strategy, state, head_block_root) - .into_iter() - .for_each(|attestation| { - self.chain - .process_attestation(attestation) - .expect("should process attestation"); - }); + self.get_free_attestations( + attestation_strategy, + state, + head_block_root, + head_block_slot, + ) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + 
.expect("should process attestation"); + }); } /// Generates a `Vec` for some attestation strategy and head_block. @@ -298,6 +304,7 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, + head_block_slot: Slot, ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -322,7 +329,12 @@ where if attesting_validators.contains(validator_index) { let data = self .chain - .produce_attestation_data_for_block(cc.shard, head_block_root, state) + .produce_attestation_data_for_block( + cc.shard, + head_block_root, + head_block_slot, + state, + ) .expect("should produce attestation data"); let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c22f025639..22b667f159 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -395,6 +395,7 @@ fn attestations_with_increasing_slots() { &AttestationStrategy::AllValidators, &harness.chain.head().beacon_state, harness.chain.head().beacon_block_root, + harness.chain.head().beacon_block.slot, )); harness.advance_slot(); From 6c9ebf4b9647932379b64baee681ddf16e4dc144 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 09:15:39 +1000 Subject: [PATCH 049/186] Fix compile error in store iters --- beacon_node/store/src/iter.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 3d01b7015d..c97241903f 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -15,7 +15,7 @@ pub trait AncestorIter { } impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { - /// Iterates across all the prior block roots of `self`, starting at the most recent and ending + /// Iterates across all available prior block roots of `self`, starting at the most recent and ending /// at genesis. 
fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; @@ -25,11 +25,11 @@ impl<'a, U: Store, E: EthSpec> AncestorIter> for } impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconState { - /// Iterates across all the prior state roots of `self`, starting at the most recent and ending + /// Iterates across all available prior state roots of `self`, starting at the most recent and ending /// at genesis. fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { // The `self.clone()` here is wasteful. - Some(StateRootsIterator::owned(store, self.clone(), self.slot)) + Some(StateRootsIterator::owned(store, self.clone())) } } From 4020d13064fb6e6085e90aad23d2b1a5891f03df Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 09:34:49 +1000 Subject: [PATCH 050/186] Reject any attestation prior to finalization --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 +++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d30b12c988..9ccf595893 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -69,6 +69,11 @@ pub enum AttestationProcessingOutcome { state: Slot, attestation: Slot, }, + /// The slot is finalized, no need to import. + FinalizedSlot { + attestation: Epoch, + finalized: Epoch, + }, Invalid(AttestationValidationError), } @@ -550,6 +555,23 @@ impl BeaconChain { .store .get::>(&attestation.data.beacon_block_root)? { + let finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch; + + if attestation_head_block.slot + <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) + { + // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or + // prior to the finalized epoch. 
+ // + // For any valid attestation if the `beacon_block_root` is prior to finalization, then + // all other parameters (source, target, etc) must all be prior to finalization and + // therefore no longer interesting. + return Ok(AttestationProcessingOutcome::FinalizedSlot { + attestation: attestation_head_block.epoch(), + finalized: finalized_epoch, + }); + } + // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. @@ -688,7 +710,27 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - let result = if let Err(e) = + // Find the highest between: + // + // - The highest valid finalized epoch we've ever seen (i.e., the head). + // - The finalized epoch that this attestation was created against. + let finalized_epoch = std::cmp::max( + self.head().beacon_state.finalized_checkpoint.epoch, + state.finalized_checkpoint.epoch, + ); + + let result = if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) { + // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or + // prior to the finalized epoch. + // + // For any valid attestation if the `beacon_block_root` is prior to finalization, then + // all other parameters (source, target, etc) must all be prior to finalization and + // therefore no longer interesting. 
+ Ok(AttestationProcessingOutcome::FinalizedSlot { + attestation: block.slot.epoch(T::EthSpec::slots_per_epoch()), + finalized: finalized_epoch, + }) + } else if let Err(e) = verify_attestation_for_state(state, &attestation, &self.spec, VerifySignatures::True) { warn!( From 48733917be2a59ba87b01a0bc4678347ebb96f4f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 12:12:19 +1000 Subject: [PATCH 051/186] Begin metrics refactor --- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++++++ beacon_node/beacon_chain/src/lib.rs | 6 ++++++ beacon_node/beacon_chain/src/metrics.rs | 12 ++++++++++++ beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/lib.rs | 2 ++ beacon_node/rest_api/src/metrics.rs | 17 +++++++++++++++++ 7 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 beacon_node/rest_api/src/metrics.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 778224a3d4..43e7614b6a 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -17,6 +17,7 @@ sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" +lazy_static = "1.3.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } @@ -24,4 +25,3 @@ lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] rand = "0.5.5" -lazy_static = "1.3.0" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ccf595893..e31844d582 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2,6 +2,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, 
ReverseStateRootIterator}; +use crate::metrics; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -848,6 +849,10 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); } + // Records the time taken to load the block and state from the database during block + // processing. + let db_read_timer = metrics::BLOCK_PROCESSING_DB_READ.start_timer(); + // Load the blocks parent block from the database, returning invalid if that block is not // found. let parent_block: BeaconBlock = match self.store.get(&block.parent_root)? { @@ -867,6 +872,8 @@ impl BeaconChain { .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; + db_read_timer.observe_duration(); + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3188760a42..e24534a2eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,3 +1,8 @@ +#[macro_use] +extern crate prometheus; +#[macro_use] +extern crate lazy_static; + mod beacon_chain; mod checkpoint; mod errors; @@ -13,6 +18,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; +pub use metrics::gather_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fa1718ebfb..fcb564e329 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,18 @@ pub use prometheus::Error; use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry}; +lazy_static! 
{ + pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( + "block_processing_db_read_times", + "Time spent loading block and state from DB" + ) + .unwrap(); +} + +pub fn gather_metrics() -> Vec { + prometheus::gather() +} + pub struct Metrics { pub block_processing_requests: IntCounter, pub block_processing_successes: IntCounter, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index fb6cb84134..821d6c0ea1 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -18,6 +18,7 @@ state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" http = "^0.1.17" +prometheus = { version = "^0.6", features = ["process"] } hyper = "0.12.32" futures = "0.1" exit-future = "0.1.3" diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a94a8cdf4a..7dc0df578d 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -3,6 +3,7 @@ extern crate hyper; mod beacon; mod config; mod helpers; +mod metrics; mod node; mod url_query; @@ -103,6 +104,7 @@ pub fn start_server( let result = match (req.method(), path.as_ref()) { (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/metrics") => metrics::get_prometheus(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs new file mode 100644 index 0000000000..1ecdf8b686 --- /dev/null +++ b/beacon_node/rest_api/src/metrics.rs @@ -0,0 +1,17 @@ +use crate::{success_response, ApiError, ApiResult}; +use hyper::{Body, Request}; +use prometheus::{Encoder, TextEncoder}; + +/// Returns the full set of Prometheus metrics for the Beacon Node application. 
+pub fn get_prometheus(_req: Request) -> ApiResult { + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + encoder + .encode(&beacon_chain::gather_metrics(), &mut buffer) + .unwrap(); + + String::from_utf8(buffer) + .map(|string| success_response(Body::from(string))) + .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) +} From 9995b390b5077ec8c8f92e3fe741590357bad05d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:11:13 +1000 Subject: [PATCH 052/186] Move beacon_chain to new metrics structure. --- beacon_node/beacon_chain/src/beacon_chain.rs | 41 ++-- beacon_node/beacon_chain/src/metrics.rs | 242 ++++++++----------- beacon_node/client/src/lib.rs | 5 - 3 files changed, 117 insertions(+), 171 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e31844d582..df9523624a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,7 +3,6 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; -use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; use log::trace; @@ -107,8 +106,6 @@ pub struct BeaconChain { /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, - /// Stores metrics about this `BeaconChain`. - pub metrics: Metrics, /// Logging to CLI, etc. 
log: Logger, } @@ -158,7 +155,6 @@ impl BeaconChain { canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), - metrics: Metrics::new()?, store, log, }) @@ -196,7 +192,6 @@ impl BeaconChain { canonical_head: RwLock::new(p.canonical_head), state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, - metrics: Metrics::new()?, store, log, })) @@ -473,8 +468,8 @@ impl BeaconChain { state: &BeaconState, ) -> Result { // Collect some metrics. - self.metrics.attestation_production_requests.inc(); - let timer = self.metrics.attestation_production_times.start_timer(); + metrics::ATTESTATION_PRODUCTION_REQUESTS.inc(); + let timer = metrics::ATTESTATION_PRODUCTION_TIMES.start_timer(); let slots_per_epoch = T::EthSpec::slots_per_epoch(); let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); @@ -521,7 +516,7 @@ impl BeaconChain { }; // Collect some metrics. - self.metrics.attestation_production_successes.inc(); + metrics::ATTESTATION_PRODUCTION_SUCCESSES.inc(); timer.observe_duration(); Ok(AttestationData { @@ -708,8 +703,8 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - self.metrics.attestation_processing_requests.inc(); - let timer = self.metrics.attestation_processing_times.start_timer(); + metrics::ATTESTATION_PROCESSING_REQUESTS.inc(); + let timer = metrics::ATTESTATION_PROCESSING_TIMES.start_timer(); // Find the highest between: // @@ -754,7 +749,7 @@ impl BeaconChain { .insert_attestation(attestation, state, &self.spec)?; // Update the metrics. 
- self.metrics.attestation_processing_successes.inc(); + metrics::ATTESTATION_PROCESSING_SUCCESSES.inc(); Ok(AttestationProcessingOutcome::Processed) }; @@ -810,8 +805,8 @@ impl BeaconChain { &self, block: BeaconBlock, ) -> Result { - self.metrics.block_processing_requests.inc(); - let timer = self.metrics.block_processing_times.start_timer(); + metrics::BLOCK_PROCESSING_REQUESTS.inc(); + let timer = metrics::BLOCK_PROCESSING_TIMES.start_timer(); let finalized_slot = self .state @@ -926,10 +921,8 @@ impl BeaconChain { ) }; - self.metrics.block_processing_successes.inc(); - self.metrics - .operations_per_block_attestation - .observe(block.body.attestations.len() as f64); + metrics::BLOCK_PROCESSING_SUCCESSES.inc(); + metrics::OPERATIONS_PER_BLOCK_ATTESTATION.observe(block.body.attestations.len() as f64); timer.observe_duration(); Ok(BlockProcessingOutcome::Processed { block_root }) @@ -965,8 +958,8 @@ impl BeaconChain { produce_at_slot: Slot, randao_reveal: Signature, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - self.metrics.block_production_requests.inc(); - let timer = self.metrics.block_production_times.start_timer(); + metrics::BLOCK_PRODUCTION_REQUESTS.inc(); + let timer = metrics::BLOCK_PRODUCTION_TIMES.start_timer(); // If required, transition the new state to the present slot. while state.slot < produce_at_slot { @@ -1018,7 +1011,7 @@ impl BeaconChain { block.state_root = state_root; - self.metrics.block_production_successes.inc(); + metrics::BLOCK_PRODUCTION_SUCCESSES.inc(); timer.observe_duration(); Ok((block, state)) @@ -1026,10 +1019,10 @@ impl BeaconChain { /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { - self.metrics.fork_choice_requests.inc(); + metrics::FORK_CHOICE_REQUESTS.inc(); // Start fork choice metrics timer. 
- let timer = self.metrics.fork_choice_times.start_timer(); + let timer = metrics::FORK_CHOICE_TIMES.start_timer(); // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; @@ -1039,7 +1032,7 @@ impl BeaconChain { // If a new head was chosen. if beacon_block_root != self.head().beacon_block_root { - self.metrics.fork_choice_changed_head.inc(); + metrics::FORK_CHOICE_CHANGED_HEAD.inc(); let beacon_block: BeaconBlock = self .store @@ -1057,7 +1050,7 @@ impl BeaconChain { // If we switched to a new chain (instead of building atop the present chain). if self.head().beacon_block_root != beacon_block.parent_root { - self.metrics.fork_choice_reorg_count.inc(); + metrics::FORK_CHOICE_REORG_COUNT.inc(); warn!( self.log, "Beacon chain re-org"; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fcb564e329..8b8307e93b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,155 +1,113 @@ pub use prometheus::Error; -use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry}; +use prometheus::{Histogram, IntCounter}; lazy_static! 
{ + /* + * Block Processing + */ pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( "block_processing_db_read_times", "Time spent loading block and state from DB" ) .unwrap(); + pub static ref BLOCK_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + "block_processing_requests", + "Count of blocks sumbitted for processing" + ) + .unwrap(); + pub static ref BLOCK_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + "block_processing_successes", + "Count of blocks processed without error" + ) + .unwrap(); + pub static ref BLOCK_PROCESSING_TIMES: Histogram = + register_histogram!("block_processing_times", "Full runtime of block processing") + .unwrap(); + + /* + * Block Production + */ + pub static ref BLOCK_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + "block_production_requests", + "Count of all block production requests" + ) + .unwrap(); + pub static ref BLOCK_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + "block_production_successes", + "Count of blocks sucessfully produced." 
+ ) + .unwrap(); + pub static ref BLOCK_PRODUCTION_TIMES: Histogram = + register_histogram!("block_production_times", "Full runtime of block production").unwrap(); + + /* + * Block Statistics + */ + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Histogram = register_histogram!( + "operations_per_block_attestation", + "Number of attestations in a block" + ) + .unwrap(); + + /* + * Attestation Processing + */ + pub static ref ATTESTATION_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + "attestation_processing_requests", + "Count of all attestations submitted for processing" + ) + .unwrap(); + pub static ref ATTESTATION_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + "attestation_processing_successes", + "total_attestation_processing_successes" + ) + .unwrap(); + pub static ref ATTESTATION_PROCESSING_TIMES: Histogram = register_histogram!( + "attestation_processing_times", + "Full runtime of attestation processing" + ) + .unwrap(); + + /* + * Attestation Production + */ + pub static ref ATTESTATION_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + "attestation_production_requests", + "Count of all attestation production requests" + ) + .unwrap(); + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + "attestation_production_successes", + "Count of attestations processed without error" + ) + .unwrap(); + pub static ref ATTESTATION_PRODUCTION_TIMES: Histogram = register_histogram!( + "attestation_production_times", + "Full runtime of attestation production" + ).unwrap(); + + /* + * Fork Choice + */ + pub static ref FORK_CHOICE_REQUESTS: IntCounter = register_int_counter!( + "fork_choice_requests", + "Count of occasions where fork choice has tried to find a head" + ) + .unwrap(); + pub static ref FORK_CHOICE_CHANGED_HEAD: IntCounter = register_int_counter!( + "fork_choice_changed_head", + "Count of occasions fork choice has found a new head" + ) + .unwrap(); + pub static ref FORK_CHOICE_REORG_COUNT: 
IntCounter = register_int_counter!( + "fork_choice_reorg_count", + "Count of occasions fork choice has switched to a different chain" + ) + .unwrap(); + pub static ref FORK_CHOICE_TIMES: Histogram = + register_histogram!("fork_choice_time", "Full runtime of fork choice").unwrap(); } pub fn gather_metrics() -> Vec { prometheus::gather() } - -pub struct Metrics { - pub block_processing_requests: IntCounter, - pub block_processing_successes: IntCounter, - pub block_processing_times: Histogram, - pub block_production_requests: IntCounter, - pub block_production_successes: IntCounter, - pub block_production_times: Histogram, - pub attestation_production_requests: IntCounter, - pub attestation_production_successes: IntCounter, - pub attestation_production_times: Histogram, - pub attestation_processing_requests: IntCounter, - pub attestation_processing_successes: IntCounter, - pub attestation_processing_times: Histogram, - pub fork_choice_requests: IntCounter, - pub fork_choice_changed_head: IntCounter, - pub fork_choice_reorg_count: IntCounter, - pub fork_choice_times: Histogram, - pub operations_per_block_attestation: Histogram, -} - -impl Metrics { - pub fn new() -> Result { - Ok(Self { - block_processing_requests: { - let opts = Opts::new("block_processing_requests", "total_blocks_processed"); - IntCounter::with_opts(opts)? - }, - block_processing_successes: { - let opts = Opts::new("block_processing_successes", "total_valid_blocks_processed"); - IntCounter::with_opts(opts)? - }, - block_processing_times: { - let opts = HistogramOpts::new("block_processing_times", "block_processing_time"); - Histogram::with_opts(opts)? - }, - block_production_requests: { - let opts = Opts::new("block_production_requests", "attempts_to_produce_new_block"); - IntCounter::with_opts(opts)? - }, - block_production_successes: { - let opts = Opts::new("block_production_successes", "blocks_successfully_produced"); - IntCounter::with_opts(opts)? 
- }, - block_production_times: { - let opts = HistogramOpts::new("block_production_times", "block_production_time"); - Histogram::with_opts(opts)? - }, - attestation_production_requests: { - let opts = Opts::new( - "attestation_production_requests", - "total_attestation_production_requests", - ); - IntCounter::with_opts(opts)? - }, - attestation_production_successes: { - let opts = Opts::new( - "attestation_production_successes", - "total_attestation_production_successes", - ); - IntCounter::with_opts(opts)? - }, - attestation_production_times: { - let opts = HistogramOpts::new( - "attestation_production_times", - "attestation_production_time", - ); - Histogram::with_opts(opts)? - }, - attestation_processing_requests: { - let opts = Opts::new( - "attestation_processing_requests", - "total_attestation_processing_requests", - ); - IntCounter::with_opts(opts)? - }, - attestation_processing_successes: { - let opts = Opts::new( - "attestation_processing_successes", - "total_attestation_processing_successes", - ); - IntCounter::with_opts(opts)? - }, - attestation_processing_times: { - let opts = HistogramOpts::new( - "attestation_processing_times", - "attestation_processing_time", - ); - Histogram::with_opts(opts)? - }, - fork_choice_requests: { - let opts = Opts::new("fork_choice_requests", "total_times_fork_choice_called"); - IntCounter::with_opts(opts)? - }, - fork_choice_changed_head: { - let opts = Opts::new( - "fork_choice_changed_head", - "total_times_fork_choice_chose_a_new_head", - ); - IntCounter::with_opts(opts)? - }, - fork_choice_reorg_count: { - let opts = Opts::new("fork_choice_reorg_count", "number_of_reorgs"); - IntCounter::with_opts(opts)? - }, - fork_choice_times: { - let opts = HistogramOpts::new("fork_choice_time", "total_time_to_run_fork_choice"); - Histogram::with_opts(opts)? 
- }, - operations_per_block_attestation: { - let opts = HistogramOpts::new( - "operations_per_block_attestation", - "count_of_attestations_per_block", - ); - Histogram::with_opts(opts)? - }, - }) - } - - pub fn register(&self, registry: &Registry) -> Result<(), Error> { - registry.register(Box::new(self.block_processing_requests.clone()))?; - registry.register(Box::new(self.block_processing_successes.clone()))?; - registry.register(Box::new(self.block_processing_times.clone()))?; - registry.register(Box::new(self.block_production_requests.clone()))?; - registry.register(Box::new(self.block_production_successes.clone()))?; - registry.register(Box::new(self.block_production_times.clone()))?; - registry.register(Box::new(self.attestation_production_requests.clone()))?; - registry.register(Box::new(self.attestation_production_successes.clone()))?; - registry.register(Box::new(self.attestation_production_times.clone()))?; - registry.register(Box::new(self.attestation_processing_requests.clone()))?; - registry.register(Box::new(self.attestation_processing_successes.clone()))?; - registry.register(Box::new(self.attestation_processing_times.clone()))?; - registry.register(Box::new(self.fork_choice_requests.clone()))?; - registry.register(Box::new(self.fork_choice_changed_head.clone()))?; - registry.register(Box::new(self.fork_choice_reorg_count.clone()))?; - registry.register(Box::new(self.fork_choice_times.clone()))?; - registry.register(Box::new(self.operations_per_block_attestation.clone()))?; - - Ok(()) - } -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 65ba071fa1..e06c5b60ee 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -71,11 +71,6 @@ where eth2_config.spec.clone(), log.clone(), )?); - // Registry all beacon chain metrics with the global registry. 
- beacon_chain - .metrics - .register(&metrics_registry) - .expect("Failed to registry metrics"); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") From e33d0703efcff8d37936968d3b7d591b4ab07b2a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:43:31 +1000 Subject: [PATCH 053/186] Make metrics not panic if already defined --- beacon_node/beacon_chain/src/beacon_chain.rs | 51 +++++---- beacon_node/beacon_chain/src/metrics.rs | 113 ++++++++++--------- 2 files changed, 88 insertions(+), 76 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index df9523624a..b0bb6a1592 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -468,8 +468,8 @@ impl BeaconChain { state: &BeaconState, ) -> Result { // Collect some metrics. - metrics::ATTESTATION_PRODUCTION_REQUESTS.inc(); - let timer = metrics::ATTESTATION_PRODUCTION_TIMES.start_timer(); + metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_TIMES); let slots_per_epoch = T::EthSpec::slots_per_epoch(); let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); @@ -516,8 +516,8 @@ impl BeaconChain { }; // Collect some metrics. 
- metrics::ATTESTATION_PRODUCTION_SUCCESSES.inc(); - timer.observe_duration(); + metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES); + metrics::stop_timer(timer); Ok(AttestationData { beacon_block_root: head_block_root, @@ -703,8 +703,8 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - metrics::ATTESTATION_PROCESSING_REQUESTS.inc(); - let timer = metrics::ATTESTATION_PROCESSING_TIMES.start_timer(); + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); // Find the highest between: // @@ -749,12 +749,12 @@ impl BeaconChain { .insert_attestation(attestation, state, &self.spec)?; // Update the metrics. - metrics::ATTESTATION_PROCESSING_SUCCESSES.inc(); + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES); Ok(AttestationProcessingOutcome::Processed) }; - timer.observe_duration(); + timer.map(|t| t.observe_duration()); result } @@ -805,8 +805,8 @@ impl BeaconChain { &self, block: BeaconBlock, ) -> Result { - metrics::BLOCK_PROCESSING_REQUESTS.inc(); - let timer = metrics::BLOCK_PROCESSING_TIMES.start_timer(); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self .state @@ -846,7 +846,7 @@ impl BeaconChain { // Records the time taken to load the block and state from the database during block // processing. - let db_read_timer = metrics::BLOCK_PROCESSING_DB_READ.start_timer(); + let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); // Load the blocks parent block from the database, returning invalid if that block is not // found. @@ -867,7 +867,7 @@ impl BeaconChain { .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; - db_read_timer.observe_duration(); + metrics::stop_timer(db_read_timer); // Transition the parent state to the block slot. 
let mut state: BeaconState = parent_state; @@ -921,9 +921,12 @@ impl BeaconChain { ) }; - metrics::BLOCK_PROCESSING_SUCCESSES.inc(); - metrics::OPERATIONS_PER_BLOCK_ATTESTATION.observe(block.body.attestations.len() as f64); - timer.observe_duration(); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); + metrics::observe( + &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, + block.body.attestations.len() as f64, + ); + metrics::stop_timer(timer); Ok(BlockProcessingOutcome::Processed { block_root }) } @@ -958,8 +961,8 @@ impl BeaconChain { produce_at_slot: Slot, randao_reveal: Signature, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - metrics::BLOCK_PRODUCTION_REQUESTS.inc(); - let timer = metrics::BLOCK_PRODUCTION_TIMES.start_timer(); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); // If required, transition the new state to the present slot. while state.slot < produce_at_slot { @@ -1011,28 +1014,28 @@ impl BeaconChain { block.state_root = state_root; - metrics::BLOCK_PRODUCTION_SUCCESSES.inc(); - timer.observe_duration(); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); + metrics::stop_timer(timer); Ok((block, state)) } /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { - metrics::FORK_CHOICE_REQUESTS.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); // Start fork choice metrics timer. - let timer = metrics::FORK_CHOICE_TIMES.start_timer(); + let timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; // End fork choice metrics timer. - timer.observe_duration(); + metrics::stop_timer(timer); // If a new head was chosen. 
if beacon_block_root != self.head().beacon_block_root { - metrics::FORK_CHOICE_CHANGED_HEAD.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); let beacon_block: BeaconBlock = self .store @@ -1050,7 +1053,7 @@ impl BeaconChain { // If we switched to a new chain (instead of building atop the present chain). if self.head().beacon_block_root != beacon_block.parent_root { - metrics::FORK_CHOICE_REORG_COUNT.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); warn!( self.log, "Beacon chain re-org"; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8b8307e93b..417c2904ab 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,111 +1,120 @@ pub use prometheus::Error; -use prometheus::{Histogram, IntCounter}; +use prometheus::{Histogram, HistogramTimer, IntCounter, Result}; + +pub fn start_timer(histogram: &Result) -> Option { + if let Ok(histogram) = histogram { + Some(histogram.start_timer()) + } else { + None + } +} + +pub fn stop_timer(timer: Option) { + timer.map(|t| t.observe_duration()); +} + +pub fn inc_counter(counter: &Result) { + if let Ok(counter) = counter { + counter.inc(); + } +} + +pub fn observe(histogram: &Result, value: f64) { + if let Ok(histogram) = histogram { + histogram.observe(value); + } +} lazy_static! 
{ /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( + pub static ref BLOCK_PROCESSING_DB_READ: Result = register_histogram!( "block_processing_db_read_times", "Time spent loading block and state from DB" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PROCESSING_REQUESTS: Result = register_int_counter!( "block_processing_requests", "Count of blocks sumbitted for processing" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PROCESSING_SUCCESSES: Result = register_int_counter!( "block_processing_successes", "Count of blocks processed without error" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_TIMES: Histogram = - register_histogram!("block_processing_times", "Full runtime of block processing") - .unwrap(); + ); + pub static ref BLOCK_PROCESSING_TIMES: Result = + register_histogram!("block_processing_times", "Full runtime of block processing"); /* * Block Production */ - pub static ref BLOCK_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + pub static ref BLOCK_PRODUCTION_REQUESTS: Result = register_int_counter!( "block_production_requests", "Count of all block production requests" - ) - .unwrap(); - pub static ref BLOCK_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = register_int_counter!( "block_production_successes", "Count of blocks sucessfully produced." 
- ) - .unwrap(); - pub static ref BLOCK_PRODUCTION_TIMES: Histogram = - register_histogram!("block_production_times", "Full runtime of block production").unwrap(); + ); + pub static ref BLOCK_PRODUCTION_TIMES: Result = + register_histogram!("block_production_times", "Full runtime of block production"); /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Histogram = register_histogram!( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = register_histogram!( "operations_per_block_attestation", "Number of attestations in a block" - ) - .unwrap(); + ); /* * Attestation Processing */ - pub static ref ATTESTATION_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = register_int_counter!( "attestation_processing_requests", "Count of all attestations submitted for processing" - ) - .unwrap(); - pub static ref ATTESTATION_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = register_int_counter!( "attestation_processing_successes", "total_attestation_processing_successes" - ) - .unwrap(); - pub static ref ATTESTATION_PROCESSING_TIMES: Histogram = register_histogram!( + ); + pub static ref ATTESTATION_PROCESSING_TIMES: Result = register_histogram!( "attestation_processing_times", "Full runtime of attestation processing" - ) - .unwrap(); + ); /* * Attestation Production */ - pub static ref ATTESTATION_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = register_int_counter!( "attestation_production_requests", "Count of all attestation production requests" - ) - .unwrap(); - pub static ref ATTESTATION_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = register_int_counter!( "attestation_production_successes", "Count of attestations processed without error" - ) - .unwrap(); - pub 
static ref ATTESTATION_PRODUCTION_TIMES: Histogram = register_histogram!( + ); + pub static ref ATTESTATION_PRODUCTION_TIMES: Result = register_histogram!( "attestation_production_times", "Full runtime of attestation production" - ).unwrap(); + ); /* * Fork Choice */ - pub static ref FORK_CHOICE_REQUESTS: IntCounter = register_int_counter!( + pub static ref FORK_CHOICE_REQUESTS: Result = register_int_counter!( "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" - ) - .unwrap(); - pub static ref FORK_CHOICE_CHANGED_HEAD: IntCounter = register_int_counter!( + ); + pub static ref FORK_CHOICE_CHANGED_HEAD: Result = register_int_counter!( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" - ) - .unwrap(); - pub static ref FORK_CHOICE_REORG_COUNT: IntCounter = register_int_counter!( + ); + pub static ref FORK_CHOICE_REORG_COUNT: Result = register_int_counter!( "fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" - ) - .unwrap(); - pub static ref FORK_CHOICE_TIMES: Histogram = - register_histogram!("fork_choice_time", "Full runtime of fork choice").unwrap(); + ); + pub static ref FORK_CHOICE_TIMES: Result = + register_histogram!("fork_choice_time", "Full runtime of fork choice"); } pub fn gather_metrics() -> Vec { From 36ff115b04a90f767e08d5b52754a643aa2c950d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:46:20 +1000 Subject: [PATCH 054/186] Use global prometheus gather at rest api --- beacon_node/beacon_chain/src/metrics.rs | 4 ---- beacon_node/rest_api/src/metrics.rs | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 417c2904ab..dc2919cc43 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -116,7 +116,3 @@ lazy_static! 
{ pub static ref FORK_CHOICE_TIMES: Result = register_histogram!("fork_choice_time", "Full runtime of fork choice"); } - -pub fn gather_metrics() -> Vec { - prometheus::gather() -} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 1ecdf8b686..b0f5b8605e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -7,9 +7,7 @@ pub fn get_prometheus(_req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); - encoder - .encode(&beacon_chain::gather_metrics(), &mut buffer) - .unwrap(); + encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); String::from_utf8(buffer) .map(|string| success_response(Body::from(string))) From 2108895fca7c34928b7d0540d8ea0c84740d56ac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 15:34:10 +1000 Subject: [PATCH 055/186] Unify common metric fns into a crate --- Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/errors.rs | 8 --- beacon_node/beacon_chain/src/lib.rs | 3 -- beacon_node/beacon_chain/src/metrics.rs | 67 ++++++++---------------- eth2/utils/lighthouse_metrics/Cargo.toml | 11 ++++ eth2/utils/lighthouse_metrics/src/lib.rs | 49 +++++++++++++++++ 7 files changed, 83 insertions(+), 58 deletions(-) create mode 100644 eth2/utils/lighthouse_metrics/Cargo.toml create mode 100644 eth2/utils/lighthouse_metrics/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index f5ee02a173..9b7b87a0d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "eth2/utils/eth2_interop_keypairs", "eth2/utils/logging", "eth2/utils/eth2_hashing", + "eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 43e7614b6a..850aa2e947 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,7 +7,7 @@ edition 
= "2018" [dependencies] store = { path = "../store" } parking_lot = "0.7" -prometheus = "^0.6" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } serde = "1.0" diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7a51fc4258..22df90397e 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,5 +1,4 @@ use crate::fork_choice::Error as ForkChoiceError; -use crate::metrics::Error as MetricsError; use state_processing::per_block_processing::errors::{ AttestationValidationError, IndexedAttestationValidationError, }; @@ -34,7 +33,6 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), - MetricsError(String), NoStateForAttestation { beacon_block_root: Hash256, }, @@ -44,12 +42,6 @@ pub enum BeaconChainError { easy_from_to!(SlotProcessingError, BeaconChainError); -impl From for BeaconChainError { - fn from(e: MetricsError) -> BeaconChainError { - BeaconChainError::MetricsError(format!("{:?}", e)) - } -} - #[derive(Debug, PartialEq)] pub enum BlockProductionError { UnableToGetBlockRootFromState, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e24534a2eb..98bd60a35f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,6 +1,4 @@ #[macro_use] -extern crate prometheus; -#[macro_use] extern crate lazy_static; mod beacon_chain; @@ -18,7 +16,6 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; -pub use metrics::gather_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 
dc2919cc43..03f4783ff9 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,67 +1,42 @@ -pub use prometheus::Error; -use prometheus::{Histogram, HistogramTimer, IntCounter, Result}; - -pub fn start_timer(histogram: &Result) -> Option { - if let Ok(histogram) = histogram { - Some(histogram.start_timer()) - } else { - None - } -} - -pub fn stop_timer(timer: Option) { - timer.map(|t| t.observe_duration()); -} - -pub fn inc_counter(counter: &Result) { - if let Ok(counter) = counter { - counter.inc(); - } -} - -pub fn observe(histogram: &Result, value: f64) { - if let Ok(histogram) = histogram { - histogram.observe(value); - } -} +pub use lighthouse_metrics::*; lazy_static! { /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Result = register_histogram!( + pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "block_processing_db_read_times", "Time spent loading block and state from DB" ); - pub static ref BLOCK_PROCESSING_REQUESTS: Result = register_int_counter!( + pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( "block_processing_requests", - "Count of blocks sumbitted for processing" + "Count of blocks submitted for processing" ); - pub static ref BLOCK_PROCESSING_SUCCESSES: Result = register_int_counter!( + pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( "block_processing_successes", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - register_histogram!("block_processing_times", "Full runtime of block processing"); + try_create_histogram("block_processing_times", "Full runtime of block processing"); /* * Block Production */ - pub static ref BLOCK_PRODUCTION_REQUESTS: Result = register_int_counter!( + pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( "block_production_requests", "Count of all block production requests" ); - pub static ref 
BLOCK_PRODUCTION_SUCCESSES: Result = register_int_counter!( + pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( "block_production_successes", - "Count of blocks sucessfully produced." + "Count of blocks successfully produced." ); pub static ref BLOCK_PRODUCTION_TIMES: Result = - register_histogram!("block_production_times", "Full runtime of block production"); + try_create_histogram("block_production_times", "Full runtime of block production"); /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = register_histogram!( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( "operations_per_block_attestation", "Number of attestations in a block" ); @@ -69,15 +44,15 @@ lazy_static! { /* * Attestation Processing */ - pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( "attestation_processing_requests", "Count of all attestations submitted for processing" ); - pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( "attestation_processing_successes", "total_attestation_processing_successes" ); - pub static ref ATTESTATION_PROCESSING_TIMES: Result = register_histogram!( + pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( "attestation_processing_times", "Full runtime of attestation processing" ); @@ -85,15 +60,15 @@ lazy_static! 
{ /* * Attestation Production */ - pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( "attestation_production_requests", "Count of all attestation production requests" ); - pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( "attestation_production_successes", "Count of attestations processed without error" ); - pub static ref ATTESTATION_PRODUCTION_TIMES: Result = register_histogram!( + pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( "attestation_production_times", "Full runtime of attestation production" ); @@ -101,18 +76,18 @@ lazy_static! { /* * Fork Choice */ - pub static ref FORK_CHOICE_REQUESTS: Result = register_int_counter!( + pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); - pub static ref FORK_CHOICE_CHANGED_HEAD: Result = register_int_counter!( + pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" ); - pub static ref FORK_CHOICE_REORG_COUNT: Result = register_int_counter!( + pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( "fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - register_histogram!("fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("fork_choice_time", "Full runtime of fork choice"); } diff --git a/eth2/utils/lighthouse_metrics/Cargo.toml b/eth2/utils/lighthouse_metrics/Cargo.toml new file mode 100644 index 0000000000..0a24a96fb5 --- /dev/null +++ b/eth2/utils/lighthouse_metrics/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "lighthouse_metrics" 
+version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +lazy_static = "1.3.0" +prometheus = "^0.6" diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs new file mode 100644 index 0000000000..e6e30f6bb7 --- /dev/null +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -0,0 +1,49 @@ +use prometheus::{HistogramOpts, HistogramTimer, Opts}; + +pub use prometheus::{Histogram, IntCounter, Result}; + +pub fn try_create_int_counter(name: &str, help: &str) -> Result { + let opts = Opts::new(name, help); + let counter = IntCounter::with_opts(opts)?; + prometheus::register(Box::new(counter.clone()))?; + Ok(counter) +} + +pub fn try_create_histogram(name: &str, help: &str) -> Result { + let opts = HistogramOpts::new(name, help); + let histogram = Histogram::with_opts(opts)?; + prometheus::register(Box::new(histogram.clone()))?; + Ok(histogram) +} + +pub fn start_timer(histogram: &Result) -> Option { + if let Ok(histogram) = histogram { + Some(histogram.start_timer()) + } else { + None + } +} + +pub fn stop_timer(timer: Option) { + timer.map(|t| t.observe_duration()); +} + +pub fn inc_counter(counter: &Result) { + if let Ok(counter) = counter { + counter.inc(); + } +} + +pub fn observe(histogram: &Result, value: f64) { + if let Ok(histogram) = histogram { + histogram.observe(value); + } +} + +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} From 441eb41b6bd3a36d5f673c23d392ef5a9796706d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 15:53:34 +1000 Subject: [PATCH 056/186] Add heavy metering to block processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 34 ++++++++++++++++-- beacon_node/beacon_chain/src/metrics.rs | 36 +++++++++++++++++--- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index b0bb6a1592..f5fb954b91 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -806,7 +806,7 @@ impl BeaconChain { block: BeaconBlock, ) -> Result { metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - let timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); + let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self .state @@ -869,15 +869,25 @@ impl BeaconChain { metrics::stop_timer(db_read_timer); + let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { per_slot_processing(&mut state, &self.spec)?; } + metrics::stop_timer(catchup_timer); + + let commitee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + metrics::stop_timer(commitee_timer); + + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); + // Apply the received block to its parent state (which has been transitioned into this // slot). match per_block_processing(&mut state, &block, &self.spec) { @@ -888,16 +898,29 @@ impl BeaconChain { _ => {} } + metrics::stop_timer(core_timer); + + let state_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_STATE_ROOT); + let state_root = state.canonical_root(); if block.state_root != state_root { return Ok(BlockProcessingOutcome::StateRootMismatch); } + metrics::stop_timer(state_root_timer); + + let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); + // Store the block and state. 
self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; + metrics::stop_timer(db_write_timer); + + let fork_choice_register_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); + // Register the new block with the fork choice service. if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( @@ -909,6 +932,11 @@ impl BeaconChain { ) } + metrics::stop_timer(fork_choice_register_timer); + + let find_head_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD); + // Execute the fork choice algorithm, enthroning a new head if discovered. // // Note: in the future we may choose to run fork-choice less often, potentially based upon @@ -921,12 +949,14 @@ impl BeaconChain { ) }; + metrics::stop_timer(find_head_timer); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); metrics::observe( &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, block.body.attestations.len() as f64, ); - metrics::stop_timer(timer); + metrics::stop_timer(full_timer); Ok(BlockProcessingOutcome::Processed { block_root }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 03f4783ff9..38a7af9e15 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,10 +4,6 @@ lazy_static! { /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "block_processing_db_read_times", - "Time spent loading block and state from DB" - ); pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( "block_processing_requests", "Count of blocks submitted for processing" @@ -18,6 +14,38 @@ lazy_static! 
{ ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("block_processing_times", "Full runtime of block processing"); + pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( + "block_processing_db_read_times", + "Time spent loading block and state from DB for block processing" + ); + pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( + "block_processing_catch-up_state_times", + "Time spent skipping slots on a state before processing a block." + ); + pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( + "block_processing_committee_building_times", + "Time spent building/obtaining committees for block processing." + ); + pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( + "block_processing_core_times", + "Time spent doing the core per_block_processing state processing." + ); + pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( + "block_processing_state_root_times", + "Time spent calculating the state root when processing a block." 
+ ); + pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( + "block_processing_db_write_times", + "Time spent writing a newly processed block and state to DB" + ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( + "block_processing_fork_choice_register_times", + "Time spent registering the new block with fork choice (but not finding head)" + ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( + "block_processing_fork_choice_find_head_times", + "Time spent finding the new head after processing a new block" + ); /* * Block Production From 76f42ac7ffd7d25e4c92393370b5b4717cacab49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 16:15:26 +1000 Subject: [PATCH 057/186] Remove hypen from prometheus metric name --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 38a7af9e15..d0b6e27fcd 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -19,7 +19,7 @@ lazy_static! { "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "block_processing_catch-up_state_times", + "block_processing_catch_up_state_times", "Time spent skipping slots on a state before processing a block." 
); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( From 42d300bdc35df563598fcd65488b5fb21342a60b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 17:49:32 +1000 Subject: [PATCH 058/186] Add more beacon chain metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 ++++++++++++++---- beacon_node/beacon_chain/src/fork_choice.rs | 20 +++++++++++++++++--- beacon_node/beacon_chain/src/metrics.rs | 20 ++++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f5fb954b91..6f9a2b414d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1060,11 +1060,8 @@ impl BeaconChain { // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; - // End fork choice metrics timer. - metrics::stop_timer(timer); - // If a new head was chosen. - if beacon_block_root != self.head().beacon_block_root { + let result = if beacon_block_root != self.head().beacon_block_root { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); let beacon_block: BeaconBlock = self @@ -1127,11 +1124,22 @@ impl BeaconChain { } } else { Ok(()) + }; + + // End fork choice metrics timer. + metrics::stop_timer(timer); + + if let Err(_) = result { + metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); } + + result } /// Update the canonical head to `new_head`. fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; @@ -1158,6 +1166,8 @@ impl BeaconChain { // Save `self` to `self.store`. 
self.persist()?; + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index edd426f296..77fdaacdc5 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainTypes}; use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices; use std::sync::Arc; @@ -46,6 +46,8 @@ impl ForkChoice { } pub fn find_head(&self, chain: &BeaconChain) -> Result { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES); + let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); // From the specification: @@ -97,9 +99,14 @@ impl ForkChoice { .map(|v| v.effective_balance) }; - self.backend + let result = self + .backend .find_head(start_block_slot, start_block_root, weight) - .map_err(Into::into) + .map_err(Into::into); + + metrics::stop_timer(timer); + + result } /// Process all attestations in the given `block`. @@ -112,6 +119,7 @@ impl ForkChoice { block: &BeaconBlock, block_root: Hash256, ) -> Result<()> { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); // Note: we never count the block as a latest message, only attestations. // // I (Paul H) do not have an explicit reference to this, but I derive it from this @@ -136,6 +144,8 @@ impl ForkChoice { // a block that has the majority of votes applied to it. self.backend.process_block(block, block_root)?; + metrics::stop_timer(timer); + Ok(()) } @@ -148,6 +158,8 @@ impl ForkChoice { attestation: &Attestation, block: &BeaconBlock, ) -> Result<()> { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); + let block_hash = attestation.data.beacon_block_root; // Ignore any attestations to the zero hash. 
@@ -175,6 +187,8 @@ impl ForkChoice { } } + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d0b6e27fcd..34f359ad8d 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -108,6 +108,10 @@ lazy_static! { "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); + pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( + "fork_choice_errors", + "Count of occasions where fork choice has returned an error when trying to find a head" + ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" @@ -118,4 +122,20 @@ lazy_static! { ); pub static ref FORK_CHOICE_TIMES: Result = try_create_histogram("fork_choice_time", "Full runtime of fork choice"); + pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = + try_create_histogram("fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( + "fork_choice_process_block_time", + "Time taken to add a block and all attestations to fork choice" + ); + pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( + "fork_choice_process_attestation_time", + "Time taken to add an attestation to fork choice" + ); + + /* + * Head Updating + */ + pub static ref UPDATE_HEAD_TIMES: Result = + try_create_histogram("update_head_times", "Time taken to update the canonical head"); } From 78db947e6e65f4d0960ca5b9340305c663856244 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 18:28:57 +1000 Subject: [PATCH 059/186] Add beacon chain persistence metric --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++++ beacon_node/beacon_chain/src/metrics.rs | 6 ++++++ 2 files changed, 10 insertions(+) diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6f9a2b414d..96ff339a67 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -199,6 +199,8 @@ impl BeaconChain { /// Attempt to save this instance to `self.store`. pub fn persist(&self) -> Result<(), Error> { + let timer = metrics::start_timer(&metrics::PERSIST_CHAIN); + let p: PersistedBeaconChain = PersistedBeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), @@ -209,6 +211,8 @@ impl BeaconChain { let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); self.store.put(&key, &p)?; + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 34f359ad8d..b911254633 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -138,4 +138,10 @@ lazy_static! 
{ */ pub static ref UPDATE_HEAD_TIMES: Result = try_create_histogram("update_head_times", "Time taken to update the canonical head"); + + /* + * Persisting BeaconChain to disk + */ + pub static ref PERSIST_CHAIN: Result = + try_create_histogram("persist_chain", "Time taken to update the canonical head"); } From 6150f0ae1a549dcc1d76c831c4ca5cae03300dd7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 18:29:11 +1000 Subject: [PATCH 060/186] Prune op pool on finalization --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 96ff339a67..0e0583309f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1199,6 +1199,9 @@ impl BeaconChain { self.fork_choice .process_finalization(&finalized_block, finalized_block_root)?; + self.op_pool + .prune_all(&self.head().beacon_state, &self.spec); + Ok(()) } } From 7140dbc45da4a8895a155450f1777fa1655991ac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:26:58 +1000 Subject: [PATCH 061/186] Add extra prom beacon chain metrics --- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 156 ++++++++++++++++++++++- beacon_node/client/src/lib.rs | 1 + beacon_node/rest_api/src/lib.rs | 20 ++- beacon_node/rest_api/src/metrics.rs | 18 ++- beacon_node/store/Cargo.toml | 2 + beacon_node/store/src/lib.rs | 4 + beacon_node/store/src/metrics.rs | 25 ++++ eth2/utils/lighthouse_metrics/src/lib.rs | 15 ++- 10 files changed, 233 insertions(+), 11 deletions(-) create mode 100644 beacon_node/store/src/metrics.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 850aa2e947..462d44e920 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,6 +7,7 @@ edition = 
"2018" [dependencies] store = { path = "../store" } parking_lot = "0.7" +lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } @@ -17,7 +18,6 @@ sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" -lazy_static = "1.3.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 98bd60a35f..1262bc5372 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -16,6 +16,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; +pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b911254633..6ed8218f02 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,4 +1,6 @@ +use crate::{BeaconChain, BeaconChainTypes}; pub use lighthouse_metrics::*; +use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; lazy_static! { /* @@ -133,15 +135,157 @@ lazy_static! { "Time taken to add an attestation to fork choice" ); - /* - * Head Updating - */ - pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("update_head_times", "Time taken to update the canonical head"); - /* * Persisting BeaconChain to disk */ pub static ref PERSIST_CHAIN: Result = try_create_histogram("persist_chain", "Time taken to update the canonical head"); } + +// Lazy-static is split so we don't reach the crate-level recursion limit. +lazy_static! 
{ + /* + * Slot Clock + */ + pub static ref PRESENT_SLOT: Result = + try_create_int_gauge("present_slot", "The present slot, according to system time"); + pub static ref PRESENT_EPOCH: Result = + try_create_int_gauge("present_epoch", "The present epoch, according to system time"); + + /* + * Chain Head + */ + pub static ref UPDATE_HEAD_TIMES: Result = + try_create_histogram("update_head_times", "Time taken to update the canonical head"); + pub static ref HEAD_STATE_SLOT: Result = + try_create_int_gauge("head_state_slot", "Slot of the block at the head of the chain"); + pub static ref HEAD_STATE_ROOT: Result = + try_create_int_gauge("head_state_root", "Root of the block at the head of the chain"); + pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result = + try_create_int_gauge("head_state_latest_block_slot", "Latest block slot at the head of the chain"); + pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result = + try_create_int_gauge("head_state_current_justified_root", "Current justified root at the head of the chain"); + pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result = + try_create_int_gauge("head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); + pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result = + try_create_int_gauge("head_state_previous_justified_root", "Previous justified root at the head of the chain"); + pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result = + try_create_int_gauge("head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); + pub static ref HEAD_STATE_FINALIZED_ROOT: Result = + try_create_int_gauge("head_state_finalized_root", "Finalized root at the head of the chain"); + pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = + try_create_int_gauge("head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = + try_create_int_gauge("head_state_total_validators", "Count of validators 
at the head of the chain"); + pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = + try_create_int_gauge("head_state_active_validators", "Count of active validators at the head of the chain"); + pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = + try_create_int_gauge("head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = + try_create_int_gauge("head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = + try_create_int_gauge("head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = + try_create_int_gauge("head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); +} + +/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, +/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. +pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { + set_gauge_by_slot( + &PRESENT_SLOT, + beacon_chain + .read_slot_clock() + .unwrap_or_else(|| Slot::new(0)), + ); + + set_gauge_by_epoch( + &PRESENT_EPOCH, + beacon_chain + .read_slot_clock() + .map(|s| s.epoch(T::EthSpec::slots_per_epoch())) + .unwrap_or_else(|| Epoch::new(0)), + ); + + scrape_head_state::( + &beacon_chain.head().beacon_state, + beacon_chain.head().beacon_state_root, + ); +} + +/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. 
+fn scrape_head_state(state: &BeaconState, state_root: Hash256) { + set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot); + set_gauge_by_hash(&HEAD_STATE_ROOT, state_root); + set_gauge_by_slot( + &HEAD_STATE_LATEST_BLOCK_SLOT, + state.latest_block_header.slot, + ); + set_gauge_by_hash( + &HEAD_STATE_CURRENT_JUSTIFIED_ROOT, + state.current_justified_checkpoint.root, + ); + set_gauge_by_epoch( + &HEAD_STATE_CURRENT_JUSTIFIED_EPOCH, + state.current_justified_checkpoint.epoch, + ); + set_gauge_by_hash( + &HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT, + state.previous_justified_checkpoint.root, + ); + set_gauge_by_epoch( + &HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH, + state.previous_justified_checkpoint.epoch, + ); + set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root); + set_gauge_by_epoch( + &HEAD_STATE_FINALIZED_EPOCH, + state.finalized_checkpoint.epoch, + ); + set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); + set_gauge_by_u64( + &HEAD_STATE_VALIDATOR_BALANCES, + state.balances.iter().fold(0_u64, |acc, i| acc + i), + ); + set_gauge_by_usize( + &HEAD_STATE_ACTIVE_VALIDATORS, + state + .validators + .iter() + .filter(|v| v.is_active_at(state.current_epoch())) + .count(), + ); + set_gauge_by_usize( + &HEAD_STATE_SLASHED_VALIDATORS, + state.validators.iter().filter(|v| v.slashed).count(), + ); + set_gauge_by_usize( + &HEAD_STATE_WITHDRAWN_VALIDATORS, + state + .validators + .iter() + .filter(|v| v.is_withdrawable_at(state.current_epoch())) + .count(), + ); + set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index); +} + +fn set_gauge_by_slot(gauge: &Result, value: Slot) { + set_gauge(gauge, value.as_u64() as i64); +} + +fn set_gauge_by_epoch(gauge: &Result, value: Epoch) { + set_gauge(gauge, value.as_u64() as i64); +} + +fn set_gauge_by_hash(gauge: &Result, value: Hash256) { + set_gauge(gauge, value.to_low_u64_le() as i64); +} + +fn set_gauge_by_usize(gauge: &Result, value: usize) { + set_gauge(gauge, value as i64); +} + 
+fn set_gauge_by_u64(gauge: &Result, value: u64) { + set_gauge(gauge, value as i64); +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e06c5b60ee..c74787f606 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -142,6 +142,7 @@ where &client_config.rest_api, executor, beacon_chain.clone(), + client_config.db_path().expect("unable to read datadir"), &log, ) { Ok(s) => Some(s), diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 7dc0df578d..fea67618ba 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -13,6 +13,8 @@ use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; use slog::{info, o, warn}; +use std::ops::Deref; +use std::path::PathBuf; use std::sync::Arc; use tokio::runtime::TaskExecutor; use url_query::UrlQuery; @@ -68,6 +70,7 @@ pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, + db_path: PathBuf, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -81,6 +84,8 @@ pub fn start_server( Ok(()) }); + let db_path = DBPath(db_path); + // Get the address to bind to let bind_addr = (config.listen_address, config.port).into(); @@ -91,12 +96,14 @@ pub fn start_server( let service = move || { let log = server_log.clone(); let beacon_chain = server_bc.clone(); + let db_path = db_path.clone(); // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { req.extensions_mut().insert::(log.clone()); req.extensions_mut() .insert::>>(beacon_chain.clone()); + req.extensions_mut().insert::(db_path.clone()); let path = req.uri().path().to_string(); @@ -104,7 +111,7 @@ pub fn start_server( let result = match (req.method(), path.as_ref()) { (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), - (&Method::GET, "/metrics") => metrics::get_prometheus(req), + (&Method::GET, "/metrics") => metrics::get_prometheus::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), @@ -154,3 +161,14 @@ fn success_response(body: Body) -> Response { .body(body) .expect("We should always be able to make response from the success body.") } + +#[derive(Clone)] +pub struct DBPath(PathBuf); + +impl Deref for DBPath { + type Target = PathBuf; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index b0f5b8605e..0cd700c445 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,12 +1,26 @@ -use crate::{success_response, ApiError, ApiResult}; +use crate::{success_response, ApiError, ApiResult, DBPath}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; +use std::sync::Arc; /// Returns the full set of Prometheus metrics for the Beacon Node application. 
-pub fn get_prometheus(_req: Request) -> ApiResult { +pub fn get_prometheus(req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let db_path = req + .extensions() + .get::() + .ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?; + + store::scrape_for_metrics(&db_path); + beacon_chain::scrape_for_metrics(&beacon_chain); + encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); String::from_utf8(buffer) diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 9607e8b8e5..cd9711253e 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -15,3 +15,5 @@ eth2_ssz = "0.1" eth2_ssz_derive = "0.1" tree_hash = "0.1" types = { path = "../../eth2/types" } +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 5b8d583200..9c0e3cbaec 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,18 +7,22 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. +#[macro_use] +extern crate lazy_static; mod block_at_slot; mod errors; mod impls; mod leveldb_store; mod memory_store; +mod metrics; pub mod iter; pub use self::leveldb_store::LevelDB as DiskStore; pub use self::memory_store::MemoryStore; pub use errors::Error; +pub use metrics::scrape_for_metrics; pub use types::*; /// An object capable of storing and retrieving objects implementing `StoreItem`. 
diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs new file mode 100644 index 0000000000..b6a055f102 --- /dev/null +++ b/beacon_node/store/src/metrics.rs @@ -0,0 +1,25 @@ +pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; + +use std::fs; +use std::path::PathBuf; + +lazy_static! { + pub static ref DISK_DB_SIZE: Result = + try_create_int_gauge("database_size", "Size of the on-disk database (bytes)"); +} + +/// Updates the global metrics registry with store-related information. +pub fn scrape_for_metrics(db_path: &PathBuf) { + let db_size = if let Ok(iter) = fs::read_dir(db_path) { + iter.filter_map(std::result::Result::ok) + .map(size_of_dir_entry) + .fold(0_u64, |sum, val| sum + val) + } else { + 0 + }; + set_gauge(&DISK_DB_SIZE, db_size as i64); +} + +fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { + dir.metadata().map(|m| m.len()).unwrap_or(0) +} diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index e6e30f6bb7..d55fcd3e21 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -1,6 +1,6 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; -pub use prometheus::{Histogram, IntCounter, Result}; +pub use prometheus::{Histogram, IntCounter, IntGauge, Result}; pub fn try_create_int_counter(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); @@ -9,6 +9,13 @@ pub fn try_create_int_counter(name: &str, help: &str) -> Result { Ok(counter) } +pub fn try_create_int_gauge(name: &str, help: &str) -> Result { + let opts = Opts::new(name, help); + let gauge = IntGauge::with_opts(opts)?; + prometheus::register(Box::new(gauge.clone()))?; + Ok(gauge) +} + pub fn try_create_histogram(name: &str, help: &str) -> Result { let opts = HistogramOpts::new(name, help); let histogram = Histogram::with_opts(opts)?; @@ -34,6 +41,12 @@ pub fn inc_counter(counter: &Result) { } } +pub fn set_gauge(gauge: &Result, value: i64) { + if 
let Ok(gauge) = gauge { + gauge.set(value); + } +} + pub fn observe(histogram: &Result, value: f64) { if let Ok(histogram) = histogram { histogram.observe(value); From 913ee4694eb4310b08def5feef4f111233b6c3e5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:35:16 +1000 Subject: [PATCH 062/186] Prefix BeaconChain metrics with "beacon_" --- beacon_node/beacon_chain/src/metrics.rs | 94 ++++++++++++------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 6ed8218f02..227f1090f7 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -7,45 +7,45 @@ lazy_static! { * Block Processing */ pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( - "block_processing_requests", + "beacon_block_processing_requests", "Count of blocks submitted for processing" ); pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "block_processing_successes", + "beacon_block_processing_successes", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("block_processing_times", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "block_processing_db_read_times", + "beacon_block_processing_db_read_times", "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "block_processing_catch_up_state_times", + "beacon_block_processing_catch_up_state_times", "Time spent skipping slots on a state before processing a block." ); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( - "block_processing_committee_building_times", + "beacon_block_processing_committee_building_times", "Time spent building/obtaining committees for block processing." 
); pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( - "block_processing_core_times", + "beacon_block_processing_core_times", "Time spent doing the core per_block_processing state processing." ); pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( - "block_processing_state_root_times", + "beacon_block_processing_state_root_times", "Time spent calculating the state root when processing a block." ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( - "block_processing_db_write_times", + "beacon_block_processing_db_write_times", "Time spent writing a newly processed block and state to DB" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( - "block_processing_fork_choice_register_times", + "beacon_block_processing_fork_choice_register_times", "Time spent registering the new block with fork choice (but not finding head)" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( - "block_processing_fork_choice_find_head_times", + "beacon_block_processing_fork_choice_find_head_times", "Time spent finding the new head after processing a new block" ); @@ -53,21 +53,21 @@ lazy_static! { * Block Production */ pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "block_production_requests", + "beacon_block_production_requests", "Count of all block production requests" ); pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "block_production_successes", + "beacon_block_production_successes", "Count of blocks successfully produced." 
); pub static ref BLOCK_PRODUCTION_TIMES: Result = - try_create_histogram("block_production_times", "Full runtime of block production"); + try_create_histogram("beacon_block_production_times", "Full runtime of block production"); /* * Block Statistics */ pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( - "operations_per_block_attestation", + "beacon_operations_per_block_attestation", "Number of attestations in a block" ); @@ -75,15 +75,15 @@ lazy_static! { * Attestation Processing */ pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "attestation_processing_requests", + "beacon_attestation_processing_requests", "Count of all attestations submitted for processing" ); pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "attestation_processing_successes", + "beacon_attestation_processing_successes", "total_attestation_processing_successes" ); pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( - "attestation_processing_times", + "beacon_attestation_processing_times", "Full runtime of attestation processing" ); @@ -91,15 +91,15 @@ lazy_static! { * Attestation Production */ pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "attestation_production_requests", + "beacon_attestation_production_requests", "Count of all attestation production requests" ); pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "attestation_production_successes", + "beacon_attestation_production_successes", "Count of attestations processed without error" ); pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( - "attestation_production_times", + "beacon_attestation_production_times", "Full runtime of attestation production" ); @@ -107,31 +107,31 @@ lazy_static! 
{ * Fork Choice */ pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( - "fork_choice_requests", + "beacon_fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( - "fork_choice_errors", + "beacon_fork_choice_errors", "Count of occasions where fork choice has returned an error when trying to find a head" ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( - "fork_choice_changed_head", + "beacon_fork_choice_changed_head", "Count of occasions fork choice has found a new head" ); pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( - "fork_choice_reorg_count", + "beacon_fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("beacon_fork_choice_time", "Full runtime of fork choice"); pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + try_create_histogram("beacon_fork_choice_find_head_time", "Full runtime of fork choice find_head function"); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( - "fork_choice_process_block_time", + "beacon_fork_choice_process_block_time", "Time taken to add a block and all attestations to fork choice" ); pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( - "fork_choice_process_attestation_time", + "beacon_fork_choice_process_attestation_time", "Time taken to add an attestation to fork choice" ); @@ -139,7 +139,7 @@ lazy_static! 
{ * Persisting BeaconChain to disk */ pub static ref PERSIST_CHAIN: Result = - try_create_histogram("persist_chain", "Time taken to update the canonical head"); + try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head"); } // Lazy-static is split so we don't reach the crate-level recursion limit. @@ -148,45 +148,45 @@ lazy_static! { * Slot Clock */ pub static ref PRESENT_SLOT: Result = - try_create_int_gauge("present_slot", "The present slot, according to system time"); + try_create_int_gauge("beacon_present_slot", "The present slot, according to system time"); pub static ref PRESENT_EPOCH: Result = - try_create_int_gauge("present_epoch", "The present epoch, according to system time"); + try_create_int_gauge("beacon_present_epoch", "The present epoch, according to system time"); /* * Chain Head */ pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("update_head_times", "Time taken to update the canonical head"); + try_create_histogram("beacon_update_head_times", "Time taken to update the canonical head"); pub static ref HEAD_STATE_SLOT: Result = - try_create_int_gauge("head_state_slot", "Slot of the block at the head of the chain"); + try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain"); pub static ref HEAD_STATE_ROOT: Result = - try_create_int_gauge("head_state_root", "Root of the block at the head of the chain"); + try_create_int_gauge("beacon_head_state_root", "Root of the block at the head of the chain"); pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result = - try_create_int_gauge("head_state_latest_block_slot", "Latest block slot at the head of the chain"); + try_create_int_gauge("beacon_head_state_latest_block_slot", "Latest block slot at the head of the chain"); pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result = - try_create_int_gauge("head_state_current_justified_root", "Current justified root at the head of the chain"); + 
try_create_int_gauge("beacon_head_state_current_justified_root", "Current justified root at the head of the chain"); pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result = - try_create_int_gauge("head_state_previous_justified_root", "Previous justified root at the head of the chain"); + try_create_int_gauge("beacon_head_state_previous_justified_root", "Previous justified root at the head of the chain"); pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_ROOT: Result = - try_create_int_gauge("head_state_finalized_root", "Finalized root at the head of the chain"); + try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = - try_create_int_gauge("head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = - try_create_int_gauge("head_state_total_validators", "Count of validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_total_validators", "Count of validators at the head of the chain"); pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = - try_create_int_gauge("head_state_active_validators", "Count of active validators at the head of the chain"); + 
try_create_int_gauge("beacon_head_state_active_validators", "Count of active validators at the head of the chain"); pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = - try_create_int_gauge("head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_validator_balances", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = - try_create_int_gauge("head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = - try_create_int_gauge("head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = - try_create_int_gauge("head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); + try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, From 0b4a8893a4a94826b723e39b29ea8aaf64bb8912 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:49:09 +1000 Subject: [PATCH 063/186] Add more store metrics --- beacon_node/store/src/leveldb_store.rs | 22 ++++++++++++++++++-- beacon_node/store/src/metrics.rs | 26 +++++++++++++++++++++++- eth2/utils/lighthouse_metrics/src/lib.rs | 6 ++++++ 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 699861e3ae..a085d845a8 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ 
b/beacon_node/store/src/leveldb_store.rs @@ -1,4 +1,5 @@ use super::*; +use crate::metrics; use db_key::Key; use leveldb::database::kv::KV; use leveldb::database::Database; @@ -62,15 +63,27 @@ impl Store for LevelDB { fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = Self::get_key_for_col(col, key); - self.db + metrics::inc_counter(&metrics::DISK_DB_READ_COUNT); + + let result = self + .db .get(self.read_options(), column_key) - .map_err(Into::into) + .map_err(Into::into); + + if let Ok(Some(bytes)) = &result { + metrics::inc_counter_by(&metrics::DISK_DB_READ_BYTES, bytes.len() as i64) + } + + result } /// Store some `value` in `column`, indexed with `key`. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); + metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT); + metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64); + self.db .put(self.write_options(), column_key, val) .map_err(Into::into) @@ -80,6 +93,8 @@ impl Store for LevelDB { fn key_exists(&self, col: &str, key: &[u8]) -> Result { let column_key = Self::get_key_for_col(col, key); + metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT); + self.db .get(self.read_options(), column_key) .map_err(Into::into) @@ -89,6 +104,9 @@ impl Store for LevelDB { /// Removes `key` from `column`. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); + + metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT); + self.db .delete(self.write_options(), column_key) .map_err(Into::into) diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index b6a055f102..430e9c38e5 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -5,7 +5,31 @@ use std::path::PathBuf; lazy_static! 
{ pub static ref DISK_DB_SIZE: Result = - try_create_int_gauge("database_size", "Size of the on-disk database (bytes)"); + try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); + pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( + "store_disk_db_write_bytes", + "Number of bytes attempted to be written to the on-disk DB" + ); + pub static ref DISK_DB_READ_BYTES: Result = try_create_int_counter( + "store_disk_db_read_bytes", + "Number of bytes read from the on-disk DB" + ); + pub static ref DISK_DB_READ_COUNT: Result = try_create_int_counter( + "store_disk_db_read_count", + "Total number of reads to the on-disk DB" + ); + pub static ref DISK_DB_WRITE_COUNT: Result = try_create_int_counter( + "store_disk_db_write_count", + "Total number of writes to the on-disk DB" + ); + pub static ref DISK_DB_EXISTS_COUNT: Result = try_create_int_counter( + "store_disk_db_exists_count", + "Total number of checks if a key is in the on-disk DB" + ); + pub static ref DISK_DB_DELETE_COUNT: Result = try_create_int_counter( + "store_disk_db_delete_count", + "Total number of deletions from the on-disk DB" + ); } /// Updates the global metrics registry with store-related information. 
diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index d55fcd3e21..a8656d0171 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -41,6 +41,12 @@ pub fn inc_counter(counter: &Result) { } } +pub fn inc_counter_by(counter: &Result, value: i64) { + if let Ok(counter) = counter { + counter.inc_by(value); + } +} + pub fn set_gauge(gauge: &Result, value: i64) { if let Ok(gauge) = gauge { gauge.set(value); From cac0e5c83284fb05cf9d465cb8e2fc8dc0f3e4aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 14:16:20 +1000 Subject: [PATCH 064/186] Add basic metrics to libp2p --- beacon_node/eth2-libp2p/Cargo.toml | 2 ++ beacon_node/eth2-libp2p/src/discovery.rs | 4 ++++ beacon_node/eth2-libp2p/src/lib.rs | 4 ++++ beacon_node/eth2-libp2p/src/metrics.rs | 16 ++++++++++++++++ 4 files changed, 26 insertions(+) create mode 100644 beacon_node/eth2-libp2p/src/metrics.rs diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 794b097128..006b895a1c 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -26,3 +26,5 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = "0.4.12" +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4c1794945d..d9f2f7465a 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -1,3 +1,4 @@ +use crate::metrics; use crate::{error, NetworkConfig}; /// This manages the discovery and management of peers. 
/// @@ -158,10 +159,12 @@ where } fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { + metrics::inc_counter(&metrics::PEER_CONNECT_COUNT); self.connected_peers.insert(peer_id); } fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) { + metrics::inc_counter(&metrics::PEER_DISCONNECT_COUNT); self.connected_peers.remove(peer_id); } @@ -217,6 +220,7 @@ where } Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "IP" => format!("{}",socket.ip())); + metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); let mut address = Multiaddr::from(socket.ip()); address.push(Protocol::Tcp(self.tcp_port)); let enr = self.discovery.local_enr(); diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 54a4f2a998..33d5ba9ed9 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -2,10 +2,14 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. +#[macro_use] +extern crate lazy_static; + pub mod behaviour; mod config; mod discovery; pub mod error; +mod metrics; pub mod rpc; mod service; diff --git a/beacon_node/eth2-libp2p/src/metrics.rs b/beacon_node/eth2-libp2p/src/metrics.rs new file mode 100644 index 0000000000..a47037669e --- /dev/null +++ b/beacon_node/eth2-libp2p/src/metrics.rs @@ -0,0 +1,16 @@ +pub use lighthouse_metrics::*; + +lazy_static! 
{ + pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( + "libp2p_address_update_count", + "Count of libp2p socked updated events (when our view of our IP address has changed)" + ); + pub static ref PEER_CONNECT_COUNT: Result = try_create_int_counter( + "libp2p_peer_connect_count", + "Count of libp2p peer connect events (not the current number of connected peers)" + ); + pub static ref PEER_DISCONNECT_COUNT: Result = try_create_int_counter( + "libp2p_peer_disconnect_count", + "Count of libp2p peer disconnect events" + ); +} From af334b2cf0a6278c576b23d93d1748fdb4a51960 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 14:30:46 +1000 Subject: [PATCH 065/186] Add metrics to HTTP server --- beacon_node/rest_api/Cargo.toml | 2 ++ beacon_node/rest_api/src/lib.rs | 17 +++++++++++++---- beacon_node/rest_api/src/metrics.rs | 21 +++++++++++++++++++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 821d6c0ea1..100e680de8 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -24,3 +24,5 @@ futures = "0.1" exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index fea67618ba..57019deea0 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,5 +1,6 @@ -extern crate futures; -extern crate hyper; +#[macro_use] +extern crate lazy_static; + mod beacon; mod config; mod helpers; @@ -100,6 +101,9 @@ pub fn start_server( // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { + metrics::inc_counter(&metrics::REQUEST_COUNT); + let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); + req.extensions_mut().insert::(log.clone()); req.extensions_mut() .insert::>>(beacon_chain.clone()); @@ -117,9 +121,10 @@ pub fn start_server( _ => Err(ApiError::MethodNotAllowed(path.clone())), }; - match result { + let response = match result { // Return the `hyper::Response`. Ok(response) => { + metrics::inc_counter(&metrics::SUCCESS_COUNT); slog::debug!(log, "Request successful: {:?}", path); response } @@ -128,7 +133,11 @@ pub fn start_server( slog::debug!(log, "Request failure: {:?}", path); e.into() } - } + }; + + metrics::stop_timer(timer); + + response }) }; diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 0cd700c445..c0db810b60 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -4,7 +4,28 @@ use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; use std::sync::Arc; +pub use lighthouse_metrics::*; + +lazy_static! { + pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( + "http_server_request_response_time", + "Time taken to build a response to a HTTP request" + ); + pub static ref REQUEST_COUNT: Result = try_create_int_counter( + "http_server_request_count", + "Total count of HTTP requests received" + ); + pub static ref SUCCESS_COUNT: Result = try_create_int_counter( + "http_server_success_count", + "Total count of HTTP 200 responses sent" + ); +} + /// Returns the full set of Prometheus metrics for the Beacon Node application. +/// +/// # Note +/// +/// This is a HTTP handler method. 
pub fn get_prometheus(req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); From 6a1e5f6d26c4dffd126faaedd970b4d8446a1ce3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 15:19:39 +1000 Subject: [PATCH 066/186] Remove old `http_server` crate --- Cargo.toml | 1 - beacon_node/client/Cargo.toml | 1 - beacon_node/client/src/config.rs | 4 - beacon_node/client/src/lib.rs | 22 --- beacon_node/http_server/Cargo.toml | 23 --- beacon_node/http_server/src/api.rs | 71 -------- beacon_node/http_server/src/key.rs | 33 ---- beacon_node/http_server/src/lib.rs | 145 ----------------- beacon_node/http_server/src/metrics.rs | 72 -------- .../http_server/src/metrics/local_metrics.rs | 154 ------------------ beacon_node/rest_api/src/config.rs | 2 +- docs/config_examples/beacon-node.toml | 10 +- 12 files changed, 2 insertions(+), 536 deletions(-) delete mode 100644 beacon_node/http_server/Cargo.toml delete mode 100644 beacon_node/http_server/src/api.rs delete mode 100644 beacon_node/http_server/src/key.rs delete mode 100644 beacon_node/http_server/src/lib.rs delete mode 100644 beacon_node/http_server/src/metrics.rs delete mode 100644 beacon_node/http_server/src/metrics/local_metrics.rs diff --git a/Cargo.toml b/Cargo.toml index 9b7b87a0d0..f087539e6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,6 @@ members = [ "beacon_node", "beacon_node/store", "beacon_node/client", - "beacon_node/http_server", "beacon_node/rest_api", "beacon_node/network", "beacon_node/eth2-libp2p", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8c72fa4171..b13f175a9b 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -http_server = { path = "../http_server" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } prometheus = "^0.6" diff --git 
a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ee62b62815..fcc2cc7dac 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,6 +1,5 @@ use crate::Eth2Config; use clap::ArgMatches; -use http_server::HttpServerConfig; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use slog::{info, o, Drain}; @@ -25,7 +24,6 @@ pub struct Config { pub genesis_state: GenesisState, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, - pub http: HttpServerConfig, pub rest_api: rest_api::ApiConfig, } @@ -59,7 +57,6 @@ impl Default for Config { db_name: "chain_db".to_string(), network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), - http: HttpServerConfig::default(), rest_api: rest_api::ApiConfig::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), genesis_state: GenesisState::RecentGenesis { @@ -143,7 +140,6 @@ impl Config { self.network.apply_cli_args(args)?; self.rpc.apply_cli_args(args)?; - self.http.apply_cli_args(args)?; self.rest_api.apply_cli_args(args)?; if let Some(log_file) = args.value_of("logfile") { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index c74787f606..5c37ac3e9c 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -10,7 +10,6 @@ use beacon_chain::BeaconChain; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use prometheus::Registry; use slog::{error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -36,8 +35,6 @@ pub struct Client { pub network: Arc>, /// Signal to terminate the RPC server. pub rpc_exit_signal: Option, - /// Signal to terminate the HTTP server. - pub http_exit_signal: Option, /// Signal to terminate the slot timer. 
pub slot_timer_exit_signal: Option, /// Signal to terminate the API @@ -60,7 +57,6 @@ where log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { - let metrics_registry = Registry::new(); let store = Arc::new(store); let seconds_per_slot = eth2_config.spec.seconds_per_slot; @@ -119,23 +115,6 @@ where None }; - // Start the `http_server` service. - // - // Note: presently we are ignoring the config and _always_ starting a HTTP server. - let http_exit_signal = if client_config.http.enabled { - Some(http_server::start_service( - &client_config.http, - executor, - network_send, - beacon_chain.clone(), - client_config.db_path().expect("unable to read datadir"), - metrics_registry, - &log, - )) - } else { - None - }; - // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -184,7 +163,6 @@ where Ok(Client { _client_config: client_config, beacon_chain, - http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), api_exit_signal, diff --git a/beacon_node/http_server/Cargo.toml b/beacon_node/http_server/Cargo.toml deleted file mode 100644 index e87ff29972..0000000000 --- a/beacon_node/http_server/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "http_server" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -beacon_chain = { path = "../beacon_chain" } -iron = "^0.6" -router = "^0.6" -network = { path = "../network" } -types = { path = "../../eth2/types" } -slot_clock = { path = "../../eth2/utils/slot_clock" } -persistent = "^0.4" -prometheus = { version = "^0.6", features = ["process"] } -clap = "2.32.0" -futures = "0.1.23" -serde = "1.0" -serde_derive = "1.0" -serde_json = "1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -tokio = "0.1.17" -exit-future = "0.1.4" diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs deleted file mode 100644 index 8cb023b02c..0000000000 --- 
a/beacon_node/http_server/src/api.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::{key::BeaconChainKey, map_persistent_err_to_500}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::prelude::*; -use iron::{ - headers::{CacheControl, CacheDirective, ContentType}, - status::Status, - AfterMiddleware, Handler, IronResult, Request, Response, -}; -use persistent::Read; -use router::Router; -use serde_json::json; -use std::sync::Arc; - -/// Yields a handler for the HTTP API. -pub fn build_handler( - beacon_chain: Arc>, -) -> impl Handler { - let mut router = Router::new(); - - router.get("/node/fork", handle_fork::, "fork"); - - let mut chain = Chain::new(router); - - // Insert `BeaconChain` so it may be accessed in a request. - chain.link(Read::>::both(beacon_chain.clone())); - // Set the content-type headers. - chain.link_after(SetJsonContentType); - // Set the cache headers. - chain.link_after(SetCacheDirectives); - - chain -} - -/// Sets the `cache-control` headers on _all_ responses, unless they are already set. -struct SetCacheDirectives; -impl AfterMiddleware for SetCacheDirectives { - fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { - // This is run for every requests, AFTER all handlers have been executed - if resp.headers.get::() == None { - resp.headers.set(CacheControl(vec![ - CacheDirective::NoCache, - CacheDirective::NoStore, - ])); - } - Ok(resp) - } -} - -/// Sets the `content-type` headers on _all_ responses, unless they are already set. 
-struct SetJsonContentType; -impl AfterMiddleware for SetJsonContentType { - fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { - if resp.headers.get::() == None { - resp.headers.set(ContentType::json()); - } - Ok(resp) - } -} - -fn handle_fork(req: &mut Request) -> IronResult { - let beacon_chain = req - .get::>>() - .map_err(map_persistent_err_to_500)?; - - let response = json!({ - "fork": beacon_chain.head().beacon_state.fork, - "network_id": beacon_chain.spec.network_id - }); - - Ok(Response::with((Status::Ok, response.to_string()))) -} diff --git a/beacon_node/http_server/src/key.rs b/beacon_node/http_server/src/key.rs deleted file mode 100644 index a69da6747f..0000000000 --- a/beacon_node/http_server/src/key.rs +++ /dev/null @@ -1,33 +0,0 @@ -use crate::metrics::LocalMetrics; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::typemap::Key; -use prometheus::Registry; -use std::marker::PhantomData; -use std::path::PathBuf; -use std::sync::Arc; - -pub struct BeaconChainKey { - _phantom: PhantomData, -} - -impl Key for BeaconChainKey { - type Value = Arc>; -} - -pub struct MetricsRegistryKey; - -impl Key for MetricsRegistryKey { - type Value = Registry; -} - -pub struct LocalMetricsKey; - -impl Key for LocalMetricsKey { - type Value = LocalMetrics; -} - -pub struct DBPathKey; - -impl Key for DBPathKey { - type Value = PathBuf; -} diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs deleted file mode 100644 index f1d006a5bc..0000000000 --- a/beacon_node/http_server/src/lib.rs +++ /dev/null @@ -1,145 +0,0 @@ -mod api; -mod key; -mod metrics; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use clap::ArgMatches; -use futures::Future; -use iron::prelude::*; -use network::NetworkMessage; -use prometheus::Registry; -use router::Router; -use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, warn}; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::runtime::TaskExecutor; -use 
tokio::sync::mpsc; - -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct HttpServerConfig { - pub enabled: bool, - pub listen_address: String, - pub listen_port: String, -} - -impl Default for HttpServerConfig { - fn default() -> Self { - Self { - enabled: false, - listen_address: "127.0.0.1".to_string(), - listen_port: "5052".to_string(), - } - } -} - -impl HttpServerConfig { - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("http") { - self.enabled = true; - } - - if let Some(listen_address) = args.value_of("http-address") { - self.listen_address = listen_address.to_string(); - } - - if let Some(listen_port) = args.value_of("http-port") { - self.listen_port = listen_port.to_string(); - } - - Ok(()) - } -} - -/// Build the `iron` HTTP server, defining the core routes. -pub fn create_iron_http_server( - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, -) -> Iron { - let mut router = Router::new(); - - // A `GET` request to `/metrics` is handled by the `metrics` module. - router.get( - "/metrics", - metrics::build_handler(beacon_chain.clone(), db_path, metrics_registry), - "metrics", - ); - - // Any request to all other endpoints is handled by the `api` module. - router.any("/*", api::build_handler(beacon_chain.clone()), "api"); - - Iron::new(router) -} - -/// Start the HTTP service on the tokio `TaskExecutor`. -pub fn start_service( - config: &HttpServerConfig, - executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender, - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, - log: &slog::Logger, -) -> exit_future::Signal { - let log = log.new(o!("Service"=>"HTTP")); - - // Create: - // - `shutdown_trigger` a one-shot to shut down this service. - // - `wait_for_shutdown` a future that will wait until someone calls shutdown. - let (shutdown_trigger, wait_for_shutdown) = exit_future::signal(); - - // Create an `iron` http, without starting it yet. 
- let iron = create_iron_http_server(beacon_chain, db_path, metrics_registry); - - // Create a HTTP server future. - // - // 1. Start the HTTP server - // 2. Build an exit future that will shutdown the server when requested. - // 3. Return the exit future, so the caller may shutdown the service when desired. - let http_service = { - let listen_address = format!("{}:{}", config.listen_address, config.listen_port); - // Start the HTTP server - let server_start_result = iron.http(listen_address.clone()); - - if server_start_result.is_ok() { - info!(log, "HTTP server running on {}", listen_address); - } else { - warn!(log, "HTTP server failed to start on {}", listen_address); - } - - // Build a future that will shutdown the HTTP server when the `shutdown_trigger` is - // triggered. - wait_for_shutdown.and_then(move |_| { - info!(log, "HTTP server shutting down"); - - if let Ok(mut server) = server_start_result { - // According to the documentation, `server.close()` "doesn't work" and the server - // keeps listening. - // - // It is being called anyway, because it seems like the right thing to do. If you - // know this has negative side-effects, please create an issue to discuss. - // - // See: https://docs.rs/iron/0.6.0/iron/struct.Listening.html#impl - match server.close() { - _ => (), - }; - } - info!(log, "HTTP server shutdown complete."); - Ok(()) - }) - }; - - // Attach the HTTP server to the executor. - executor.spawn(http_service); - - shutdown_trigger -} - -/// Helper function for mapping a failure to read state to a 500 server error. 
-fn map_persistent_err_to_500(e: persistent::PersistentError) -> iron::error::IronError { - iron::error::IronError { - error: Box::new(e), - response: iron::Response::with(iron::status::Status::InternalServerError), - } -} diff --git a/beacon_node/http_server/src/metrics.rs b/beacon_node/http_server/src/metrics.rs deleted file mode 100644 index 1b1ed1f3d4..0000000000 --- a/beacon_node/http_server/src/metrics.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::{ - key::{BeaconChainKey, DBPathKey, LocalMetricsKey, MetricsRegistryKey}, - map_persistent_err_to_500, -}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::prelude::*; -use iron::{status::Status, Handler, IronResult, Request, Response}; -use persistent::Read; -use prometheus::{Encoder, Registry, TextEncoder}; -use std::path::PathBuf; -use std::sync::Arc; - -pub use local_metrics::LocalMetrics; - -mod local_metrics; - -/// Yields a handler for the metrics endpoint. -pub fn build_handler( - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, -) -> impl Handler { - let mut chain = Chain::new(handle_metrics::); - - let local_metrics = LocalMetrics::new().unwrap(); - local_metrics.register(&metrics_registry).unwrap(); - - chain.link(Read::>::both(beacon_chain)); - chain.link(Read::::both(metrics_registry)); - chain.link(Read::::both(local_metrics)); - chain.link(Read::::both(db_path)); - - chain -} - -/// Handle a request for Prometheus metrics. -/// -/// Returns a text string containing all metrics. -fn handle_metrics(req: &mut Request) -> IronResult { - let beacon_chain = req - .get::>>() - .map_err(map_persistent_err_to_500)?; - - let r = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - let local_metrics = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - let db_path = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - // Update metrics that are calculated on each scrape. 
- local_metrics.update(&beacon_chain, &db_path); - - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - - // Gather `DEFAULT_REGISTRY` metrics. - encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); - - // Gather metrics from our registry. - let metric_families = r.gather(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let prom_string = String::from_utf8(buffer).unwrap(); - - Ok(Response::with((Status::Ok, prom_string))) -} diff --git a/beacon_node/http_server/src/metrics/local_metrics.rs b/beacon_node/http_server/src/metrics/local_metrics.rs deleted file mode 100644 index b342cca81c..0000000000 --- a/beacon_node/http_server/src/metrics/local_metrics.rs +++ /dev/null @@ -1,154 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use prometheus::{IntGauge, Opts, Registry}; -use slot_clock::SlotClock; -use std::fs; -use std::path::PathBuf; -use types::{EthSpec, Slot}; - -// If set to `true` will iterate and sum the balances of all validators in the state for each -// scrape. -const SHOULD_SUM_VALIDATOR_BALANCES: bool = true; - -pub struct LocalMetrics { - present_slot: IntGauge, - present_epoch: IntGauge, - best_slot: IntGauge, - best_beacon_block_root: IntGauge, - justified_beacon_block_root: IntGauge, - finalized_beacon_block_root: IntGauge, - validator_count: IntGauge, - justified_epoch: IntGauge, - finalized_epoch: IntGauge, - validator_balances_sum: IntGauge, - database_size: IntGauge, -} - -impl LocalMetrics { - /// Create a new instance. - pub fn new() -> Result { - Ok(Self { - present_slot: { - let opts = Opts::new("present_slot", "slot_at_time_of_scrape"); - IntGauge::with_opts(opts)? - }, - present_epoch: { - let opts = Opts::new("present_epoch", "epoch_at_time_of_scrape"); - IntGauge::with_opts(opts)? - }, - best_slot: { - let opts = Opts::new("best_slot", "slot_of_block_at_chain_head"); - IntGauge::with_opts(opts)? 
- }, - best_beacon_block_root: { - let opts = Opts::new("best_beacon_block_root", "root_of_block_at_chain_head"); - IntGauge::with_opts(opts)? - }, - justified_beacon_block_root: { - let opts = Opts::new( - "justified_beacon_block_root", - "root_of_block_at_justified_head", - ); - IntGauge::with_opts(opts)? - }, - finalized_beacon_block_root: { - let opts = Opts::new( - "finalized_beacon_block_root", - "root_of_block_at_finalized_head", - ); - IntGauge::with_opts(opts)? - }, - validator_count: { - let opts = Opts::new("validator_count", "number_of_validators"); - IntGauge::with_opts(opts)? - }, - justified_epoch: { - let opts = Opts::new("justified_epoch", "state_justified_epoch"); - IntGauge::with_opts(opts)? - }, - finalized_epoch: { - let opts = Opts::new("finalized_epoch", "state_finalized_epoch"); - IntGauge::with_opts(opts)? - }, - validator_balances_sum: { - let opts = Opts::new("validator_balances_sum", "sum_of_all_validator_balances"); - IntGauge::with_opts(opts)? - }, - database_size: { - let opts = Opts::new("database_size", "size_of_on_disk_db_in_mb"); - IntGauge::with_opts(opts)? - }, - }) - } - - /// Registry this instance with the `registry`. 
- pub fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> { - registry.register(Box::new(self.present_slot.clone()))?; - registry.register(Box::new(self.present_epoch.clone()))?; - registry.register(Box::new(self.best_slot.clone()))?; - registry.register(Box::new(self.best_beacon_block_root.clone()))?; - registry.register(Box::new(self.justified_beacon_block_root.clone()))?; - registry.register(Box::new(self.finalized_beacon_block_root.clone()))?; - registry.register(Box::new(self.validator_count.clone()))?; - registry.register(Box::new(self.finalized_epoch.clone()))?; - registry.register(Box::new(self.justified_epoch.clone()))?; - registry.register(Box::new(self.validator_balances_sum.clone()))?; - registry.register(Box::new(self.database_size.clone()))?; - - Ok(()) - } - - /// Update the metrics in `self` to the latest values. - pub fn update(&self, beacon_chain: &BeaconChain, db_path: &PathBuf) { - let state = &beacon_chain.head().beacon_state; - - let present_slot = beacon_chain - .slot_clock - .present_slot() - .unwrap_or_else(|_| None) - .unwrap_or_else(|| Slot::new(0)); - self.present_slot.set(present_slot.as_u64() as i64); - self.present_epoch - .set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64); - - self.best_slot.set(state.slot.as_u64() as i64); - self.best_beacon_block_root - .set(beacon_chain.head().beacon_block_root.to_low_u64_le() as i64); - self.justified_beacon_block_root.set( - beacon_chain - .head() - .beacon_state - .current_justified_checkpoint - .root - .to_low_u64_le() as i64, - ); - self.finalized_beacon_block_root.set( - beacon_chain - .head() - .beacon_state - .finalized_checkpoint - .root - .to_low_u64_le() as i64, - ); - self.validator_count.set(state.validators.len() as i64); - self.justified_epoch - .set(state.current_justified_checkpoint.epoch.as_u64() as i64); - self.finalized_epoch - .set(state.finalized_checkpoint.epoch.as_u64() as i64); - if SHOULD_SUM_VALIDATOR_BALANCES { - 
self.validator_balances_sum - .set(state.balances.iter().sum::() as i64); - } - let db_size = if let Ok(iter) = fs::read_dir(db_path) { - iter.filter_map(Result::ok) - .map(size_of_dir_entry) - .fold(0_u64, |sum, val| sum + val) - } else { - 0 - }; - self.database_size.set(db_size as i64); - } -} - -fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { - dir.metadata().map(|m| m.len()).unwrap_or(0) -} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index c4a9c738a0..90ac0821b1 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -18,7 +18,7 @@ impl Default for Config { Config { enabled: true, // rest_api enabled by default listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 1248, + port: 5052, } } } diff --git a/docs/config_examples/beacon-node.toml b/docs/config_examples/beacon-node.toml index 3c9f8b613c..f0863934e7 100644 --- a/docs/config_examples/beacon-node.toml +++ b/docs/config_examples/beacon-node.toml @@ -78,14 +78,6 @@ enabled = false listen_address = "127.0.0.1" port = 5051 -# -# Legacy HTTP server configuration. To be removed. -# -[http] -enabled = false -listen_address = "127.0.0.1" -listen_port = "5052" - # # RESTful HTTP API server configuration. # @@ -95,4 +87,4 @@ enabled = true # The listen port for the HTTP server. listen_address = "127.0.0.1" # The listen port for the HTTP server. 
-port = 1248 +port = 5052 From 95a320817e0c724b6d4ed64b9bf2fefacc918aa6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 15:40:51 +1000 Subject: [PATCH 067/186] Update metrics names to be more like standard --- beacon_node/beacon_chain/src/metrics.rs | 65 +++++++++++++------------ beacon_node/rest_api/src/metrics.rs | 6 +-- beacon_node/store/src/metrics.rs | 12 ++--- 3 files changed, 43 insertions(+), 40 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 227f1090f7..00a3e5eb2e 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -7,45 +7,45 @@ lazy_static! { * Block Processing */ pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_block_processing_requests", + "beacon_block_processing_requests_total", "Count of blocks submitted for processing" ); pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_block_processing_successes", + "beacon_block_processing_successes_total", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - try_create_histogram("block_processing_times", "Full runtime of block processing"); + try_create_histogram("block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "beacon_block_processing_db_read_times", + "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "beacon_block_processing_catch_up_state_times", + "beacon_block_processing_catch_up_state_seconds", "Time spent skipping slots on a state before processing a block." 
); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( - "beacon_block_processing_committee_building_times", + "beacon_block_processing_committee_building_seconds", "Time spent building/obtaining committees for block processing." ); pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( - "beacon_block_processing_core_times", + "beacon_block_processing_core_seconds", "Time spent doing the core per_block_processing state processing." ); pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( - "beacon_block_processing_state_root_times", + "beacon_block_processing_state_root_seconds", "Time spent calculating the state root when processing a block." ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( - "beacon_block_processing_db_write_times", + "beacon_block_processing_db_write_seconds", "Time spent writing a newly processed block and state to DB" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( - "beacon_block_processing_fork_choice_register_times", + "beacon_block_processing_fork_choice_register_seconds", "Time spent registering the new block with fork choice (but not finding head)" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( - "beacon_block_processing_fork_choice_find_head_times", + "beacon_block_processing_fork_choice_find_head_seconds", "Time spent finding the new head after processing a new block" ); @@ -53,21 +53,21 @@ lazy_static! { * Block Production */ pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "beacon_block_production_requests", + "beacon_block_production_requests_total", "Count of all block production requests" ); pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "beacon_block_production_successes", + "beacon_block_production_successes_total", "Count of blocks successfully produced." 
); pub static ref BLOCK_PRODUCTION_TIMES: Result = - try_create_histogram("beacon_block_production_times", "Full runtime of block production"); + try_create_histogram("beacon_block_production_seconds", "Full runtime of block production"); /* * Block Statistics */ pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( - "beacon_operations_per_block_attestation", + "beacon_operations_per_block_attestation_total", "Number of attestations in a block" ); @@ -75,15 +75,15 @@ lazy_static! { * Attestation Processing */ pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_processing_requests", + "beacon_attestation_processing_requests_total", "Count of all attestations submitted for processing" ); pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_processing_successes", + "beacon_attestation_processing_successes_total", "total_attestation_processing_successes" ); pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_times", + "beacon_attestation_processing_seconds", "Full runtime of attestation processing" ); @@ -91,15 +91,15 @@ lazy_static! { * Attestation Production */ pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_production_requests", + "beacon_attestation_production_requests_total", "Count of all attestation production requests" ); pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_production_successes", + "beacon_attestation_production_successes_total", "Count of attestations processed without error" ); pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( - "beacon_attestation_production_times", + "beacon_attestation_production_seconds", "Full runtime of attestation production" ); @@ -107,19 +107,19 @@ lazy_static! 
{ * Fork Choice */ pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( - "beacon_fork_choice_requests", + "beacon_fork_choice_requests_total", "Count of occasions where fork choice has tried to find a head" ); pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( - "beacon_fork_choice_errors", + "beacon_fork_choice_errors_total", "Count of occasions where fork choice has returned an error when trying to find a head" ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( - "beacon_fork_choice_changed_head", + "beacon_fork_choice_changed_head_total", "Count of occasions fork choice has found a new head" ); pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( - "beacon_fork_choice_reorg_count", + "beacon_fork_choice_reorg_total", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = @@ -156,7 +156,7 @@ lazy_static! { * Chain Head */ pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("beacon_update_head_times", "Time taken to update the canonical head"); + try_create_histogram("beacon_update_head_seconds", "Time taken to update the canonical head"); pub static ref HEAD_STATE_SLOT: Result = try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain"); pub static ref HEAD_STATE_ROOT: Result = @@ -175,16 +175,18 @@ lazy_static! 
{ try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + pub static ref HEAD_STATE_SHARDS: Result = + try_create_int_gauge("beacon_head_state_shard_total", "Count of shards in the beacon chain"); pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_total_validators", "Count of validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain"); pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_active_validators", "Count of active validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_active_validators_total", "Count of active validators at the head of the chain"); pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = - try_create_int_gauge("beacon_head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_validator_balances_total", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_slashed_validators_total", "Count of all slashed validators at the head of the chain"); pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = 
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); } @@ -242,6 +244,7 @@ fn scrape_head_state(state: &BeaconState, state &HEAD_STATE_FINALIZED_EPOCH, state.finalized_checkpoint.epoch, ); + set_gauge_by_usize(&HEAD_STATE_SHARDS, state.previous_crosslinks.len()); set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); set_gauge_by_u64( &HEAD_STATE_VALIDATOR_BALANCES, diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index c0db810b60..b0f1c1b980 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -8,15 +8,15 @@ pub use lighthouse_metrics::*; lazy_static! { pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( - "http_server_request_response_time", + "http_server_request_duration_seconds", "Time taken to build a response to a HTTP request" ); pub static ref REQUEST_COUNT: Result = try_create_int_counter( - "http_server_request_count", + "http_server_request_total", "Total count of HTTP requests received" ); pub static ref SUCCESS_COUNT: Result = try_create_int_counter( - "http_server_success_count", + "http_server_success_total", "Total count of HTTP 200 responses sent" ); } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 430e9c38e5..30cbb878b6 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -7,27 +7,27 @@ lazy_static! 
{ pub static ref DISK_DB_SIZE: Result = try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( - "store_disk_db_write_bytes", + "store_disk_db_write_bytes_total", "Number of bytes attempted to be written to the on-disk DB" ); pub static ref DISK_DB_READ_BYTES: Result = try_create_int_counter( - "store_disk_db_read_bytes", + "store_disk_db_read_bytes_total", "Number of bytes read from the on-disk DB" ); pub static ref DISK_DB_READ_COUNT: Result = try_create_int_counter( - "store_disk_db_read_count", + "store_disk_db_read_count_total", "Total number of reads to the on-disk DB" ); pub static ref DISK_DB_WRITE_COUNT: Result = try_create_int_counter( - "store_disk_db_write_count", + "store_disk_db_write_count_total", "Total number of writes to the on-disk DB" ); pub static ref DISK_DB_EXISTS_COUNT: Result = try_create_int_counter( - "store_disk_db_exists_count", + "store_disk_db_exists_count_total", "Total number of checks if a key is in the on-disk DB" ); pub static ref DISK_DB_DELETE_COUNT: Result = try_create_int_counter( - "store_disk_db_delete_count", + "store_disk_db_delete_count_total", "Total number of deletions from the on-disk DB" ); } From d7c546844cfaf58ab63739a181fbf73c924fb4d5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 17:44:47 +1000 Subject: [PATCH 068/186] Fix broken beacon chain metrics, add slot clock metrics --- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 28 +----------------- beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/metrics.rs | 17 +++++++++++ eth2/utils/slot_clock/Cargo.toml | 2 ++ eth2/utils/slot_clock/src/lib.rs | 10 ++++++- eth2/utils/slot_clock/src/metrics.rs | 29 +++++++++++++++++++ .../slot_clock/src/system_time_slot_clock.rs | 4 +++ .../slot_clock/src/testing_slot_clock.rs | 4 +++ 9 files changed, 68 insertions(+), 28 deletions(-) create mode 100644 
eth2/utils/slot_clock/src/metrics.rs diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 1262bc5372..cc7725dd83 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "128"] // For lazy-static #[macro_use] extern crate lazy_static; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 00a3e5eb2e..a4b36cd375 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes}; pub use lighthouse_metrics::*; -use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, Epoch, Hash256, Slot}; lazy_static! { /* @@ -140,17 +140,6 @@ lazy_static! { */ pub static ref PERSIST_CHAIN: Result = try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head"); -} - -// Lazy-static is split so we don't reach the crate-level recursion limit. -lazy_static! { - /* - * Slot Clock - */ - pub static ref PRESENT_SLOT: Result = - try_create_int_gauge("beacon_present_slot", "The present slot, according to system time"); - pub static ref PRESENT_EPOCH: Result = - try_create_int_gauge("beacon_present_epoch", "The present epoch, according to system time"); /* * Chain Head @@ -194,21 +183,6 @@ lazy_static! { /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. 
pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { - set_gauge_by_slot( - &PRESENT_SLOT, - beacon_chain - .read_slot_clock() - .unwrap_or_else(|| Slot::new(0)), - ); - - set_gauge_by_epoch( - &PRESENT_EPOCH, - beacon_chain - .read_slot_clock() - .map(|s| s.epoch(T::EthSpec::slots_per_epoch())) - .unwrap_or_else(|| Epoch::new(0)), - ); - scrape_head_state::( &beacon_chain.head().beacon_state, beacon_chain.head().beacon_state_root, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 100e680de8..c7026014c4 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -26,3 +26,4 @@ tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } +slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index b0f1c1b980..f0ccef5f8d 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -39,6 +39,23 @@ pub fn get_prometheus(req: Request) -> ApiR .get::() .ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?; + // There are two categories of metrics: + // + // - Dynamically updated: things like histograms and event counters that are updated on the + // fly. + // - Statically updated: things which are only updated at the time of the scrape (used where we + // can avoid cluttering up code with metrics calls). + // + // The `prometheus` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) which + // keeps the state of all the metrics. Dynamically updated things will already be up-to-date in + // the registry (because they update themselves) however statically updated things need to be + // "scraped". + // + // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. 
Then, + // using `prometheus::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into a + // string that can be returned via HTTP. + + slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); store::scrape_for_metrics(&db_path); beacon_chain::scrape_for_metrics(&beacon_chain); diff --git a/eth2/utils/slot_clock/Cargo.toml b/eth2/utils/slot_clock/Cargo.toml index 31a4357251..c4b9df5edd 100644 --- a/eth2/utils/slot_clock/Cargo.toml +++ b/eth2/utils/slot_clock/Cargo.toml @@ -6,3 +6,5 @@ edition = "2018" [dependencies] types = { path = "../../types" } +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../lighthouse_metrics" } diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 7b86684fa4..871743c9e6 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -1,9 +1,15 @@ +#[macro_use] +extern crate lazy_static; + +mod metrics; mod system_time_slot_clock; mod testing_slot_clock; +use std::time::Duration; + pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; -use std::time::Duration; +pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { @@ -17,4 +23,6 @@ pub trait SlotClock: Send + Sync + Sized { fn present_slot(&self) -> Result, Self::Error>; fn duration_to_next_slot(&self) -> Result, Self::Error>; + + fn slot_duration_millis(&self) -> u64; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs new file mode 100644 index 0000000000..a9153a10ca --- /dev/null +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -0,0 +1,29 @@ +use crate::SlotClock; +pub use lighthouse_metrics::*; +use types::{EthSpec, Slot}; + +lazy_static! 
{ + pub static ref PRESENT_SLOT: Result = + try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot"); + pub static ref PRESENT_EPOCH: Result = + try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch"); + pub static ref MILLISECONDS_PER_SLOT: Result = try_create_int_gauge( + "slotclock_slot_time_milliseconds", + "The duration in milliseconds between each slot" + ); +} + +/// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. +pub fn scrape_for_metrics(clock: &U) { + let present_slot = match clock.present_slot() { + Ok(Some(slot)) => slot, + _ => Slot::new(0), + }; + + set_gauge(&PRESENT_SLOT, present_slot.as_u64() as i64); + set_gauge( + &PRESENT_EPOCH, + present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, + ); + set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); +} diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 7c184b02bf..c493a8be83 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -52,6 +52,10 @@ impl SlotClock for SystemTimeSlotClock { fn duration_to_next_slot(&self) -> Result, Error> { duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) } + + fn slot_duration_millis(&self) -> u64 { + self.slot_duration_seconds * 1000 + } } impl From for Error { diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index ab00d2baa7..f741d3b87a 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -40,6 +40,10 @@ impl SlotClock for TestingSlotClock { fn duration_to_next_slot(&self) -> Result, Error> { Ok(Some(Duration::from_secs(1))) } + + fn slot_duration_millis(&self) -> u64 { + 0 + } } #[cfg(test)] From 7165598b7fe3346ece3420bf808d14391106295a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: 
Mon, 12 Aug 2019 18:19:50 +1000 Subject: [PATCH 069/186] Add lighthouse_metrics gather fn --- beacon_node/rest_api/src/metrics.rs | 16 +++++++++------- eth2/utils/lighthouse_metrics/src/lib.rs | 4 ++++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index f0ccef5f8d..064359337a 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -46,20 +46,22 @@ pub fn get_prometheus(req: Request) -> ApiR // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // - // The `prometheus` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) which - // keeps the state of all the metrics. Dynamically updated things will already be up-to-date in - // the registry (because they update themselves) however statically updated things need to be - // "scraped". + // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) + // which keeps the state of all the metrics. Dynamically updated things will already be + // up-to-date in the registry (because they update themselves) however statically updated + // things need to be "scraped". // // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `prometheus::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into a - // string that can be returned via HTTP. + // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // a string that can be returned via HTTP. 
slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); store::scrape_for_metrics(&db_path); beacon_chain::scrape_for_metrics(&beacon_chain); - encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); + encoder + .encode(&lighthouse_metrics::gather(), &mut buffer) + .unwrap(); String::from_utf8(buffer) .map(|string| success_response(Body::from(string))) diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index a8656d0171..c9e66e9712 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -2,6 +2,10 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; pub use prometheus::{Histogram, IntCounter, IntGauge, Result}; +pub fn gather() -> Vec { + prometheus::gather() +} + pub fn try_create_int_counter(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); let counter = IntCounter::with_opts(opts)?; From d5d60874e5e38368e8e538f5d4f4a89b30c7423a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 18:20:05 +1000 Subject: [PATCH 070/186] Remove http args --- beacon_node/src/main.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 2e3ad06918..9a52f2638c 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -128,28 +128,6 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) - /* - * HTTP server parameters. 
- */ - .arg( - Arg::with_name("http") - .long("http") - .help("Enable the HTTP server.") - .takes_value(false), - ) - .arg( - Arg::with_name("http-address") - .long("http-address") - .value_name("Address") - .help("Listen address for the HTTP server.") - .takes_value(true), - ) - .arg( - Arg::with_name("http-port") - .long("http-port") - .help("Listen port for the HTTP server.") - .takes_value(true), - ) /* Client related arguments */ .arg( Arg::with_name("api") From 5d4d2f35e1dc59b2566af5b547a1b6132b940454 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 12 Aug 2019 22:07:59 +1000 Subject: [PATCH 071/186] Initial sync re-write. WIP --- beacon_node/eth2-libp2p/src/rpc/handler.rs | 36 +- beacon_node/network/src/message_handler.rs | 121 +++---- beacon_node/network/src/sync/manager.rs | 283 ++++++++++++++++ beacon_node/network/src/sync/simple_sync.rs | 349 +++++--------------- 4 files changed, 405 insertions(+), 384 deletions(-) create mode 100644 beacon_node/network/src/sync/manager.rs diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index dbc32c5a48..a69cd0cda9 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -1,4 +1,4 @@ -use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; +use super::methods::RequestId; use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; @@ -13,8 +13,8 @@ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -/// The time (in seconds) before a substream that is awaiting a response times out. -pub const RESPONSE_TIMEOUT: u64 = 9; +/// The time (in seconds) before a substream that is awaiting a response from the user times out. +pub const RESPONSE_TIMEOUT: u64 = 10; /// Implementation of `ProtocolsHandler` for the RPC protocol. 
pub struct RPCHandler @@ -314,7 +314,7 @@ where Ok(Async::Ready(response)) => { if let Some(response) = response { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - build_response(rpc_event, response), + RPCEvent::Response(rpc_event.id(), response), ))); } else { // stream closed early @@ -365,31 +365,3 @@ where Ok(Async::NotReady) } } - -/// Given a response back from a peer and the request that sent it, construct a response to send -/// back to the user. This allows for some data manipulation of responses given requests. -fn build_response(rpc_event: RPCEvent, rpc_response: RPCErrorResponse) -> RPCEvent { - let id = rpc_event.id(); - - // handle the types of responses - match rpc_response { - RPCErrorResponse::Success(response) => { - match response { - // if the response is block roots, tag on the extra request data - RPCResponse::BeaconBlockBodies(mut resp) => { - if let RPCEvent::Request(_id, RPCRequest::BeaconBlockBodies(bodies_req)) = - rpc_event - { - resp.block_roots = Some(bodies_req.block_roots); - } - RPCEvent::Response( - id, - RPCErrorResponse::Success(RPCResponse::BeaconBlockBodies(resp)), - ) - } - _ => RPCEvent::Response(id, RPCErrorResponse::Success(response)), - } - } - _ => RPCEvent::Response(id, rpc_response), - } -} diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index b86dcb9697..6a9a403693 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,9 +14,7 @@ use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, -}; +use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; /// Handles messages received from the network and client and organises syncing. 
pub struct MessageHandler { @@ -56,9 +54,9 @@ impl MessageHandler { let (handler_send, handler_recv) = mpsc::unbounded_channel(); // Initialise sync and begin processing in thread - // generate the Message handler let sync = SimpleSync::new(beacon_chain.clone(), &log); + // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, @@ -66,7 +64,7 @@ impl MessageHandler { log: log.clone(), }; - // spawn handler task + // spawn handler task and move the message handler instance into the spawned thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -89,11 +87,11 @@ impl MessageHandler { HandlerMessage::PeerDisconnected(peer_id) => { self.sync.on_disconnect(peer_id); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { self.handle_rpc_message(peer_id, rpc_event); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::PubsubMessage(peer_id, gossip) => { self.handle_gossip(peer_id, gossip); } @@ -106,7 +104,7 @@ impl MessageHandler { fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), - RPCEvent::Response(_id, resp) => self.handle_rpc_response(peer_id, resp), + RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), } } @@ -121,46 +119,39 @@ impl MessageHandler { &mut self.network_context, ), RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), - RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request( + RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - 
RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request( + RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. - fn handle_rpc_response(&mut self, peer_id: PeerId, error_response: RPCErrorResponse) { + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + error_response: RPCErrorResponse, + ) { // an error could have occurred. - // TODO: Handle Error gracefully match error_response { RPCErrorResponse::InvalidRequest(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Invalid Request" => error.as_string()) + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::ServerError(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Server Error" => error.as_string()) + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Unknown(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Unknown Error" => error.as_string()) + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Success(response) => { match response { @@ -171,49 +162,37 @@ impl MessageHandler { &mut self.network_context, ); } - RPCResponse::BeaconBlockRoots(response) => { - 
self.sync.on_beacon_block_roots_response( - peer_id, - response, - &mut self.network_context, - ); - } - RPCResponse::BeaconBlockHeaders(response) => { - match self.decode_block_headers(response) { - Ok(decoded_block_headers) => { - self.sync.on_beacon_block_headers_response( + RPCResponse::BeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_beacon_blocks_response( peer_id, - decoded_block_headers, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block headers";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconBlockBodies(response) => { - match self.decode_block_bodies(response) { - Ok(decoded_block_bodies) => { - self.sync.on_beacon_block_bodies_response( + RPCResponse::RecentBeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_recent_beacon_blocks_response( + request_id, peer_id, - decoded_block_bodies, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block bodies";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - // - // Theoretically, we shouldn't reach this code because we should never send a - // beacon state RPC request. 
- warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } } @@ -334,36 +313,22 @@ impl MessageHandler { /* Req/Resp Domain Decoding */ - /// Verifies and decodes the ssz-encoded block bodies received from peers. - fn decode_block_bodies( + /// Verifies and decodes an ssz-encoded list of `BeaconBlock`s. This list may contain empty + /// entries encoded with an SSZ NULL. + fn decode_beacon_blocks( &self, - bodies_response: BeaconBlockBodiesResponse, - ) -> Result, DecodeError> { + beacon_blocks: &[u8], + ) -> Result>, DecodeError> { //TODO: Implement faster block verification before decoding entirely - let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; - Ok(DecodedBeaconBlockBodiesResponse { - block_roots: bodies_response - .block_roots - .expect("Responses must have associated roots"), - block_bodies, - }) - } - - /// Verifies and decodes the ssz-encoded block headers received from peers. - fn decode_block_headers( - &self, - headers_response: BeaconBlockHeadersResponse, - ) -> Result, DecodeError> { - //TODO: Implement faster header verification before decoding entirely - Vec::from_ssz_bytes(&headers_response.headers) + Vec::from_ssz_bytes(&beacon_blocks) } } -// TODO: RPC Rewrite makes this struct fairly pointless +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. pub struct NetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender, - /// The `MessageHandler` logger. + /// Logger for the `NetworkContext`. 
log: slog::Logger, } @@ -388,7 +353,7 @@ impl NetworkContext { &mut self, peer_id: PeerId, request_id: RequestId, - rpc_response: RPCResponse, + rpc_response: RPCErrorResponse, ) { self.send_rpc_event( peer_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs new file mode 100644 index 0000000000..52c1a72c6f --- /dev/null +++ b/beacon_node/network/src/sync/manager.rs @@ -0,0 +1,283 @@ + +const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; +const SIMULTANEOUS_REQUESTS: usize = 10; +use super::simple_sync::FUTURE_SLOT_TOLERANCE; + +struct Chunk { + id: usize, + start_slot: Slot, + end_slot: Slot, + } + + +struct CompletedChunk { + peer_id: PeerId, + chunk: Chunk, + blocks: Vec, +} + +struct ProcessedChunk { + peer_id: PeerId, + chunk: Chunk, +} + +#[derive(PartialEq)] +pub enum SyncState { + Idle, + Downloading, + ColdSync { + max_wanted_slot: Slot, + max_wanted_hash: Hash256, + } +} + +pub enum SyncManagerState { + RequestBlocks(peer_id, BeaconBlockRequest), + Stalled, + Idle, +} + +pub struct PeerSyncInfo { + peer_id: PeerId, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, +} + +pub(crate) struct SyncManager { + /// A reference to the underlying beacon chain. + chain: Arc>, + /// A mapping of Peers to their respective PeerSyncInfo. + available_peers: HashMap, + wanted_chunks: Vec, + pending_chunks: HashMap, + completed_chunks: Vec, + processed_chunks: Vec, // ordered + multi_peer_sections: HashMap + + current_requests: usize, + latest_wanted_slot: Option, + sync_status: SyncStatus, + to_process_chunk_id: usize, + log: Logger, + +} + +impl SyncManager { + /// Adds a sync-able peer and determines which blocks to download given the current state of + /// the chain, known peers and currently requested blocks. 
+ fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { + + let local = PeerSyncInfo::from(&self.chain); + let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + // cold sync + if remote_finalized_slot > local.head_slot { + if let SyncState::Idle || SyncState::Downloading = self.sync_state { + info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} + } + + if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { + + // We don't assume that our current head is the canonical chain. So we request blocks from + // our last finalized slot to ensure we are on the finalized chain. + if max_wanted_slot < remote_finalized_slot { + let remaining_blocks = remote_finalized_slot - max_wanted_slot; + for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { + self.wanted_chunks.push( + Chunk { + id: self.current_chunk_id, + previous_chunk: self.curent_chunk_id.saturating_sub(1), + start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, + end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, + }) + self.current_chunk_id +=1; + } + + // add any extra partial chunks + self.pending_section.push( Section { + start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, + end_slot: remote_finalized_slot, + }) + self.current_chunk_id +=1; + + info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + + self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} + } + } + + else { // hot sync + if remote_head_slot > self.chain.head().beacon_state.slot { + if let SyncState::Idle = self.sync_state { + self.sync_state 
= SyncState::Downloading + info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + } + self.latest_known_slot = remote_head_slot; + //TODO Build requests. + } + } + + available_peers.push(remote); + + } + + pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { + + if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { + + let chunk = match self.pending_chunks.remove(&peer_id) { + Some(chunks) => { + match chunks.find(|chunk| chunk.id == chunk_id) { + Some(chunk) => chunk, + None => { + warn!(self.log, "Received blocks for an unknown chunk"; + "peer"=> peer_id); + return; + } + } + }, + None => { + warn!(self.log, "Received blocks without a request"; + "peer"=> peer_id); + return; + } + }; + + // add to completed + self.current_requests -= 1; + self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + } + } + + pub fn inject_error(id: RequestId, peer_id) { + if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { + match self.pending_chunks.get(&peer_id) { + Some(chunks) => { + if let Some(pos) = chunks.iter().position(|c| c.id == id) { + chunks.remove(pos); + } + }, + None => { + debug!(self.log, + "Received an error for an unknown request"; + "request_id" => id, + "peer" => peer_id + ); + } + } + } + } + + pub fn poll(&mut self) -> SyncManagerState { + + // if cold sync + if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + + // Try to process completed chunks + for completed_chunk in self.completed_chunks { + let chunk = completed_chunk.1; + let last_chunk_id = { + let no_processed_chunks = self.processed_chunks.len(); + if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } + }; + if chunk.id == last_chunk_id + 1 { + // try and process the chunk + for block in chunk.blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = 
processing_result { + match outcome { + BlockProcessingOutCome::Processed { block_root} => { + // block successfully processed + }, + BlockProcessingOutcome::BlockIsAlreadyKnown => { + warn!( + self.log, "Block Already Known"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + ); + }, + _ => { + // An error has occurred + // This could be due to the previous chunk or the current chunk. + // Re-issue both. + warn!( + self.log, "Faulty Chunk"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + "outcome" => format!("{:?}", outcome), + ); + + // re-issue both chunks + // if both are the same peer. Downgrade the peer. + let past_chunk = self.processed_chunks.pop() + self.wanted_chunks.insert(0, chunk.clone()); + self.wanted_chunks.insert(0, past_chunk.clone()); + if chunk.0 == past_chunk.peer_id { + // downgrade peer + return SyncManagerState::DowngradePeer(chunk.0); + } + break; + } + } + } + } + // chunk successfully processed + debug!(self.log, + "Chunk Processed"; + "id" => chunk.id + "start_slot" => chunk.start_slot, + "end_slot" => chunk.end_slot, + ); + self.processed_chunks.push(chunk); + } + } + + // chunks completed, update the state + self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; + + // Remove stales + + // Spawn requests + if self.current_requests <= SIMULTANEOUS_REQUESTS { + if !self.wanted_chunks.is_empty() { + let chunk = self.wanted_chunks.remove(0); + for n in (0..self.peers.len()).rev() { + let peer = self.peers.swap_remove(n); + let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + if peer_finalized_slot >= chunk.end_slot { + *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); + self.active_peers.push(peer); + 
self.current_requests +=1; + let block_request = BeaconBlockRequest { + head_block_root, + start_slot: chunk.start_slot, + count: chunk.end_slot - chunk.start_slot + step: 1 + } + return SyncManagerState::BlockRequest(peer, block_request); + } + } + // no peers for this chunk + self.wanted_chunks.push(chunk); + return SyncManagerState::Stalled + } + } + } + + // if hot sync + return SyncManagerState::Idle + + } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index c3271888a8..e3d3d7cefd 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -17,7 +17,7 @@ use types::{ /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: u64 = 100; -/// The amount of seconds a block (or partial block) may exist in the import queue. +/// The amount of seconds a block may exist in the import queue. const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. @@ -30,23 +30,23 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. 
#[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - network_id: u8, - chain_id: u64, - latest_finalized_root: Hash256, - latest_finalized_epoch: Epoch, - best_root: Hash256, - best_slot: Slot, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, } impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { - network_id: hello.network_id, - chain_id: hello.chain_id, - latest_finalized_root: hello.latest_finalized_root, - latest_finalized_epoch: hello.latest_finalized_epoch, - best_root: hello.best_root, - best_slot: hello.best_slot, + fork_version: hello.fork_version, + finalized_root: hello.finalized_root, + finalized_epoch: hello.finalized_epoch, + head_root: hello.head_root, + head_slot: hello.head_slot, + requested_slot_skip: None, } } } @@ -71,8 +71,6 @@ pub struct SimpleSync { chain: Arc>, /// A mapping of Peers to their respective PeerSyncInfo. known_peers: HashMap, - /// A queue to allow importing of blocks - import_queue: ImportQueue, /// The current state of the syncing protocol. state: SyncState, log: slog::Logger, @@ -178,8 +176,8 @@ impl SimpleSync { let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - if local.network_id != remote.network_id { - // The node is on a different network, disconnect them. + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. 
info!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), @@ -187,9 +185,9 @@ impl SimpleSync { ); network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch - && remote.latest_finalized_root != Hash256::zero() - && local.latest_finalized_root != Hash256::zero() + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) != Some(remote.latest_finalized_root)) { @@ -248,22 +246,37 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - let start_slot = local - .latest_finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let required_slots = remote.best_slot - start_slot; - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - start_slot, - count: required_slots.as_u64(), - }, - network, - ); + self.process_sync(); } } + self.proess_sync(&mut self) { + loop { + match self.sync_manager.poll() { + SyncManagerState::RequestBlocks(peer_id, req) { + debug!( + self.log, + "RPCRequest(BeaconBlockBodies)"; + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); + }, + SyncManagerState::Stalled { + // need more peers to continue sync + warn!(self.log, "No useable peers for sync"); + break; + }, + SyncManagerState::Idle { + // nothing to do + break; + } + } + } + } + + fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots(target_slot) @@ -272,213 +285,27 @@ impl SimpleSync { .map(|(root, _slot)| root) } - /// Handle a `BeaconBlockRoots` request from the peer. - pub fn on_beacon_block_roots_request( + /// Handle a `BeaconBlocks` request from the peer. 
+ pub fn on_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlockRootsRequest, + req: BeaconBlocksRequest, network: &mut NetworkContext, ) { let state = &self.chain.head().beacon_state; debug!( self.log, - "BlockRootsRequest"; + "BeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), "count" => req.count, "start_slot" => req.start_slot, ); - let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) - .collect(); - - if roots.len() as u64 != req.count { - debug!( - self.log, - "BlockRootsRequest"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), - "requested" => req.count, - "returned" => roots.len(), - ); - } - - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots }), - ) - } - - /// Handle a `BeaconBlockRoots` response from the peer. - pub fn on_beacon_block_roots_response( - &mut self, - peer_id: PeerId, - res: BeaconBlockRootsResponse, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockRootsResponse"; - "peer" => format!("{:?}", peer_id), - "count" => res.roots.len(), - ); - - if res.roots.is_empty() { - warn!( - self.log, - "Peer returned empty block roots response"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - // The wire protocol specifies that slots must be in ascending order. 
- if !res.slots_are_ascending() { - warn!( - self.log, - "Peer returned block roots response with bad slot ordering"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - let new_roots = self - .import_queue - .enqueue_block_roots(&res.roots, peer_id.clone()); - - // No new roots means nothing to do. - // - // This check protects against future panics. - if new_roots.is_empty() { - return; - } - - // Determine the first (earliest) and last (latest) `BlockRootSlot` items. - // - // This logic relies upon slots to be in ascending order, which is enforced earlier. - let first = new_roots.first().expect("Non-empty list must have first"); - let last = new_roots.last().expect("Non-empty list must have last"); - - // Request all headers between the earliest and latest new `BlockRootSlot` items. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: first.block_root, - start_slot: first.slot, - max_headers: (last.slot - first.slot + 1).as_u64(), - skip_slots: 0, - }, - network, - ) - } - - /// Handle a `BeaconBlockHeaders` request from the peer. - pub fn on_beacon_block_headers_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - let state = &self.chain.head().beacon_state; - - debug!( - self.log, - "BlockHeadersRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.max_headers, - ); - - let count = req.max_headers; - - // Collect the block roots. 
- let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(root, _slot)| root) - .collect(); - - roots.reverse(); - roots.dedup(); - - let headers: Vec = roots - .into_iter() - .step_by(req.skip_slots as usize + 1) - .filter_map(|root| { - let block = self - .chain - .store - .get::>(&root) - .ok()?; - Some(block?.block_header()) - }) - .collect(); - - // ssz-encode the headers - let headers = headers.as_ssz_bytes(); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers }), - ) - } - - /// Handle a `BeaconBlockHeaders` response from the peer. - pub fn on_beacon_block_headers_response( - &mut self, - peer_id: PeerId, - headers: Vec, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockHeadersResponse"; - "peer" => format!("{:?}", peer_id), - "count" => headers.len(), - ); - - if headers.is_empty() { - warn!( - self.log, - "Peer returned empty block headers response. PeerId: {:?}", peer_id - ); - return; - } - - // Enqueue the headers, obtaining a list of the roots of the headers which were newly added - // to the queue. - let block_roots = self.import_queue.enqueue_headers(headers, peer_id.clone()); - - if !block_roots.is_empty() { - self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network); - } - } - - /// Handle a `BeaconBlockBodies` request from the peer. 
- pub fn on_beacon_block_bodies_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - let block_bodies: Vec> = req - .block_roots - .iter() - .filter_map(|root| { + let blocks = Vec> = self + .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) + .filter_map(|root, slot| { if let Ok(Some(block)) = self.chain.store.get::>(root) { Some(block.body) } else { @@ -494,59 +321,49 @@ impl SimpleSync { }) .collect(); - debug!( - self.log, - "BlockBodiesRequest"; - "peer" => format!("{:?}", peer_id), - "requested" => req.block_roots.len(), - "returned" => block_bodies.len(), - ); + roots.reverse(); + roots.dedup_by_key(|brs| brs.block_root); - let bytes = block_bodies.as_ssz_bytes(); + if roots.len() as u64 != req.count { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => self.chain.present_slot(), + "requested" => req.count, + "returned" => roots.len(), + ); + } network.send_rpc_response( peer_id, request_id, - RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { - block_bodies: bytes, - block_roots: None, - }), + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlockBodies` response from the peer. - pub fn on_beacon_block_bodies_response( + + /// Handle a `BeaconBlocks` response from the peer. 
+ pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, - res: DecodedBeaconBlockBodiesResponse, + res: Vec>, network: &mut NetworkContext, ) { debug!( self.log, - "BlockBodiesResponse"; + "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), "count" => res.block_bodies.len(), ); - if !res.block_bodies.is_empty() { - // Import all blocks to queue - let last_root = self - .import_queue - .enqueue_bodies(res.block_bodies, peer_id.clone()); - - // Attempt to process all received bodies by recursively processing the latest block - if let Some(root) = last_root { - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, root, network, &"rpc") - { - // If processing is successful remove from `import_queue` - self.import_queue.remove(root); - } - } + if !res.is_empty() { + self.sync_manager.add_blocks(peer_id, blocks); } - // Clear out old entries - self.import_queue.remove_stale(); + self.process_sync(); } /// Process a gossip message declaring a new block. @@ -679,22 +496,6 @@ impl SimpleSync { network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); } - /// Request some `BeaconBlockBodies` from the remote peer. - fn request_block_bodies( - &mut self, - peer_id: PeerId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req)); - } /// Returns `true` if `self.chain` has not yet processed this block. 
pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { From 24b2f83713f5e3fd5147e99be44a5f842a6332fb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 07:35:52 +1000 Subject: [PATCH 072/186] Fix wrong state given to op pool prune --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0e0583309f..bed50202d4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1199,8 +1199,12 @@ impl BeaconChain { self.fork_choice .process_finalization(&finalized_block, finalized_block_root)?; - self.op_pool - .prune_all(&self.head().beacon_state, &self.spec); + let finalized_state = self + .store + .get::>(&finalized_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?; + + self.op_pool.prune_all(&finalized_state, &self.spec); Ok(()) } From e369e293a507c602be1901b462001bfd8e4e825c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 09:20:39 +1000 Subject: [PATCH 073/186] Make prom metric names more consistent --- beacon_node/beacon_chain/src/metrics.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a4b36cd375..574fbb4a46 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -15,7 +15,7 @@ lazy_static! 
{ "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - try_create_histogram("block_processing_seconds", "Full runtime of block processing"); + try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" @@ -123,15 +123,15 @@ lazy_static! { "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("beacon_fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice"); pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("beacon_fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function"); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( - "beacon_fork_choice_process_block_time", + "beacon_fork_choice_process_block_seconds", "Time taken to add a block and all attestations to fork choice" ); pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( - "beacon_fork_choice_process_attestation_time", + "beacon_fork_choice_process_attestation_seconds", "Time taken to add an attestation to fork choice" ); From b076b07022c9f359315b52700de301d23530e1f0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 12:11:18 +1000 Subject: [PATCH 074/186] Add more metrics, tidy existing metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++ beacon_node/beacon_chain/src/metrics.rs | 4 ++ beacon_node/eth2-libp2p/src/discovery.rs | 8 ++- beacon_node/eth2-libp2p/src/metrics.rs | 14 +++-- beacon_node/store/src/impls.rs | 20 ++++++- 
beacon_node/store/src/impls/beacon_state.rs | 21 +++++++- beacon_node/store/src/metrics.rs | 57 ++++++++++++++++++++ eth2/utils/slot_clock/src/metrics.rs | 3 ++ 8 files changed, 120 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0e0583309f..faffa46f52 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -827,8 +827,12 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); + let block_root = block.canonical_root(); + metrics::stop_timer(block_root_timer); + if block_root == self.genesis_block_root { return Ok(BlockProcessingOutcome::GenesisBlock); } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 574fbb4a46..db213a0cf7 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -16,6 +16,10 @@ lazy_static! { ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); + pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "beacon_block_processing_block_root_seconds", + "Time spent calculating the block root when processing a block." 
+ ); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index d9f2f7465a..ca98db3246 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -159,13 +159,17 @@ where } fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { - metrics::inc_counter(&metrics::PEER_CONNECT_COUNT); self.connected_peers.insert(peer_id); + + metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); + metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); } fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) { - metrics::inc_counter(&metrics::PEER_DISCONNECT_COUNT); self.connected_peers.remove(peer_id); + + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); } fn inject_replaced( diff --git a/beacon_node/eth2-libp2p/src/metrics.rs b/beacon_node/eth2-libp2p/src/metrics.rs index a47037669e..b678ef6b41 100644 --- a/beacon_node/eth2-libp2p/src/metrics.rs +++ b/beacon_node/eth2-libp2p/src/metrics.rs @@ -2,15 +2,19 @@ pub use lighthouse_metrics::*; lazy_static! 
{ pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( - "libp2p_address_update_count", + "libp2p_address_update_total", "Count of libp2p socked updated events (when our view of our IP address has changed)" ); - pub static ref PEER_CONNECT_COUNT: Result = try_create_int_counter( - "libp2p_peer_connect_count", + pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( + "libp2p_peer_connected_peers_total", + "Count of libp2p peers currently connected" + ); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( + "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" ); - pub static ref PEER_DISCONNECT_COUNT: Result = try_create_int_counter( - "libp2p_peer_disconnect_count", + pub static ref PEER_DISCONNECT_EVENT_COUNT: Result = try_create_int_counter( + "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); } diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index e88b70f396..1c29c245b2 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -9,10 +9,26 @@ impl StoreItem for BeaconBlock { } fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let bytes = self.as_ssz_bytes(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + + bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) + let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + + let len = bytes.len(); + let result = Self::from_ssz_bytes(bytes).map_err(Into::into); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + + result } } diff --git 
a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 591663fe05..69e83cd636 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -53,12 +53,29 @@ impl StoreItem for BeaconState { } fn as_store_bytes(&self) -> Vec { + let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let container = StorageContainer::new(self); - container.as_ssz_bytes() + let bytes = container.as_ssz_bytes(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + + bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { + let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + + let len = bytes.len(); let container = StorageContainer::from_ssz_bytes(bytes)?; - container.try_into() + let result = container.try_into(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + + result } } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 30cbb878b6..90237824d2 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -4,6 +4,9 @@ use std::fs; use std::path::PathBuf; lazy_static! { + /* + * General + */ pub static ref DISK_DB_SIZE: Result = try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( @@ -30,6 +33,60 @@ lazy_static! 
{ "store_disk_db_delete_count_total", "Total number of deletions from the on-disk DB" ); + /* + * Beacon State + */ + pub static ref BEACON_STATE_READ_TIMES: Result = try_create_histogram( + "store_beacon_state_read_overhead_seconds", + "Overhead on reading a beacon state from the DB (e.g., decoding)" + ); + pub static ref BEACON_STATE_READ_COUNT: Result = try_create_int_counter( + "store_beacon_state_read_total", + "Total number of beacon state reads from the DB" + ); + pub static ref BEACON_STATE_READ_BYTES: Result = try_create_int_counter( + "store_beacon_state_read_bytes_total", + "Total number of beacon state bytes read from the DB" + ); + pub static ref BEACON_STATE_WRITE_TIMES: Result = try_create_histogram( + "store_beacon_state_write_overhead_seconds", + "Overhead on writing a beacon state to the DB (e.g., encoding)" + ); + pub static ref BEACON_STATE_WRITE_COUNT: Result = try_create_int_counter( + "store_beacon_state_write_total", + "Total number of beacon state writes the DB" + ); + pub static ref BEACON_STATE_WRITE_BYTES: Result = try_create_int_counter( + "store_beacon_state_write_bytes_total", + "Total number of beacon state bytes written to the DB" + ); + /* + * Beacon Block + */ + pub static ref BEACON_BLOCK_READ_TIMES: Result = try_create_histogram( + "store_beacon_block_read_overhead_seconds", + "Overhead on reading a beacon block from the DB (e.g., decoding)" + ); + pub static ref BEACON_BLOCK_READ_COUNT: Result = try_create_int_counter( + "store_beacon_block_read_total", + "Total number of beacon block reads from the DB" + ); + pub static ref BEACON_BLOCK_READ_BYTES: Result = try_create_int_counter( + "store_beacon_block_read_bytes_total", + "Total number of beacon block bytes read from the DB" + ); + pub static ref BEACON_BLOCK_WRITE_TIMES: Result = try_create_histogram( + "store_beacon_block_write_overhead_seconds", + "Overhead on writing a beacon block to the DB (e.g., encoding)" + ); + pub static ref BEACON_BLOCK_WRITE_COUNT: Result = 
try_create_int_counter( + "store_beacon_block_write_total", + "Total number of beacon block writes the DB" + ); + pub static ref BEACON_BLOCK_WRITE_BYTES: Result = try_create_int_counter( + "store_beacon_block_write_bytes_total", + "Total number of beacon block bytes written to the DB" + ); } /// Updates the global metrics registry with store-related information. diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index a9153a10ca..e0d3923e00 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -7,6 +7,8 @@ lazy_static! { try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot"); pub static ref PRESENT_EPOCH: Result = try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch"); + pub static ref SLOTS_PER_EPOCH: Result = + try_create_int_gauge("slotclock_slots_per_epoch", "Slots per epoch (constant)"); pub static ref MILLISECONDS_PER_SLOT: Result = try_create_int_gauge( "slotclock_slot_time_milliseconds", "The duration in milliseconds between each slot" @@ -25,5 +27,6 @@ pub fn scrape_for_metrics(clock: &U) { &PRESENT_EPOCH, present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); + set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); } From a3e464078af39e10132bac3d1ac37dbebae8b41a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 13:00:01 +1000 Subject: [PATCH 075/186] Fix store block read metrics --- beacon_node/store/src/impls.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 1c29c245b2..ed724480c3 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -9,25 +9,25 @@ impl StoreItem for BeaconBlock { } fn as_store_bytes(&self) -> Vec { - let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let timer = 
metrics::start_timer(&metrics::BEACON_BLOCK_WRITE_TIMES); let bytes = self.as_ssz_bytes(); metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + metrics::inc_counter(&metrics::BEACON_BLOCK_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_BLOCK_WRITE_BYTES, bytes.len() as i64); bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { - let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + let timer = metrics::start_timer(&metrics::BEACON_BLOCK_READ_TIMES); let len = bytes.len(); let result = Self::from_ssz_bytes(bytes).map_err(Into::into); metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); - metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + metrics::inc_counter(&metrics::BEACON_BLOCK_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_BLOCK_READ_BYTES, len as i64); result } From 341a83b9e8d5f3733b09ac9dae2e8aa6d5602ef5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 16:17:11 +1000 Subject: [PATCH 076/186] Tidy attestation metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 28 +++++++++++++++----- beacon_node/beacon_chain/src/metrics.rs | 4 +++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index faffa46f52..0cb6d5f980 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -547,11 +547,14 @@ impl BeaconChain { &self, attestation: Attestation, ) -> Result { + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); + // From the store, load the attestation's "head block". 
// // An honest validator would have set this block to be the head of the chain (i.e., the // result of running fork choice). - if let Some(attestation_head_block) = self + let result = if let Some(attestation_head_block) = self .store .get::>(&attestation.data.beacon_block_root)? { @@ -680,7 +683,15 @@ impl BeaconChain { Ok(AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root: attestation.data.beacon_block_root, }) + }; + + metrics::stop_timer(timer); + + if let Ok(AttestationProcessingOutcome::Processed) = &result { + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES); } + + result } /// Verifies the `attestation` against the `state` to which it is attesting. @@ -707,9 +718,6 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); - let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); - // Find the highest between: // // - The highest valid finalized epoch we've ever seen (i.e., the head). @@ -719,6 +727,16 @@ impl BeaconChain { state.finalized_checkpoint.epoch, ); + // A helper function to allow attestation processing to be metered. + let verify_attestation_for_state = |state, attestation, spec, verify_signatures| { + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_CORE); + + let result = verify_attestation_for_state(state, attestation, spec, verify_signatures); + + metrics::stop_timer(timer); + result + }; + let result = if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) { // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or // prior to the finalized epoch. 
@@ -758,8 +776,6 @@ impl BeaconChain { Ok(AttestationProcessingOutcome::Processed) }; - timer.map(|t| t.observe_duration()); - result } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index db213a0cf7..6efa4b3f2b 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -90,6 +90,10 @@ lazy_static! { "beacon_attestation_processing_seconds", "Full runtime of attestation processing" ); + pub static ref ATTESTATION_PROCESSING_CORE: Result = try_create_histogram( + "beacon_attestation_processing_core_seconds", + "Time spent on the core spec processing of attestation processing" + ); /* * Attestation Production From b7e43b56f9dc4167414c61d6b52238782e0caf47 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:37:14 +1000 Subject: [PATCH 077/186] Fix minor PR comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- eth2/lmd_ghost/src/reduced_tree.rs | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 73ebb7007f..76442fb8d1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -536,7 +536,7 @@ impl BeaconChain { /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. /// /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination - /// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an + /// about the `attestation` (whether it was invalid or not). Returns an `Err` if there was an /// error during this process and no determination was able to be made. /// /// ## Notes @@ -620,7 +620,7 @@ impl BeaconChain { outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestations creation. 
+ // slot preceding the attestation's creation. // // This state is guaranteed to be in the same chain as the attestation, but it's // not guaranteed to be from the same slot or epoch as the attestation. @@ -703,7 +703,7 @@ impl BeaconChain { /// The given `state` must fulfil one of the following conditions: /// /// - `state` corresponds to the `block.state_root` identified by - /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`). /// - `state.slot` is in the same epoch as `data.target.epoch` and /// `attestation.data.beacon_block_root` is in the history of `state`. /// diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 822c388f6a..deda02e1fd 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -777,11 +777,7 @@ where } pub fn get_ref(&self, i: usize) -> Option<&T> { - if i < self.0.len() { - Some(&self.0[i]) - } else { - None - } + self.0.get(i) } pub fn insert(&mut self, i: usize, element: T) { From 6cd0af766e0ef97c258545fec7369169801cb9a5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:37:14 +1000 Subject: [PATCH 078/186] Fix minor PR comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- eth2/lmd_ghost/src/reduced_tree.rs | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ccf595893..61998b5de3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -536,7 +536,7 @@ impl BeaconChain { /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. /// /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination - /// about the `attestation` (wether it was invalid or not). 
Returns an `Err` if the was an + /// about the `attestation` (whether it was invalid or not). Returns an `Err` if there was an /// error during this process and no determination was able to be made. /// /// ## Notes @@ -617,7 +617,7 @@ impl BeaconChain { outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestations creation. + // slot preceding the attestation's creation. // // This state is guaranteed to be in the same chain as the attestation, but it's // not guaranteed to be from the same slot or epoch as the attestation. @@ -692,7 +692,7 @@ impl BeaconChain { /// The given `state` must fulfil one of the following conditions: /// /// - `state` corresponds to the `block.state_root` identified by - /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`). /// - `state.slot` is in the same epoch as `data.target.epoch` and /// `attestation.data.beacon_block_root` is in the history of `state`. 
/// diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 822c388f6a..deda02e1fd 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -777,11 +777,7 @@ where } pub fn get_ref(&self, i: usize) -> Option<&T> { - if i < self.0.len() { - Some(&self.0[i]) - } else { - None - } + self.0.get(i) } pub fn insert(&mut self, i: usize, element: T) { From 8fb9e1f648b75b488f798a25cf2bce487ff8206e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:48:03 +1000 Subject: [PATCH 079/186] Remove duplicated attestation finalization check --- beacon_node/beacon_chain/src/beacon_chain.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 61998b5de3..9ee51c1629 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -555,23 +555,6 @@ impl BeaconChain { .store .get::>(&attestation.data.beacon_block_root)? { - let finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch; - - if attestation_head_block.slot - <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) - { - // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or - // prior to the finalized epoch. - // - // For any valid attestation if the `beacon_block_root` is prior to finalization, then - // all other parameters (source, target, etc) must all be prior to finalization and - // therefore no longer interesting. - return Ok(AttestationProcessingOutcome::FinalizedSlot { - attestation: attestation_head_block.epoch(), - finalized: finalized_epoch, - }); - } - // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. 
From 82e8aafb014484e72926dc476184634fd8b9afdf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:59:29 +1000 Subject: [PATCH 080/186] Remove awkward `let` statement --- beacon_node/beacon_chain/src/beacon_chain.rs | 170 +++++++++---------- 1 file changed, 82 insertions(+), 88 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ee51c1629..96d3065306 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -558,97 +558,91 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. - let optional_outcome: Option> = { - // Take a read lock on the head beacon state. - // - // The purpose of this whole `let processed ...` block is to ensure that the read - // lock is dropped if we don't end up using the head beacon state. - let state = &self.head().beacon_state; + // Take a read lock on the head beacon state. + let state = &self.head().beacon_state; - // If it turns out that the attestation was made using the head state, then there - // is no need to load a state from the database to process the attestation. - // - // Note: use the epoch of the target because it indicates which epoch the - // attestation was created in. You cannot use the epoch of the head block, because - // the block doesn't necessarily need to be in the same epoch as the attestation - // (e.g., if there are skip slots between the epoch the block was created in and - // the epoch for the attestation). - // - // This check also ensures that the slot for `data.beacon_block_root` is not higher - // than `state.root` by ensuring that the block is in the history of `state`. 
- if state.current_epoch() == attestation.data.target.epoch - && (attestation.data.beacon_block_root == self.head().beacon_block_root - || state - .get_block_root(attestation_head_block.slot) - .map(|root| *root == attestation.data.beacon_block_root) - .unwrap_or_else(|_| false)) - { - // The head state is able to be used to validate this attestation. No need to load - // anything from the database. - Some(self.process_attestation_for_state_and_block( - attestation.clone(), - state, - &attestation_head_block, - )) - } else { - None - } - }; + // If it turns out that the attestation was made using the head state, then there + // is no need to load a state from the database to process the attestation. + // + // Note: use the epoch of the target because it indicates which epoch the + // attestation was created in. You cannot use the epoch of the head block, because + // the block doesn't necessarily need to be in the same epoch as the attestation + // (e.g., if there are skip slots between the epoch the block was created in and + // the epoch for the attestation). + // + // This check also ensures that the slot for `data.beacon_block_root` is not higher + // than `state.root` by ensuring that the block is in the history of `state`. + if state.current_epoch() == attestation.data.target.epoch + && (attestation.data.beacon_block_root == self.head().beacon_block_root + || state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false)) + { + // The head state is able to be used to validate this attestation. No need to load + // anything from the database. + return self.process_attestation_for_state_and_block( + attestation.clone(), + state, + &attestation_head_block, + ); + } - if let Some(outcome) = optional_outcome { - // Verification was already completed with an in-memory state. Return that result. - outcome + // Ensure the read-lock from `self.head()` is dropped. 
+ // + // This is likely unnecessary, however it remains as a reminder to ensure this lock + // isn't hogged. + std::mem::drop(state); + + // Use the `data.beacon_block_root` to load the state from the latest non-skipped + // slot preceding the attestation's creation. + // + // This state is guaranteed to be in the same chain as the attestation, but it's + // not guaranteed to be from the same slot or epoch as the attestation. + let mut state: BeaconState = self + .store + .get(&attestation_head_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; + + // Ensure the state loaded from the database matches the state of the attestation + // head block. + // + // The state needs to be advanced from the current slot through to the epoch in + // which the attestation was created in. It would be an error to try and use + // `state.get_attestation_data_slot(..)` because the state matching the + // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation + // (e.g., if there were lots of skip slots since the head of the chain and the + // epoch creation epoch). + for _ in state.slot.as_u64() + ..attestation + .data + .target + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + .as_u64() + { + per_slot_processing(&mut state, &self.spec)?; + } + + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; + + // Reject any attestation where the `state` loaded from `data.beacon_block_root` + // has a higher slot than the attestation. + // + // Permitting this would allow for attesters to vote on _future_ slots. + if attestation_slot > state.slot { + Ok(AttestationProcessingOutcome::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot, + }) } else { - // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestation's creation. 
- // - // This state is guaranteed to be in the same chain as the attestation, but it's - // not guaranteed to be from the same slot or epoch as the attestation. - let mut state: BeaconState = self - .store - .get(&attestation_head_block.state_root)? - .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; - - // Ensure the state loaded from the database matches the state of the attestation - // head block. - // - // The state needs to be advanced from the current slot through to the epoch in - // which the attestation was created in. It would be an error to try and use - // `state.get_attestation_data_slot(..)` because the state matching the - // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation - // (e.g., if there were lots of skip slots since the head of the chain and the - // epoch creation epoch). - for _ in state.slot.as_u64() - ..attestation - .data - .target - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - .as_u64() - { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - - let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; - - // Reject any attestation where the `state` loaded from `data.beacon_block_root` - // has a higher slot than the attestation. - // - // Permitting this would allow for attesters to vote on _future_ slots. - if attestation_slot > state.slot { - Ok(AttestationProcessingOutcome::AttestsToFutureState { - state: state.slot, - attestation: attestation_slot, - }) - } else { - self.process_attestation_for_state_and_block( - attestation, - &state, - &attestation_head_block, - ) - } + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) } } else { // Drop any attestation where we have not processed `attestation.data.beacon_block_root`. 
From 4f98a3985fc1714799ac5897d002fa26ea74bb96 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 10:36:55 +1000 Subject: [PATCH 081/186] Add first attempts at HTTP bootstrap --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/beacon_chain_types.rs | 11 +++ beacon_node/client/src/config.rs | 2 + beacon_node/client/src/lib.rs | 1 + beacon_node/client/src/local_bootstrap.rs | 93 ++++++++++++++++++++ beacon_node/rest_api/src/beacon.rs | 21 +++++ beacon_node/rest_api/src/lib.rs | 6 ++ beacon_node/rest_api/src/spec.rs | 27 ++++++ 8 files changed, 162 insertions(+) create mode 100644 beacon_node/client/src/local_bootstrap.rs create mode 100644 beacon_node/rest_api/src/spec.rs diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8c72fa4171..b0524b17d2 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,3 +27,4 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" +reqwest = "0.9" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 0b86c95838..a5b89b86a2 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,4 +1,5 @@ use crate::error::Result; +use crate::local_bootstrap::BootstrapParams; use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, @@ -6,6 +7,7 @@ use beacon_chain::{ store::Store, BeaconChain, BeaconChainTypes, }; +use reqwest::Url; use slog::{crit, info, Logger}; use slot_clock::SlotClock; use std::fs::File; @@ -74,6 +76,15 @@ where serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? 
} + GenesisState::HttpBootstrap { server } => { + let url: Url = + Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?; + + let params = BootstrapParams::from_http_api(url) + .map_err(|e| format!("Failed to bootstrap from HTTP server: {:?}", e))?; + + params.genesis_state + } }; let mut genesis_block = BeaconBlock::empty(&spec); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ee62b62815..2b410312b1 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -48,6 +48,8 @@ pub enum GenesisState { }, /// Load a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, + /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + HttpBootstrap { server: String }, } impl Default for Config { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 65ba071fa1..7a9152ee0e 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -2,6 +2,7 @@ extern crate slog; mod beacon_chain_types; mod config; +mod local_bootstrap; pub mod error; pub mod notifier; diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs new file mode 100644 index 0000000000..f38762b3be --- /dev/null +++ b/beacon_node/client/src/local_bootstrap.rs @@ -0,0 +1,93 @@ +use reqwest::{Error as HttpError, Url}; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; + +#[derive(Debug)] +pub enum Error { + UrlCannotBeBase, + HttpError(HttpError), +} + +impl From for Error { + fn from(e: HttpError) -> Error { + Error::HttpError(e) + } +} + +pub struct BootstrapParams { + pub finalized_block: BeaconBlock, + pub finalized_state: BeaconState, + pub genesis_block: BeaconBlock, + pub genesis_state: BeaconState, +} + +impl BootstrapParams { + pub fn from_http_api(url: Url) -> Result { + let slots_per_epoch = get_slots_per_epoch(url.clone())?; + let genesis_slot = Slot::new(0); + let 
finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64())?; + + Ok(Self { + finalized_block: get_block(url.clone(), finalized_slot)?, + finalized_state: get_state(url.clone(), finalized_slot)?, + genesis_block: get_block(url.clone(), genesis_slot)?, + genesis_state: get_state(url.clone(), genesis_slot)?, + }) + } +} + +fn get_slots_per_epoch(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("slots_per_epoch"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("latest_finalized_checkpoint"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; + + Ok(checkpoint.epoch.start_slot(slots_per_epoch)) +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("state"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("block"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? 
+ .json() + .map_err(Into::into) +} diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index cef23abe81..8b089f542b 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -58,3 +58,24 @@ pub fn get_state_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } + +/// HTTP handler to return the highest finalized slot. +pub fn get_latest_finalized_checkpoint( + req: Request, +) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let checkpoint = beacon_chain + .head() + .beacon_state + .finalized_checkpoint + .clone(); + + let json: String = serde_json::to_string(&checkpoint) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a94a8cdf4a..57c5482cd8 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -4,6 +4,7 @@ mod beacon; mod config; mod helpers; mod node; +mod spec; mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -101,10 +102,15 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/latest_finalized_checkpoint") => { + beacon::get_latest_finalized_checkpoint::(req) + } (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/spec") => spec::get_spec::(req), + (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), }; diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs new file mode 100644 index 0000000000..d0c8e4368d --- /dev/null +++ b/beacon_node/rest_api/src/spec.rs @@ -0,0 +1,27 @@ +use super::{success_response, ApiResult}; +use crate::ApiError; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use hyper::{Body, Request}; +use std::sync::Arc; +use types::EthSpec; + +/// HTTP handler to return the full spec object. +pub fn get_spec(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let json: String = serde_json::to_string(&beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +/// HTTP handler to return the full spec object. 
+pub fn get_slots_per_epoch(_req: Request) -> ApiResult { + let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} From 2bf0d5c071efee2f24bda10afe5f21ec6a9c4884 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 11:22:43 +1000 Subject: [PATCH 082/186] Add beacon_block methods to rest api --- beacon_node/client/src/local_bootstrap.rs | 22 ++++--- beacon_node/rest_api/src/beacon.rs | 74 ++++++++++++++++++++++- beacon_node/rest_api/src/lib.rs | 2 + 3 files changed, 87 insertions(+), 11 deletions(-) diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs index f38762b3be..79fad7ec2e 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/local_bootstrap.rs @@ -2,7 +2,7 @@ use reqwest::{Error as HttpError, Url}; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; #[derive(Debug)] -pub enum Error { +enum Error { UrlCannotBeBase, HttpError(HttpError), } @@ -21,16 +21,22 @@ pub struct BootstrapParams { } impl BootstrapParams { - pub fn from_http_api(url: Url) -> Result { - let slots_per_epoch = get_slots_per_epoch(url.clone())?; + pub fn from_http_api(url: Url) -> Result { + let slots_per_epoch = get_slots_per_epoch(url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; let genesis_slot = Slot::new(0); - let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64())?; + let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64()) + .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; Ok(Self { - finalized_block: get_block(url.clone(), finalized_slot)?, - finalized_state: get_state(url.clone(), finalized_slot)?, - genesis_block: get_block(url.clone(), genesis_slot)?, - genesis_state: get_state(url.clone(), genesis_slot)?, + finalized_block: 
get_block(url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))?, + finalized_state: get_state(url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))?, + genesis_block: get_block(url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, + genesis_state: get_state(url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, }) } } diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 8b089f542b..a2afb10010 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -4,7 +4,75 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use std::sync::Arc; use store::Store; -use types::BeaconState; +use types::{BeaconBlock, BeaconState}; + +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_block(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let query_params = ["root", "slot"]; + let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + + let block_root = match (key.as_ref(), value) { + ("slot", value) => { + let target = parse_slot(&value)?; + + beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) + .ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })? + } + ("root", value) => parse_root(&value)?, + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), + }; + + let block = beacon_chain + .store + .get::>(&block_root)? 
+ .ok_or_else(|| { + ApiError::NotFound(format!( + "Unable to find BeaconBlock for root {}", + block_root + )) + })?; + + let json: String = serde_json::to_string(&block) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconBlock: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +/// HTTP handler to return a `BeaconBlock` root at a given `slot`. +pub fn get_block_root(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; + let target = parse_slot(&slot_string)?; + + let root = beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) + .ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })?; + + let json: String = serde_json::to_string(&root) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// @@ -29,7 +97,7 @@ pub fn get_state(req: Request) -> ApiResult .get(root)? .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))? } - _ => unreachable!("Guarded by UrlQuery::from_request()"), + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; let json: String = serde_json::to_string(&state) @@ -38,7 +106,7 @@ pub fn get_state(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -/// HTTP handler to return a `BeaconState` root at a given or `slot`. +/// HTTP handler to return a `BeaconState` root at a given `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 57c5482cd8..4f07b482a4 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -102,6 +102,8 @@ pub fn start_server( // Route the request to the correct handler. let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/block") => beacon::get_block::(req), + (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { beacon::get_latest_finalized_checkpoint::(req) } From 980f533b3b1156c89cc9f46d396a216b13af9205 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 11:55:12 +1000 Subject: [PATCH 083/186] Fix serde for block.body.grafitti --- eth2/types/src/beacon_block_body.rs | 7 +++++-- eth2/types/src/utils/serde_utils.rs | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 64dc229ed8..c1f66b816c 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::utils::graffiti_from_hex_str; +use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -16,7 +16,10 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde(deserialize_with = "graffiti_from_hex_str")] + #[serde( + serialize_with = "graffiti_to_hex_str", + deserialize_with = "graffiti_from_hex_str" + )] pub graffiti: [u8; 32], pub proposer_slashings: VariableList, pub attester_slashings: VariableList, T::MaxAttesterSlashings>, diff --git a/eth2/types/src/utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs index 4b46fc0dc6..a9b27d75b5 100644 --- a/eth2/types/src/utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -46,8 +46,20 @@ where 
Ok(array) } -// #[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. -pub fn fork_to_hex_str(bytes: &[u8; 4], serializer: S) -> Result +pub fn fork_to_hex_str(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn graffiti_to_hex_str( + bytes: &[u8; GRAFFITI_BYTES_LEN], + serializer: S, +) -> Result where S: Serializer, { From 9b3c9f8c0fe1908b37ca7c6b8f98b68cc07adfac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 12:03:03 +1000 Subject: [PATCH 084/186] Allow travis failures on beta (see desc) There's a non-backward compatible change in `cargo fmt`. Stable and beta do not agree. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index def7435a1f..b9754eb1eb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,7 @@ rust: - nightly matrix: allow_failures: + - rust: beta - rust: nightly fast_finish: true install: From c93d2baa912a3ff41fba711ae5b2ae387298c265 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 18:23:26 +1000 Subject: [PATCH 085/186] Add network routes to API --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/lib.rs | 3 +- beacon_node/client/src/local_bootstrap.rs | 16 ++++++ beacon_node/eth2-libp2p/src/behaviour.rs | 5 ++ beacon_node/eth2-libp2p/src/discovery.rs | 9 ++++ beacon_node/eth2-libp2p/src/lib.rs | 1 + beacon_node/eth2-libp2p/src/service.rs | 2 +- beacon_node/network/src/service.rs | 26 +++++++++- beacon_node/rest_api/Cargo.toml | 2 + beacon_node/rest_api/src/lib.rs | 12 ++++- beacon_node/rest_api/src/network.rs | 61 +++++++++++++++++++++++ 11 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 beacon_node/rest_api/src/network.rs diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9aa3557a95..9d5d49e17b 
100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } prometheus = "^0.6" diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e7c3d2d8ac..93e80df421 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -48,7 +48,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( @@ -122,6 +122,7 @@ where &client_config.rest_api, executor, beacon_chain.clone(), + network.clone(), client_config.db_path().expect("unable to read datadir"), &log, ) { diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs index 79fad7ec2e..5fe5e1b4fb 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/local_bootstrap.rs @@ -1,3 +1,4 @@ +use eth2_libp2p::Enr; use reqwest::{Error as HttpError, Url}; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; @@ -18,6 +19,7 @@ pub struct BootstrapParams { pub finalized_state: BeaconState, pub genesis_block: BeaconBlock, pub genesis_state: BeaconState, + pub enr: Enr, } impl BootstrapParams { @@ -37,6 +39,7 @@ impl BootstrapParams { .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, genesis_state: get_state(url.clone(), genesis_slot) .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, + enr: get_enr(url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e))?, }) } } @@ -97,3 +100,16 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err .json() .map_err(Into::into) } + +fn get_enr(mut url: Url) -> Result { + 
url.path_segments_mut() + .map(|mut url| { + url.push("node").push("network").push("enr"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a0613..24aacbfa12 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -7,6 +7,7 @@ use futures::prelude::*; use libp2p::{ core::identity::Keypair, discv5::Discv5Event, + enr::Enr, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, @@ -78,6 +79,10 @@ impl Behaviour { log: behaviour_log, }) } + + pub fn discovery(&self) -> &Discovery { + &self.discovery + } } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index ca98db3246..87d5dd5581 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -103,6 +103,10 @@ impl Discovery { }) } + pub fn local_enr(&self) -> &Enr { + self.discovery.local_enr() + } + /// Manually search for peers. This restarts the discovery round, sparking multiple rapid /// queries. pub fn discover_peers(&mut self) { @@ -120,6 +124,11 @@ impl Discovery { self.connected_peers.len() } + /// The current number of connected libp2p peers. + pub fn connected_peer_set(&self) -> &HashSet { + &self.connected_peers + } + /// Search for new peers using the underlying discovery mechanism. 
fn find_peers(&mut self) { // pick a random NodeId diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 33d5ba9ed9..8c2644fbbc 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -17,6 +17,7 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; +pub use libp2p::enr::Enr; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa05798..4c343fa26c 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -15,7 +15,7 @@ use libp2p::core::{ transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::{core, secio, PeerId, Swarm, Transport}; +use libp2p::{core, enr::Enr, secio, PeerId, Swarm, Transport}; use slog::{debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a9175..ed3c9da0b3 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; -use eth2_libp2p::{Libp2pEvent, PeerId}; +use eth2_libp2p::{Enr, Libp2pEvent, PeerId}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; @@ -64,6 +64,30 @@ impl Service { Ok((Arc::new(network_service), network_send)) } + pub fn local_enr(&self) -> Enr { + self.libp2p_service + .lock() + .swarm + .discovery() + .local_enr() + .clone() + } + + pub fn connected_peers(&self) -> usize { + self.libp2p_service.lock().swarm.connected_peers() + } + + pub fn connected_peer_set(&self) -> Vec { + self.libp2p_service 
+ .lock() + .swarm + .discovery() + .connected_peer_set() + .iter() + .cloned() + .collect() + } + pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index c7026014c4..cac196d9cb 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -7,6 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] beacon_chain = { path = "../beacon_chain" } +network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index e267ce313e..86b5b35db0 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,15 +1,18 @@ #[macro_use] extern crate lazy_static; +extern crate network as client_network; mod beacon; mod config; mod helpers; mod metrics; +mod network; mod node; mod spec; mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use client_network::Service as NetworkService; pub use config::Config as ApiConfig; use hyper::rt::Future; use hyper::service::service_fn_ok; @@ -68,10 +71,11 @@ impl From for ApiError { } } -pub fn start_server( +pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, + network_service: Arc>, db_path: PathBuf, log: &slog::Logger, ) -> Result { @@ -99,6 +103,7 @@ pub fn start_server( let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); + let network_service = network_service.clone(); // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { @@ -109,6 +114,8 @@ pub fn start_server( req.extensions_mut() .insert::>>(beacon_chain.clone()); req.extensions_mut().insert::(db_path.clone()); + req.extensions_mut() + .insert::>>(network_service.clone()); let path = req.uri().path().to_string(); @@ -124,6 +131,9 @@ pub fn start_server( (&Method::GET, "/metrics") => metrics::get_prometheus::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/node/network/enr") => network::get_enr::(req), + (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), + (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs new file mode 100644 index 0000000000..2fd88f4985 --- /dev/null +++ b/beacon_node/rest_api/src/network.rs @@ -0,0 +1,61 @@ +use crate::{success_response, ApiError, ApiResult, NetworkService}; +use beacon_chain::BeaconChainTypes; +use eth2_libp2p::{Enr, PeerId}; +use hyper::{Body, Request}; +use std::sync::Arc; + +/// HTTP handle to return the Discv5 ENR from the client's libp2p service. +/// +/// ENR is encoded as base64 string. +pub fn get_enr(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let enr: Enr = network.local_enr(); + + Ok(success_response(Body::from( + serde_json::to_string(&enr.to_base64()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + +/// HTTP handle to return the number of peers connected in the client's libp2p service. 
+pub fn get_peer_count( + req: Request, +) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let connected_peers: usize = network.connected_peers(); + + Ok(success_response(Body::from( + serde_json::to_string(&connected_peers) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + +/// HTTP handle to return the list of peers connected to the client's libp2p service. +/// +/// Peers are presented as a list of `PeerId::to_string()`. +pub fn get_peer_list(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let connected_peers: Vec = network + .connected_peer_set() + .iter() + .map(PeerId::to_string) + .collect(); + + Ok(success_response(Body::from( + serde_json::to_string(&connected_peers).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize Vec: {:?}", e)) + })?, + ))) +} From bb166a25992535460aecdec2fe94403b1521254a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 18:58:01 +1000 Subject: [PATCH 086/186] Fix rustc warnings --- beacon_node/eth2-libp2p/src/behaviour.rs | 1 - beacon_node/eth2-libp2p/src/service.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 24aacbfa12..9158fe4858 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -7,7 +7,6 @@ use futures::prelude::*; use libp2p::{ core::identity::Keypair, discv5::Discv5Event, - enr::Enr, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 4c343fa26c..316aa05798 100644 --- 
a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -15,7 +15,7 @@ use libp2p::core::{ transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::{core, enr::Enr, secio, PeerId, Swarm, Transport}; +use libp2p::{core, secio, PeerId, Swarm, Transport}; use slog::{debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; From c97b3b20cb1cfa6ae6ac5e9658b5f5a27f2bf4af Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 20:58:51 +1000 Subject: [PATCH 087/186] Add best_slot method --- beacon_node/rest_api/src/beacon.rs | 15 +++++++++++++++ beacon_node/rest_api/src/lib.rs | 1 + 2 files changed, 16 insertions(+) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index a2afb10010..66e31ae41c 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -6,6 +6,21 @@ use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState}; +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_best_slot(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let slot = beacon_chain.head().beacon_state.slot; + + let json: String = serde_json::to_string(&slot) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Slot: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block(req: Request) -> ApiResult { let beacon_chain = req diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 86b5b35db0..349a62c3fe 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -121,6 +121,7 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { From fda208b103284a156d801f2cea0e556642b10fe5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 12:48:34 +1000 Subject: [PATCH 088/186] Add --bootstrap arg to beacon node --- beacon_node/client/src/lib.rs | 2 +- beacon_node/src/main.rs | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 93e80df421..44b5c0ce3d 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -22,7 +22,7 @@ use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; -pub use config::Config as ClientConfig; +pub use config::{Config as ClientConfig, GenesisState}; pub use eth2_config::Eth2Config; /// Main beacon node client service. This provides the connection and initialisation of the clients diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9a52f2638c..862ca4a90e 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use client::{ClientConfig, Eth2Config, GenesisState}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -200,6 +200,16 @@ fn main() { .help("Sets the verbosity level") .takes_value(true), ) + /* + * Bootstrap. 
+ */ + .arg( + Arg::with_name("bootstrap") + .long("bootstrap") + .value_name("HTTP_SERVER") + .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") + .takes_value(true) + ) .get_matches(); // build the initial logger @@ -288,6 +298,13 @@ fn main() { } }; + // If the `--bootstrap` flag is provided, overwrite the default configuration. + if let Some(server) = matches.value_of("bootstrap") { + client_config.genesis_state = GenesisState::HttpBootstrap { + server: server.to_string(), + }; + } + let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); // Initialise the `Eth2Config`. From b24482674933406460b40f83fee00a98c6c84135 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 13:58:04 +1000 Subject: [PATCH 089/186] Get bootstrapper working for ENR address --- beacon_node/client/src/beacon_chain_types.rs | 14 ++--- .../{local_bootstrap.rs => bootstrapper.rs} | 55 +++++++++++-------- beacon_node/client/src/lib.rs | 3 +- beacon_node/src/main.rs | 21 ++++++- 4 files changed, 62 insertions(+), 31 deletions(-) rename beacon_node/client/src/{local_bootstrap.rs => bootstrapper.rs} (64%) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index a5b89b86a2..f2f95226ad 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,5 +1,5 @@ +use crate::bootstrapper::Bootstrapper; use crate::error::Result; -use crate::local_bootstrap::BootstrapParams; use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, @@ -7,7 +7,6 @@ use beacon_chain::{ store::Store, BeaconChain, BeaconChainTypes, }; -use reqwest::Url; use slog::{crit, info, Logger}; use slot_clock::SlotClock; use std::fs::File; @@ -77,13 +76,14 @@ where .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? 
} GenesisState::HttpBootstrap { server } => { - let url: Url = - Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?; + let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - let params = BootstrapParams::from_http_api(url) - .map_err(|e| format!("Failed to bootstrap from HTTP server: {:?}", e))?; + let (state, _block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - params.genesis_state + state } }; diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/bootstrapper.rs similarity index 64% rename from beacon_node/client/src/local_bootstrap.rs rename to beacon_node/client/src/bootstrapper.rs index 5fe5e1b4fb..9537f6f909 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -14,33 +14,44 @@ impl From for Error { } } -pub struct BootstrapParams { - pub finalized_block: BeaconBlock, - pub finalized_state: BeaconState, - pub genesis_block: BeaconBlock, - pub genesis_state: BeaconState, - pub enr: Enr, +pub struct Bootstrapper { + url: Url, } -impl BootstrapParams { - pub fn from_http_api(url: Url) -> Result { - let slots_per_epoch = get_slots_per_epoch(url.clone()) - .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; +impl Bootstrapper { + pub fn from_server_string(server: String) -> Result { + Ok(Self { + url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, + }) + } + + pub fn enr(&self) -> Result { + get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) + } + + pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); - let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64()) + + let block = get_block(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: 
{:?}", e))?; + let state = get_state(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))?; + + Ok((state, block)) + } + + pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { + let slots_per_epoch = get_slots_per_epoch(self.url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; + let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; - Ok(Self { - finalized_block: get_block(url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))?, - finalized_state: get_state(url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))?, - genesis_block: get_block(url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, - genesis_state: get_state(url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, - enr: get_enr(url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e))?, - }) + let block = get_block(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))?; + let state = get_state(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))?; + + Ok((state, block)) } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 44b5c0ce3d..798aedec92 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,8 +1,8 @@ extern crate slog; mod beacon_chain_types; +mod bootstrapper; mod config; -mod local_bootstrap; pub mod error; pub mod notifier; @@ -22,6 +22,7 @@ use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; +pub use bootstrapper::Bootstrapper; pub use config::{Config as ClientConfig, GenesisState}; pub use 
eth2_config::Eth2Config; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 862ca4a90e..5199bddb6e 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config, GenesisState}; +use client::{Bootstrapper, ClientConfig, Eth2Config, GenesisState}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -300,9 +300,28 @@ fn main() { // If the `--bootstrap` flag is provided, overwrite the default configuration. if let Some(server) = matches.value_of("bootstrap") { + // Set the genesis state source. client_config.genesis_state = GenesisState::HttpBootstrap { server: server.to_string(), }; + + let bootstrapper = match Bootstrapper::from_server_string(server.to_string()) { + Ok(b) => b, + Err(e) => { + crit!(log, "Failed to load bootstrapper"; "error" => format!("{:?}", e)); + return; + } + }; + + let enr = match bootstrapper.enr() { + Ok(b) => b, + Err(e) => { + crit!(log, "Failed to read ENR from bootstrap server"; "error" => format!("{:?}", e)); + return; + } + }; + + client_config.network.boot_nodes.push(enr); } let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); From 4678524659f4915037b7d64b9ce8f52498a7bb54 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 14:52:00 +1000 Subject: [PATCH 090/186] Store intermediate states during block processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 25 +++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 891f76d373..7faca0dfd1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -870,9 +870,16 @@ impl BeaconChain { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + // Keep a list of any states that were 
"skipped" (block-less) in between the parent state + // slot and the block slot. These will need to be stored in the database. + let mut intermediate_states = vec![]; + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; - for _ in state.slot.as_u64()..block.slot.as_u64() { + for i in state.slot.as_u64()..block.slot.as_u64() { + if i > 0 { + intermediate_states.push(state.clone()); + } per_slot_processing(&mut state, &self.spec)?; } @@ -911,6 +918,22 @@ impl BeaconChain { let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); + // Store all the states between the parent block state and this blocks slot before storing + // the final state. + for (i, intermediate_state) in intermediate_states.iter().enumerate() { + // To avoid doing an unnecessary tree hash, use the following (slot + 1) state's + // state_roots field to find the root. + let following_state = match intermediate_states.get(i + 1) { + Some(following_state) => following_state, + None => &state, + }; + let intermediate_state_root = + following_state.get_state_root(intermediate_state.slot)?; + + self.store + .put(&intermediate_state_root, intermediate_state)?; + } + // Store the block and state. 
self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; From ce37f958612229370791ae170e85780a07362656 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 16:41:02 +1000 Subject: [PATCH 091/186] Allow bootstrapper to scrape libp2p address --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/bootstrapper.rs | 29 +++++++++++++++++- beacon_node/client/src/config.rs | 42 ++++++++++++++++++++++++-- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/network/src/service.rs | 14 ++++++++- beacon_node/rest_api/src/lib.rs | 3 ++ beacon_node/rest_api/src/network.rs | 21 ++++++++++++- beacon_node/src/main.rs | 28 +---------------- 8 files changed, 107 insertions(+), 33 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9d5d49e17b..9b5a9cf42c 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -28,3 +28,4 @@ dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" reqwest = "0.9" +url = "1.2" diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 9537f6f909..1fd8f16592 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -1,6 +1,8 @@ -use eth2_libp2p::Enr; +use eth2_libp2p::{Enr, Multiaddr}; use reqwest::{Error as HttpError, Url}; +use std::net::Ipv4Addr; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; +use url::Host; #[derive(Debug)] enum Error { @@ -25,10 +27,22 @@ impl Bootstrapper { }) } + pub fn server_ipv4_addr(&self) -> Option { + match self.url.host()? 
{ + Host::Ipv4(addr) => Some(addr), + _ => None, + } + } + pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) } + pub fn listen_addresses(&self) -> Result, String> { + get_listen_addresses(self.url.clone()) + .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) + } + pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); @@ -124,3 +138,16 @@ fn get_enr(mut url: Url) -> Result { .json() .map_err(Into::into) } + +fn get_listen_addresses(mut url: Url) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("node").push("network").push("listen_addresses"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 0d5d5f81de..5dd0eef52f 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,8 +1,9 @@ -use crate::Eth2Config; +use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; +use eth2_libp2p::multiaddr::{Multiaddr, Protocol}; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, Drain}; +use slog::{info, o, warn, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; @@ -149,6 +150,43 @@ impl Config { self.update_logger(log)?; }; + // If the `--bootstrap` flag is provided, overwrite the default configuration. + if let Some(server) = args.value_of("bootstrap") { + do_bootstrapping(self, server.to_string(), &log)?; + } + Ok(()) } } + +fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { + // Set the genesis state source. 
+ config.genesis_state = GenesisState::HttpBootstrap { + server: server.to_string(), + }; + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + config.network.boot_nodes.push(bootstrapper.enr()?); + + if let Some(server_ip) = bootstrapper.server_ipv4_addr() { + let server_multiaddr: Multiaddr = bootstrapper + .listen_addresses()? + .first() + .ok_or_else(|| "Bootstrap peer returned an empty list of listen addresses")? + // Iterate through the components of the Multiaddr, replacing any Ipv4 address with the + // server address. + .iter() + .map(|protocol| match protocol { + Protocol::Ip4(_) => Protocol::Ip4(server_ip), + _ => protocol, + }) + .collect::(); + + config.network.libp2p_nodes.push(server_multiaddr); + } else { + warn!(log, "Unable to determine bootstrap server Ipv4 address. Unable to add server as libp2p peer."); + } + + Ok(()) +} diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 8c2644fbbc..4c84469cea 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -23,7 +23,7 @@ pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, - PeerId, + PeerId, Swarm, }; pub use rpc::RPCEvent; pub use service::Libp2pEvent; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ed3c9da0b3..4bec038309 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; -use eth2_libp2p::{Enr, Libp2pEvent, PeerId}; +use eth2_libp2p::{Enr, Libp2pEvent, Multiaddr, PeerId, Swarm}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; @@ -64,6 +64,8 @@ impl Service { Ok((Arc::new(network_service), network_send)) } + /// Returns the local ENR from the 
underlying Discv5 behaviour that external peers may connect + /// to. pub fn local_enr(&self) -> Enr { self.libp2p_service .lock() @@ -73,10 +75,19 @@ impl Service { .clone() } + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. + pub fn listen_multiaddrs(&self) -> Vec { + Swarm::listeners(&self.libp2p_service.lock().swarm) + .cloned() + .collect() + } + + /// Returns the number of libp2p connected peers. pub fn connected_peers(&self) -> usize { self.libp2p_service.lock().swarm.connected_peers() } + /// Returns the set of `PeerId` that are connected via libp2p. pub fn connected_peer_set(&self) -> Vec { self.libp2p_service .lock() @@ -88,6 +99,7 @@ impl Service { .collect() } + /// Provides a reference to the underlying libp2p service. pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 349a62c3fe..8ef48ad72c 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -135,6 +135,9 @@ pub fn start_server( (&Method::GET, "/node/network/enr") => network::get_enr::(req), (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/node/network/listen_addresses") => { + network::get_listen_addresses::(req) + } (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 2fd88f4985..0e2448270c 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,9 +1,28 @@ use crate::{success_response, ApiError, ApiResult, NetworkService}; use beacon_chain::BeaconChainTypes; -use eth2_libp2p::{Enr, PeerId}; +use eth2_libp2p::{Enr, Multiaddr, PeerId}; use hyper::{Body, 
Request}; use std::sync::Arc; +/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// +/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. +pub fn get_listen_addresses( + req: Request, +) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let multiaddresses: Vec = network.listen_multiaddrs(); + + Ok(success_response(Body::from( + serde_json::to_string(&multiaddresses) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5199bddb6e..ae48f692b6 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{Bootstrapper, ClientConfig, Eth2Config, GenesisState}; +use client::{ClientConfig, Eth2Config}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -298,32 +298,6 @@ fn main() { } }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = matches.value_of("bootstrap") { - // Set the genesis state source. 
- client_config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = match Bootstrapper::from_server_string(server.to_string()) { - Ok(b) => b, - Err(e) => { - crit!(log, "Failed to load bootstrapper"; "error" => format!("{:?}", e)); - return; - } - }; - - let enr = match bootstrapper.enr() { - Ok(b) => b, - Err(e) => { - crit!(log, "Failed to read ENR from bootstrap server"; "error" => format!("{:?}", e)); - return; - } - }; - - client_config.network.boot_nodes.push(enr); - } - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); // Initialise the `Eth2Config`. From 7cd963e6bb7ad35458defc94f3c6a24eb24f249c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 18:48:39 +1000 Subject: [PATCH 092/186] Update bootstrapper libp2p address finding --- beacon_node/client/src/bootstrapper.rs | 31 +++++++++++++++++++++++++- beacon_node/client/src/config.rs | 26 +++++++++------------ beacon_node/src/main.rs | 5 +++++ beacon_node/src/run.rs | 7 +----- 4 files changed, 46 insertions(+), 23 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 1fd8f16592..2c8cf6afc2 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -1,5 +1,9 @@ -use eth2_libp2p::{Enr, Multiaddr}; +use eth2_libp2p::{ + multiaddr::{Multiaddr, Protocol}, + Enr, +}; use reqwest::{Error as HttpError, Url}; +use std::borrow::Cow; use std::net::Ipv4Addr; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; use url::Host; @@ -27,6 +31,31 @@ impl Bootstrapper { }) } + pub fn best_effort_multiaddr(&self) -> Option { + let tcp_port = self.first_listening_tcp_port()?; + + let mut multiaddr = Multiaddr::with_capacity(2); + + match self.url.host()? 
{ + Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), + Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), + _ => return None, + }; + + multiaddr.push(Protocol::Tcp(tcp_port)); + + Some(multiaddr) + } + + fn first_listening_tcp_port(&self) -> Option { + self.listen_addresses().ok()?.iter().find_map(|multiaddr| { + multiaddr.iter().find_map(|protocol| match protocol { + Protocol::Tcp(port) => Some(port), + _ => None, + }) + }) + } + pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { Host::Ipv4(addr) => Some(addr), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5dd0eef52f..1a985fb4af 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,6 +1,5 @@ use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; -use eth2_libp2p::multiaddr::{Multiaddr, Protocol}; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use slog::{info, o, warn, Drain}; @@ -169,23 +168,18 @@ fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> config.network.boot_nodes.push(bootstrapper.enr()?); - if let Some(server_ip) = bootstrapper.server_ipv4_addr() { - let server_multiaddr: Multiaddr = bootstrapper - .listen_addresses()? - .first() - .ok_or_else(|| "Bootstrap peer returned an empty list of listen addresses")? - // Iterate through the components of the Multiaddr, replacing any Ipv4 address with the - // server address. - .iter() - .map(|protocol| match protocol { - Protocol::Ip4(_) => Protocol::Ip4(server_ip), - _ => protocol, - }) - .collect::(); - + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { + info!( + log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); config.network.libp2p_nodes.push(server_multiaddr); } else { - warn!(log, "Unable to determine bootstrap server Ipv4 address. 
Unable to add server as libp2p peer."); + warn!( + log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); } Ok(()) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ae48f692b6..04366baa7a 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -237,6 +237,11 @@ fn main() { let mut log = slog::Logger::root(drain.fuse(), o!()); + warn!( + log, + "Ethereum 2.0 is pre-release. This software is experimental." + ); + let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index c16d23e5f1..5066231d55 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -4,7 +4,7 @@ use client::{ }; use futures::sync::oneshot; use futures::Future; -use slog::{error, info, warn}; +use slog::{error, info}; use std::cell::RefCell; use std::path::Path; use std::path::PathBuf; @@ -42,11 +42,6 @@ pub fn run_beacon_node( let other_client_config = client_config.clone(); - warn!( - log, - "Ethereum 2.0 is pre-release. This software is experimental." - ); - info!( log, "BeaconNode init"; From c259d6c00637e6372cc75afd1c6cd2debe009424 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 18 Aug 2019 03:36:13 +1000 Subject: [PATCH 093/186] First draft sync re-write. 
WIP --- beacon_node/network/src/message_handler.rs | 10 +- beacon_node/network/src/sync/import_queue.rs | 307 ------- beacon_node/network/src/sync/manager.rs | 810 +++++++++++++------ beacon_node/network/src/sync/simple_sync.rs | 409 ++-------- 4 files changed, 661 insertions(+), 875 deletions(-) delete mode 100644 beacon_node/network/src/sync/import_queue.rs diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 6a9a403693..fd10c5aead 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -118,7 +118,14 @@ impl MessageHandler { hello_message, &mut self.network_context, ), - RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), + RPCRequest::Goodbye(goodbye_reason) => { + debug!( + self.log, "PeerGoodbye"; + "peer" => format!("{:?}", peer_id), + "reason" => format!("{:?}", reason), + ); + self.sync.on_disconnect(peer_id), + }, RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, @@ -167,6 +174,7 @@ impl MessageHandler { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, + request_id, beacon_blocks, &mut self.network_context, ); diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs deleted file mode 100644 index 5503ed64fc..0000000000 --- a/beacon_node/network/src/sync/import_queue.rs +++ /dev/null @@ -1,307 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::PeerId; -use slog::error; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; - -/// Provides a queue for fully and partially built `BeaconBlock`s. -/// -/// The queue is fundamentally a `Vec` where no two items have the same -/// `item.block_root`. 
This struct it backed by a `Vec` not a `HashMap` for the following two -/// reasons: -/// -/// - When we receive a `BeaconBlockBody`, the only way we can find it's matching -/// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body == -/// tree_hash_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of -/// `BeaconBlockBody` as the key. -/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore -/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. -pub struct ImportQueue { - pub chain: Arc>, - /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. - partials: HashMap>, - /// Time before a queue entry is considered state. - pub stale_time: Duration, - /// Logging - log: slog::Logger, -} - -impl ImportQueue { - /// Return a new, empty queue. - pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { - Self { - chain, - partials: HashMap::new(), - stale_time, - log, - } - } - - /// Returns true of the if the `BlockRoot` is found in the `import_queue`. - pub fn contains_block_root(&self, block_root: Hash256) -> bool { - self.partials.contains_key(&block_root) - } - - /// Attempts to complete the `BlockRoot` if it is found in the `import_queue`. - /// - /// Returns an Enum with a `PartialBeaconBlockCompletion`. - /// Does not remove the `block_root` from the `import_queue`. - pub fn attempt_complete_block( - &self, - block_root: Hash256, - ) -> PartialBeaconBlockCompletion { - if let Some(partial) = self.partials.get(&block_root) { - partial.attempt_complete() - } else { - PartialBeaconBlockCompletion::MissingRoot - } - } - - /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial - /// if it exists. - pub fn remove(&mut self, block_root: Hash256) -> Option> { - self.partials.remove(&block_root) - } - - /// Flushes all stale entries from the queue. 
- /// - /// An entry is stale if it has as a `inserted` time that is more than `self.stale_time` in the - /// past. - pub fn remove_stale(&mut self) { - let stale_time = self.stale_time; - - self.partials - .retain(|_, partial| partial.inserted + stale_time > Instant::now()) - } - - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool { - self.chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - true - }) - } - - /// Adds the `block_roots` to the partials queue. - /// - /// If a `block_root` is not in the queue and has not been processed by the chain it is added - /// to the queue and it's block root is included in the output. - pub fn enqueue_block_roots( - &mut self, - block_roots: &[BlockRootSlot], - sender: PeerId, - ) -> Vec { - // TODO: This will currently not return a `BlockRootSlot` if this root exists but there is no header. - // It would be more robust if it did. - let new_block_root_slots: Vec = block_roots - .iter() - // Ignore any roots already stored in the queue. - .filter(|brs| !self.contains_block_root(brs.block_root)) - // Ignore any roots already processed by the chain. - .filter(|brs| self.chain_has_not_seen_block(&brs.block_root)) - .cloned() - .collect(); - - self.partials.extend( - new_block_root_slots - .iter() - .map(|brs| PartialBeaconBlock { - slot: brs.slot, - block_root: brs.block_root, - sender: sender.clone(), - header: None, - body: None, - inserted: Instant::now(), - }) - .map(|partial| (partial.block_root, partial)), - ); - - new_block_root_slots - } - - /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for - /// which we should use to request `BeaconBlockBodies`. - /// - /// If a `header` is not in the queue and has not been processed by the chain it is added to - /// the queue and it's block root is included in the output. 
- /// - /// If a `header` is already in the queue, but not yet processed by the chain the block root is - /// not included in the output and the `inserted` time for the partial record is set to - /// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale. - pub fn enqueue_headers( - &mut self, - headers: Vec, - sender: PeerId, - ) -> Vec { - let mut required_bodies: Vec = vec![]; - - for header in headers { - let block_root = Hash256::from_slice(&header.canonical_root()[..]); - - if self.chain_has_not_seen_block(&block_root) - && !self.insert_header(block_root, header, sender.clone()) - { - // If a body is empty - required_bodies.push(block_root); - } - } - - required_bodies - } - - /// If there is a matching `header` for this `body`, adds it to the queue. - /// - /// If there is no `header` for the `body`, the body is simply discarded. - pub fn enqueue_bodies( - &mut self, - bodies: Vec>, - sender: PeerId, - ) -> Option { - let mut last_block_hash = None; - for body in bodies { - last_block_hash = self.insert_body(body, sender.clone()); - } - - last_block_hash - } - - pub fn enqueue_full_blocks(&mut self, blocks: Vec>, sender: PeerId) { - for block in blocks { - self.insert_full_block(block, sender.clone()); - } - } - - /// Inserts a header to the queue. - /// - /// If the header already exists, the `inserted` time is set to `now` and not other - /// modifications are made. - /// Returns true is `body` exists. 
- fn insert_header( - &mut self, - block_root: Hash256, - header: BeaconBlockHeader, - sender: PeerId, - ) -> bool { - let mut exists = false; - self.partials - .entry(block_root) - .and_modify(|partial| { - partial.header = Some(header.clone()); - partial.inserted = Instant::now(); - if partial.body.is_some() { - exists = true; - } - }) - .or_insert_with(|| PartialBeaconBlock { - slot: header.slot, - block_root, - header: Some(header), - body: None, - inserted: Instant::now(), - sender, - }); - exists - } - - /// Updates an existing partial with the `body`. - /// - /// If the body already existed, the `inserted` time is set to `now`. - /// - /// Returns the block hash of the inserted body - fn insert_body( - &mut self, - body: BeaconBlockBody, - sender: PeerId, - ) -> Option { - let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); - let mut last_root = None; - - self.partials.iter_mut().for_each(|(root, mut p)| { - if let Some(header) = &mut p.header { - if body_root == header.body_root { - p.inserted = Instant::now(); - p.body = Some(body.clone()); - p.sender = sender.clone(); - last_root = Some(*root); - } - } - }); - - last_root - } - - /// Updates an existing `partial` with the completed block, or adds a new (complete) partial. - /// - /// If the partial already existed, the `inserted` time is set to `now`. - fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.canonical_root()[..]); - - let partial = PartialBeaconBlock { - slot: block.slot, - block_root, - header: Some(block.block_header()), - body: Some(block.body), - inserted: Instant::now(), - sender, - }; - - self.partials - .entry(block_root) - .and_modify(|existing_partial| *existing_partial = partial.clone()) - .or_insert(partial); - } -} - -/// Individual components of a `BeaconBlock`, potentially all that are required to form a full -/// `BeaconBlock`. 
-#[derive(Clone, Debug)] -pub struct PartialBeaconBlock { - pub slot: Slot, - /// `BeaconBlock` root. - pub block_root: Hash256, - pub header: Option, - pub body: Option>, - /// The instant at which this record was created or last meaningfully modified. Used to - /// determine if an entry is stale and should be removed. - pub inserted: Instant, - /// The `PeerId` that last meaningfully contributed to this item. - pub sender: PeerId, -} - -impl PartialBeaconBlock { - /// Attempts to build a block. - /// - /// Does not comsume the `PartialBeaconBlock`. - pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { - if self.header.is_none() { - PartialBeaconBlockCompletion::MissingHeader(self.slot) - } else if self.body.is_none() { - PartialBeaconBlockCompletion::MissingBody - } else { - PartialBeaconBlockCompletion::Complete( - self.header - .clone() - .unwrap() - .into_block(self.body.clone().unwrap()), - ) - } - } -} - -/// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`. -pub enum PartialBeaconBlockCompletion { - /// The partial contains a valid BeaconBlock. - Complete(BeaconBlock), - /// The partial does not exist. - MissingRoot, - /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`. - MissingHeader(Slot), - /// The partial contains a `BeaconBlockRoot` and `BeaconBlockHeader` but no `BeaconBlockBody`. - MissingBody, -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 52c1a72c6f..a4ce544ec3 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,283 +1,639 @@ +const MAX_BLOCKS_PER_REQUEST: usize = 10; -const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; -const SIMULTANEOUS_REQUESTS: usize = 10; -use super::simple_sync::FUTURE_SLOT_TOLERANCE; +/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. 
+const SLOT_IMPORT_TOLERANCE: u64 = 10; -struct Chunk { - id: usize, - start_slot: Slot, - end_slot: Slot, - } +const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; - -struct CompletedChunk { - peer_id: PeerId, - chunk: Chunk, - blocks: Vec, +enum BlockRequestsState { + QueuedForward, + QueuedBackward, + Pending(RequestId), + Complete, } -struct ProcessedChunk { - peer_id: PeerId, - chunk: Chunk, +struct BlockRequests { + target_head_slot: Slot + target_head_root: Hash256, + downloaded_blocks: Vec, + state: State, } -#[derive(PartialEq)] -pub enum SyncState { - Idle, - Downloading, - ColdSync { - max_wanted_slot: Slot, - max_wanted_hash: Hash256, +struct ParentRequests { + downloaded_blocks: Vec, + attempts: usize, + last_submitted_peer: PeerId, // to downvote the submitting peer. + state: BlockRequestsState, +} + +impl BlockRequests { + + // gets the start slot for next batch + // last block slot downloaded plus 1 + fn next_start_slot(&self) -> Option { + if !self.downloaded_blocks.is_empty() { + match self.state { + BlockRequestsState::QueuedForward => { + let last_element_index = self.downloaded_blocks.len() -1; + Some(downloaded_blocks[last_element_index].slot.add(1)) + } + BlockRequestsState::QueuedBackward => { + let earliest_known_slot = self.downloaded_blocks[0].slot; + Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) + } + } + } + else { + None + } } } -pub enum SyncManagerState { - RequestBlocks(peer_id, BeaconBlockRequest), +enum ManagerState { + Syncing, + Regular, Stalled, - Idle, } -pub struct PeerSyncInfo { - peer_id: PeerId, - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, +enum ImportManagerOutcome { + Idle, + RequestBlocks{ + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, + }, + RecentRequest(PeerId, RecentBeaconBlocksRequest), + 
DownvotePeer(PeerId), } -pub(crate) struct SyncManager { + +pub struct ImportManager { /// A reference to the underlying beacon chain. chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - available_peers: HashMap, - wanted_chunks: Vec, - pending_chunks: HashMap, - completed_chunks: Vec, - processed_chunks: Vec, // ordered - multi_peer_sections: HashMap - - current_requests: usize, - latest_wanted_slot: Option, - sync_status: SyncStatus, - to_process_chunk_id: usize, + state: MangerState, + import_queue: HashMap, + parent_queue: Vec, + full_peers: Hashset, + current_req_id: usize, log: Logger, - } -impl SyncManager { - /// Adds a sync-able peer and determines which blocks to download given the current state of - /// the chain, known peers and currently requested blocks. - fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { +impl ImportManager { + pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + // TODO: Improve comments. + // initially try to download blocks from our current head + // then backwards search all the way back to our finalized epoch until we match on a chain + // has to be done sequentially to find next slot to start the batch from + let local = PeerSyncInfo::from(&self.chain); - let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - // cold sync - if remote_finalized_slot > local.head_slot { - if let SyncState::Idle || SyncState::Downloading = self.sync_state { - info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} - } - - if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { - - // We don't assume that our current head is the canonical chain. 
So we request blocks from - // our last finalized slot to ensure we are on the finalized chain. - if max_wanted_slot < remote_finalized_slot { - let remaining_blocks = remote_finalized_slot - max_wanted_slot; - for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { - self.wanted_chunks.push( - Chunk { - id: self.current_chunk_id, - previous_chunk: self.curent_chunk_id.saturating_sub(1), - start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, - end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, - }) - self.current_chunk_id +=1; - } - - // add any extra partial chunks - self.pending_section.push( Section { - start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, - end_slot: remote_finalized_slot, - }) - self.current_chunk_id +=1; - - info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - - self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} - } + // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + trace!(self.log, "Ignoring full sync with peer"; + "peer" => peer_id, + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); + // remove the peer from the queue if it exists + self.import_queue.remove(&peer_id); + return; } - else { // hot sync - if remote_head_slot > self.chain.head().beacon_state.slot { - if let SyncState::Idle = self.sync_state { - self.sync_state = SyncState::Downloading - info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { + // update the target head slot + if remote.head_slot > requested_block.target_head_slot { + block_requests.target_head_slot = remote.head_slot; } - self.latest_known_slot = remote_head_slot; - //TODO Build requests. 
+ } else { + let block_requests = BlockRequests { + target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called + target_head_root: remote.head_root, + downloaded_blocks: Vec::new(), + state: RequestedBlockState::Queued } + self.import_queue.insert(peer_id, block_requests); } - available_peers.push(remote); - } - pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { - - if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { - - let chunk = match self.pending_chunks.remove(&peer_id) { - Some(chunks) => { - match chunks.find(|chunk| chunk.id == chunk_id) { - Some(chunk) => chunk, - None => { - warn!(self.log, "Received blocks for an unknown chunk"; - "peer"=> peer_id); - return; - } - } - }, - None => { - warn!(self.log, "Received blocks without a request"; - "peer"=> peer_id); + pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let block_requests = match self.import_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; - } - }; + } + }; - // add to completed - self.current_requests -= 1; - self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + // The response should contain at least one block. + // + // If we are syncing up to a target head block, at least the target head block should be + // returned. If we are syncing back to our last finalized block the request should return + // at least the last block we received (last known block). 
In diagram form: + // + // unknown blocks requested blocks downloaded blocks + // |-------------------|------------------------|------------------------| + // ^finalized slot ^ requested start slot ^ last known block ^ remote head + + if blocks.is_empty() { + warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Add the newly downloaded blocks to the current list of downloaded blocks. This also + // determines if we are syncing forward or backward. + let syncing_forwards = { + if block_requests.blocks.is_empty() { + block_requests.blocks.push(blocks); + true + } + else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_slot() > blocks[0].slot { + warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); + block_requests.state = RequestedBlockState::Failed; + return; + } + + block_requests.blocks.push(blocks); + true + } + else { false } + }; + + + // Determine if more blocks need to be downloaded. There are a few cases: + // - We have downloaded a batch from our head_slot, which has not reached the remotes head + // (target head). Therefore we need to download another sequential batch. + // - The latest batch includes blocks that greater than or equal to the target_head slot, + // which means we have caught up to their head. We then check to see if the first + // block downloaded matches our head. If so, we are on the same chain and can process + // the blocks. If not we need to sync back further until we are on the same chain. So + // request more blocks. 
+ // - We are syncing backwards (from our head slot) and need to check if we are on the same + // chain. If so, process the blocks, if not, request more blocks all the way up to + // our last finalized slot. + + if syncing_forwards { + // does the batch contain the target_head_slot + let last_element_index = block_requests.blocks.len()-1; + if block_requests[last_element_index].slot >= block_requests.target_slot { + // if the batch is on our chain, this is complete and we can then process. + // Otherwise start backwards syncing until we reach a common chain. + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + + // not on the same chain, request blocks backwards + // binary search, request half the distance between the earliest block and our + // finalized slot + let state = &beacon_chain.head().beacon_state; + let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.blocks[0] { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Start a backwards sync by requesting earlier blocks + // There can be duplication in downloaded blocks here if there are a large number + // of skip slots. In all cases we at least re-download the earliest known block. + // It is unlikely that a backwards sync in required, so we accept this duplication + // for now. + block_requests.state = RequestedBlockState::QueuedBackward; + } + else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = RequestedBlockState::QueuedForward; + } + } + else { + // syncing backwards + // if the batch is on our chain, this is complete and we can then process. 
+ // Otherwise continue backwards + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + block_requests.state = RequestedBlockState::QueuedBackward; + } } - pub fn inject_error(id: RequestId, peer_id) { - if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { - match self.pending_chunks.get(&peer_id) { - Some(chunks) => { - if let Some(pos) = chunks.iter().position(|c| c.id == id) { - chunks.remove(pos); - } - }, - None => { - debug!(self.log, - "Received an error for an unknown request"; - "request_id" => id, - "peer" => peer_id - ); + pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let parent_request = match self.parent_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); + return; + } + }; + + // if an empty response is given, the peer didn't have the requested block, try again + if blocks.is_empty() { + parent_request.attempts += 1; + parent_request.state = RequestedBlockState::QueuedForward; + parent_request.last_submitted_peer = peer_id; + return; + } + + // currently only support a single block lookup. Reject any response that has more than 1 + // block + if blocks.len() != 1 { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; + "peer_id" => peer_id, + "no_parents" => blocks.len() + ); + return; + } + + + // queue for processing + parent_request.state = RequestedBlockState::Complete; + } + + + pub fn inject_error(peer_id: PeerId, id: RequestId) { + //TODO: Remove block state from pending + } + + pub fn peer_disconnect(peer_id: PeerId) { + self.import_queue.remove(&peer_id); + self.full_peers.remove(&peer_id); + self.update_state(); + } + + pub fn add_full_peer(peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + self.update_state(); + } + + pub fn add_unknown_block(&mut self,block: BeaconBlock) { + // if we are not in regular sync mode, ignore this block + if self.state == ManagerState::Regular { + return; + } + + // make sure this block is not already being searched for + // TODO: Potentially store a hashset of blocks for O(1) lookups + for parent_req in self.parent_queue.iter() { + if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + // we are already searching for this block, ignore it + return; + } + } + + let req = ParentRequests { + downloaded_blocks: vec![block], + failed_attempts: 0, + state: RequestedBlockState::QueuedBackward + } + + self.parent_queue.push(req); + } + + pub fn poll() -> ImportManagerOutcome { + + loop { + // update the state of the manager + self.update_state(); + + // process potential block requests + if let Some(outcome) = self.process_potential_block_requests() { + return outcome; + } + + // process any complete long-range batches + if let Some(outcome) = self.process_complete_batches() { + return outcome; + } + + // process any parent block lookup-requests + if let Some(outcome) = self.process_parent_requests() { + return outcome; + } + + // process any complete parent lookups + if let (re_run, outcome) = self.process_complete_parent_requests() { + if let Some(outcome) = outcome { + return outcome; + } + else if 
!re_run { + break; } } } + + return ImportManagerOutcome::Idle; + } - pub fn poll(&mut self) -> SyncManagerState { - // if cold sync - if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + fn update_state(&mut self) { + let previous_state = self.state; + self.state = { + if !self.import_queue.is_empty() { + ManagerState::Syncing + } + else if !self.full_peers.is_empty() { + ManagerState::Regualar + } + else { + ManagerState::Stalled } + }; + if self.state != previous_state { + info!(self.log, "Syncing state updated", + "old_state" => format!("{:?}", previous_state) + "new_state" => format!("{:?}", self.state) + ); + } + } - // Try to process completed chunks - for completed_chunk in self.completed_chunks { - let chunk = completed_chunk.1; - let last_chunk_id = { - let no_processed_chunks = self.processed_chunks.len(); - if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } - }; - if chunk.id == last_chunk_id + 1 { - // try and process the chunk - for block in chunk.blocks { - let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutCome::Processed { block_root} => { - // block successfully processed - }, - BlockProcessingOutcome::BlockIsAlreadyKnown => { - warn!( - self.log, "Block Already Known"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - ); - }, - _ => { - // An error has occurred - // This could be due to the previous chunk or the current chunk. - // Re-issue both. - warn!( - self.log, "Faulty Chunk"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - "outcome" => format!("{:?}", outcome), - ); - // re-issue both chunks - // if both are the same peer. Downgrade the peer. 
- let past_chunk = self.processed_chunks.pop() - self.wanted_chunks.insert(0, chunk.clone()); - self.wanted_chunks.insert(0, past_chunk.clone()); - if chunk.0 == past_chunk.peer_id { - // downgrade peer - return SyncManagerState::DowngradePeer(chunk.0); - } - break; - } - } - } - } - // chunk successfully processed - debug!(self.log, - "Chunk Processed"; - "id" => chunk.id - "start_slot" => chunk.start_slot, - "end_slot" => chunk.end_slot, + fn process_potential_block_requests(&mut self) -> Option { + // check if an outbound request is required + // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p + // layer and not needed here. + // If any in queued state we submit a request. + + + // remove any failed batches + self.import_queue.retain(|peer_id, block_request| { + if block_request.state == RequestedBlockState::Failed { + debug!(self.log, "Block import from peer failed", + "peer_id" => peer_id, + "downloaded_blocks" => block_request.downloaded.blocks.len() ); - self.processed_chunks.push(chunk); - } + false } + else { true } + }); - // chunks completed, update the state - self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; - // Remove stales + for (peer_id, block_requests) in self.import_queue.iter_mut() { + if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - // Spawn requests - if self.current_requests <= SIMULTANEOUS_REQUESTS { - if !self.wanted_chunks.is_empty() { - let chunk = self.wanted_chunks.remove(0); - for n in (0..self.peers.len()).rev() { - let peer = self.peers.swap_remove(n); - let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - if peer_finalized_slot >= chunk.end_slot { - *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); - self.active_peers.push(peer); - self.current_requests +=1; - let block_request = 
BeaconBlockRequest { - head_block_root, - start_slot: chunk.start_slot, - count: chunk.end_slot - chunk.start_slot - step: 1 - } - return SyncManagerState::BlockRequest(peer, block_request); - } - } - // no peers for this chunk - self.wanted_chunks.push(chunk); - return SyncManagerState::Stalled + let request.state = RequestedBlockState::Pending(self.current_req_id); + self.current_req_id +=1; + + let req = BeaconBlocksRequest { + head_block_root: request.target_root, + start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), + count: MAX_BLOCKS_PER_REQUEST, + step: 0 } + return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); } } - // if hot sync - return SyncManagerState::Idle + None + } + + fn process_complete_batches(&mut self) -> Option { + + let completed_batches = self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + for peer_id in completed_batches { + let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); + match self.process_blocks(block_requests.downloaded_blocks) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + last_element = block_requests.downloaded_blocks.len() -1 + debug!(self.log, "Blocks processed successfully"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); + } + Err(e) => { + last_element = block_requests.downloaded_blocks.len() -1 + warn!(self.log, "Block processing failed"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return 
Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } + } + None + } + + + fn process_parent_requests(&mut self) -> Option { + + // remove any failed requests + self.parent_queue.retain(|parent_request| { + if parent_request.state == RequestedBlockState::Failed { + debug!(self.log, "Parent import failed", + "block" => parent_request.downloaded_blocks[0].hash, + "siblings found" => parent_request.len() + ); + false + } + else { true } + }); + + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return; + } + + // check if parents need to be searched for + for parent_request in self.parent_queue.iter_mut() { + if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + parent_request.state == BlockRequestsState::Failed + continue; + } + else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state = BlockRequestsState::Pending(self.current_req_id); + self.current_req_id +=1; + let parent_hash = + let req = RecentBeaconBlocksRequest { + block_roots: vec![parent_hash], + }; + + // select a random fully synced peer to attempt to download the parent block + let peer_id = self.full_peers.iter().next().expect("List is not empty"); + + return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + } + } + + None + } + + + fn process_complete_parent_requests(&mut self) => (bool, Option) { + + // flag to determine if there is more process to drive or if the manager can be switched to + // an idle state + let mut re_run = false; + + // verify the last added block is the parent of the last requested block + let last_index = parent_requests.downloaded_blocks.len() -1; + let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; + let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); + if block_hash != expected_hash { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent invalid parent. 
Ignoring"; + "peer_id" => peer_id, + "received_block" => block_hash, + "expected_parent" => expected_hash, + ); + return; + } + + // Find any parent_requests ready to be processed + for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + // try and process the list of blocks up to the requested block + while !completed_request.downloaded_blocks.is_empty() { + let block = completed_request.downloaded_blocks.pop(); + match self.chain_process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + break; + } + Ok(BlockProcessingOutcome::Processed { _ } => { } + Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts +=1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome); + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + Err(e) => { + completed_request.failed_attempts +=1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e); + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + } + } + } + + // remove any full completed and processed parent chains + self.parent_queue.retain(|req| if req.state == BlockRequestsState::Complete { false } else { true }); + (re_run, None) } + + + fn process_blocks( + &mut self, + blocks: Vec>, + ) -> Result<(), String> { + + for block in blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let 
Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + self.log, "Imported block from network"; + "source" => source, + "slot" => block.slot, + "block_root" => format!("{}", block_root), + "peer" => format!("{:?}", peer_id), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "source" => source, + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!("Block at slot {} has an unknown parent.", block.slot)); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "source" => source, + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Block at slot {} is too far in the future", block.slot)); + } else { + // The block is in the future, but not too far. 
+ trace!( + self.log, "QueuedFutureBlock"; + "source" => source, + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "source" => source, + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } + } + Ok(()) + } else { + trace!( + self.log, "BlockProcessingFailure"; + "source" => source, + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!("Unexpected block processing error: {:?}", processing_result)); + } + } + } +} diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 6e5cada236..a7f5ced401 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -14,11 +14,6 @@ use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 100; - -/// The amount of seconds a block may exist in the import queue. -const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. @@ -35,9 +30,11 @@ pub struct PeerSyncInfo { finalized_epoch: Epoch, head_root: Hash256, head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, } + + + impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -69,10 +66,7 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. 
chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - known_peers: HashMap, - /// The current state of the syncing protocol. - state: SyncState, + manager: ImportManager, log: slog::Logger, } @@ -81,49 +75,24 @@ impl SimpleSync { pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); - let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS); - - let import_queue = - ImportQueue::new(beacon_chain.clone(), queue_item_stale_time, log.clone()); SimpleSync { chain: beacon_chain.clone(), - known_peers: HashMap::new(), - import_queue, - state: SyncState::Idle, + manager: ImportManager::new(), log: sync_logger, } } - /// Handle a `Goodbye` message from a peer. - /// - /// Removes the peer from `known_peers`. - pub fn on_goodbye(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - info!( - self.log, "PeerGoodbye"; - "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), - ); - - self.known_peers.remove(&peer_id); - } - /// Handle a peer disconnect. /// - /// Removes the peer from `known_peers`. + /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - info!( - self.log, "Peer Disconnected"; - "peer" => format!("{:?}", peer_id), - ); - self.known_peers.remove(&peer_id); + self.manager.peer_disconnect(&peer_id); } /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } @@ -137,7 +106,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. 
network.send_rpc_response( @@ -156,7 +125,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. self.process_hello(peer_id, hello, network); @@ -178,7 +147,7 @@ impl SimpleSync { if local.fork_version != remote.fork_version { // The node is on a different network/fork, disconnect them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "network_id" @@ -195,7 +164,7 @@ impl SimpleSync { // different to the one in our chain. // // Therefore, the node is on a different chain and we should not communicate with them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" @@ -227,13 +196,10 @@ impl SimpleSync { .exists::>(&remote.best_root) .unwrap_or_else(|_| false) { - // If the node's best-block is already known to us, we have nothing to request. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "best block is known" - ); + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + self.import_manager.add_full_peer(peer_id); + self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
// @@ -246,43 +212,60 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - + self.import_manager.add_peer(peer_id, remote); self.process_sync(); } } self.proess_sync(&mut self) { loop { - match self.sync_manager.poll() { - SyncManagerState::RequestBlocks(peer_id, req) { - debug!( + match self.import_manager.poll() { + ImportManagerOutcome::RequestBlocks(peer_id, req) { + trace!( self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), + "RPC Request"; + "method" => "BeaconBlocks", + "count" => req.count, "peer" => format!("{:?}", peer_id) ); network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); }, - SyncManagerState::Stalled { - // need more peers to continue sync - warn!(self.log, "No useable peers for sync"); - break; + ImportManagerOutcome::RecentRequest(peer_id, req) { + trace!( + self.log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + }, + ImportManagerOutcome::DownvotePeer(peer_id) { + trace!( + self.log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + network.disconnect(peer_id.clone(), GoodbyeReason::Fault); }, SyncManagerState::Idle { // nothing to do - break; + return; } } } } + /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } + */ /// Handle a `BeaconBlocks` request from the peer. 
pub fn on_beacon_blocks_request( @@ -346,8 +329,8 @@ impl SimpleSync { pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, + request_id: RequestId, res: Vec>, - network: &mut NetworkContext, ) { debug!( self.log, @@ -356,9 +339,26 @@ impl SimpleSync { "count" => res.block_bodies.len(), ); - if !res.is_empty() { - self.sync_manager.add_blocks(peer_id, blocks); - } + self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + + self.process_sync(); + } + + /// Handle a `RecentBeaconBlocks` response from the peer. + pub fn on_recent_beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + res: Vec>, + ) { + debug!( + self.log, + "BeaconBlocksResponse"; + "peer" => format!("{:?}", peer_id), + "count" => res.block_bodies.len(), + ); + + self.import_manager.recent_blocks_response(peer_id, request_id, blocks); self.process_sync(); } @@ -372,7 +372,6 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -380,53 +379,17 @@ impl SimpleSync { match outcome { BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, BlockProcessingOutcome::ParentUnknown { parent } => { - // Add this block to the queue - self.import_queue - .enqueue_full_blocks(vec![block.clone()], peer_id.clone()); - debug!( - self.log, "RequestParentBlock"; - "parent_root" => format!("{}", parent), - "parent_slot" => block.slot - 1, - "peer" => format!("{:?}", peer_id), - ); - - // Request roots between parent and start of finality from peer. 
- let start_slot = self - .chain - .head() - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - // Request blocks between `latest_finalized_slot` and the `block` - start_slot, - count: block.slot.as_u64() - start_slot.as_u64(), - }, - network, - ); - - // Clean the stale entries from the queue. - self.import_queue.remove_stale(); - + // Inform the sync manager to find parents for this block + self.import_manager.add_unknown_block(block.clone()); SHOULD_FORWARD_GOSSIP_BLOCK } - BlockProcessingOutcome::FutureSlot { present_slot, block_slot, } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { - self.import_queue - .enqueue_full_blocks(vec![block], peer_id.clone()); - + //TODO: Decide the logic here SHOULD_FORWARD_GOSSIP_BLOCK } - // Note: known blocks are forwarded on the gossip network. - // - // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped - // blocks. BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, } @@ -457,48 +420,8 @@ impl SimpleSync { } } - /// Request some `BeaconBlockRoots` from the remote peer. - fn request_block_roots( - &mut self, - peer_id: PeerId, - req: BeaconBlockRootsRequest, - network: &mut NetworkContext, - ) { - // Potentially set state to sync. - if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { - debug!(self.log, "Entering downloading sync state."); - self.state = SyncState::Downloading; - } - - debug!( - self.log, - "RPCRequest(BeaconBlockRoots)"; - "count" => req.count, - "peer" => format!("{:?}", peer_id) - ); - - // TODO: handle count > max count. - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockRoots(req)); - } - - /// Request some `BeaconBlockHeaders` from the remote peer. 
- fn request_block_headers( - &mut self, - peer_id: PeerId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockHeaders)"; - "max_headers" => req.max_headers, - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); - } - +/* /// Returns `true` if `self.chain` has not yet processed this block. pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { !self @@ -509,207 +432,13 @@ impl SimpleSync { false }) } + */ /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - /// Helper function to attempt to process a partial block. - /// - /// If the block can be completed recursively call `process_block` - /// else request missing parts. - fn attempt_process_partial_block( - &mut self, - peer_id: PeerId, - block_root: Hash256, - network: &mut NetworkContext, - source: &str, - ) -> Option { - match self.import_queue.attempt_complete_block(block_root) { - PartialBeaconBlockCompletion::MissingBody => { - // Unable to complete the block because the block body is missing. - debug!( - self.log, "RequestParentBody"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block body from the peer. - self.request_block_bodies( - peer_id, - BeaconBlockBodiesRequest { - block_roots: vec![block_root], - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingHeader(slot) => { - // Unable to complete the block because the block header is missing. - debug!( - self.log, "RequestParentHeader"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block header from the peer. 
- self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: block_root, - start_slot: slot, - max_headers: 1, - skip_slots: 0, - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingRoot => { - // The `block_root` is not known to the queue. - debug!( - self.log, "MissingParentRoot"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Do nothing. - - None - } - PartialBeaconBlockCompletion::Complete(block) => { - // The block exists in the queue, attempt to process it - trace!( - self.log, "AttemptProcessParent"; - "source" => source, - "block_root" => format!("{}", block_root), - "parent_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - self.process_block(peer_id.clone(), block, network, source) - } - } - } - - /// Processes the `block` that was received from `peer_id`. - /// - /// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is - /// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that - /// the block was successfully processed or valid. - /// - /// This function performs the following duties: - /// - /// - Attempting to import the block into the beacon chain. - /// - Logging - /// - Requesting unavailable blocks (e.g., if parent is unknown). - /// - Disconnecting faulty nodes. - /// - /// This function does not remove processed blocks from the import queue. - fn process_block( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, - source: &str, - ) -> Option { - let processing_result = self.chain.process_block(block.clone()); - - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- debug!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // The parent has not been processed - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - // If the parent is in the `import_queue` attempt to complete it then process it. - // All other cases leave `parent` in `import_queue` and return original outcome. - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, parent, network, source) - { - // If processing parent is successful, re-process block and remove parent from queue - self.import_queue.remove(parent); - - // Attempt to process `block` again - match self.chain.process_block(block) { - Ok(outcome) => return Some(outcome), - Err(_) => return None, - } - } - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - network.disconnect(peer_id, GoodbyeReason::Fault); - } else { - // The block is in the future, but not too far. 
- debug!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - } - } - _ => { - debug!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - } - } - - Some(outcome) - } else { - error!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - - None - } - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. From a8daf46d5f557d45d1add6c974d654d366e31a6f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 21 Aug 2019 14:48:49 +1000 Subject: [PATCH 094/186] Add comments --- beacon_node/client/src/bootstrapper.rs | 21 +++++++++++++++++++++ beacon_node/client/src/config.rs | 2 ++ 2 files changed, 23 insertions(+) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 2c8cf6afc2..9843ceec77 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -20,17 +20,31 @@ impl From for Error { } } +/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. +/// +/// Bootstrapping information includes things like genesis and finalized states and blocks, and +/// libp2p connection details. pub struct Bootstrapper { url: Url, } impl Bootstrapper { + /// Parses the given `server` as a URL, instantiating `Self`. pub fn from_server_string(server: String) -> Result { Ok(Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, }) } + /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. 
+ /// + /// The address is created by querying the HTTP server for it's listening libp2p addresses. + /// Then, we find the first TCP port in those addresses and combine the port with the URL of + /// the server. + /// + /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of + /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of + /// `/ipv4/172.0.0.1/tcp/9000`. pub fn best_effort_multiaddr(&self) -> Option { let tcp_port = self.first_listening_tcp_port()?; @@ -47,6 +61,8 @@ impl Bootstrapper { Some(multiaddr) } + /// Reads the server's listening libp2p addresses and returns the first TCP port protocol it + /// finds, if any. fn first_listening_tcp_port(&self) -> Option { self.listen_addresses().ok()?.iter().find_map(|multiaddr| { multiaddr.iter().find_map(|protocol| match protocol { @@ -56,6 +72,7 @@ impl Bootstrapper { }) } + /// Returns the IPv4 address of the server URL, unless it contains a FQDN. pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { Host::Ipv4(addr) => Some(addr), @@ -63,15 +80,18 @@ impl Bootstrapper { } } + /// Returns the servers ENR address. pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) } + /// Returns the servers listening libp2p addresses. pub fn listen_addresses(&self) -> Result, String> { get_listen_addresses(self.url.clone()) .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) } + /// Returns the genesis block and state. pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); @@ -83,6 +103,7 @@ impl Bootstrapper { Ok((state, block)) } + /// Returns the most recent finalized state and block. 
pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { let slots_per_epoch = get_slots_per_epoch(self.url.clone()) .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1a985fb4af..ea8186dbc9 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -158,6 +158,8 @@ impl Config { } } +/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and +/// adding them to the `config`. fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { // Set the genesis state source. config.genesis_state = GenesisState::HttpBootstrap { From b912e26b7938270392e251f213cc50278aa0cc99 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 14:37:47 +1000 Subject: [PATCH 095/186] Tidy API to be more consistent with recent decisions --- beacon_node/eth2-libp2p/src/service.rs | 4 +- beacon_node/network/src/service.rs | 5 ++ beacon_node/rest_api/src/beacon.rs | 64 +++++++++++++++++++++----- beacon_node/rest_api/src/helpers.rs | 13 ++++-- beacon_node/rest_api/src/lib.rs | 15 +++--- beacon_node/rest_api/src/network.rs | 17 +++++++ 6 files changed, 92 insertions(+), 26 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa05798..e1e112e2d8 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -33,7 +33,7 @@ pub struct Service { //TODO: Make this private pub swarm: Swarm, /// This node's PeerId. - _local_peer_id: PeerId, + pub local_peer_id: PeerId, /// The libp2p logger handle. 
pub log: slog::Logger, } @@ -113,7 +113,7 @@ impl Service { info!(log, "Subscribed to topics: {:?}", subscribed_topics); Ok(Service { - _local_peer_id: local_peer_id, + local_peer_id, swarm, log, }) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4bec038309..dc7e941409 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -75,6 +75,11 @@ impl Service { .clone() } + /// Returns the local libp2p PeerID. + pub fn local_peer_id(&self) -> PeerId { + self.libp2p_service.lock().local_peer_id.clone() + } + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. pub fn listen_multiaddrs(&self) -> Vec { Swarm::listeners(&self.libp2p_service.lock().swarm) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 66e31ae41c..88427c9a4b 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -2,25 +2,44 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; +use serde::Serialize; use std::sync::Arc; use store::Store; -use types::{BeaconBlock, BeaconState}; +use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; + +#[derive(Serialize)] +struct HeadResponse { + pub slot: Slot, + pub block_root: Hash256, + pub state_root: Hash256, +} /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. 
-pub fn get_best_slot(req: Request) -> ApiResult { +pub fn get_head(req: Request) -> ApiResult { let beacon_chain = req .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let slot = beacon_chain.head().beacon_state.slot; + let head = HeadResponse { + slot: beacon_chain.head().beacon_state.slot, + block_root: beacon_chain.head().beacon_block_root, + state_root: beacon_chain.head().beacon_state_root, + }; - let json: String = serde_json::to_string(&slot) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Slot: {:?}", e)))?; + let json: String = serde_json::to_string(&head) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; Ok(success_response(Body::from(json))) } +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block(req: Request) -> ApiResult { let beacon_chain = req @@ -58,8 +77,14 @@ pub fn get_block(req: Request) -> ApiResult )) })?; - let json: String = serde_json::to_string(&block) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconBlock: {:?}", e)))?; + let response = BlockResponse { + root: block_root, + beacon_block: block, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) + })?; Ok(success_response(Body::from(json))) } @@ -89,6 +114,13 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// /// Will not return a state if the request slot is in the future. 
Will return states higher than @@ -102,21 +134,29 @@ pub fn get_state(req: Request) -> ApiResult let query_params = ["root", "slot"]; let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - let state: BeaconState = match (key.as_ref(), value) { + let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, ("root", value) => { let root = &parse_root(&value)?; - beacon_chain + let state = beacon_chain .store .get(root)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))? + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?; + + (*root, state) } _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let json: String = serde_json::to_string(&state) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconState: {:?}", e)))?; + let response = StateResponse { + root, + beacon_state: state, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) + })?; Ok(success_response(Body::from(json))) } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 2a429076c7..a65c7c1ac9 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -31,22 +31,25 @@ pub fn parse_root(string: &str) -> Result { } } -/// Returns a `BeaconState` in the canonical chain of `beacon_chain` at the given `slot`, if -/// possible. +/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn state_at_slot( beacon_chain: &BeaconChain, slot: Slot, -) -> Result, ApiError> { +) -> Result<(Hash256, BeaconState), ApiError> { let head_state = &beacon_chain.head().beacon_state; if head_state.slot == slot { // The request slot is the same as the best block (head) slot. // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary. - Ok(beacon_chain.head().beacon_state.clone()) + Ok(( + beacon_chain.head().beacon_state_root, + beacon_chain.head().beacon_state.clone(), + )) } else { let root = state_root_at_slot(beacon_chain, slot)?; @@ -55,7 +58,7 @@ pub fn state_at_slot( .get(&root)? .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - Ok(state) + Ok((root, state)) } } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 8ef48ad72c..839aa7abca 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -121,7 +121,7 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { - (&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), + (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { @@ -130,14 +130,15 @@ pub fn start_server( (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/network/enr") => network::get_enr::(req), - (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/node/network/listen_addresses") => { + (&Method::GET, "/network/enr") => network::get_enr::(req), + (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), + (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), + (&Method::GET, "/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } + (&Method::GET, "/node/version") => node::get_version(req), + (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 0e2448270c..154cd142d4 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -40,6 +40,23 @@ pub fn get_enr(req: Request) ))) } +/// HTTP handle to return the `PeerId` 
from the client's libp2p service. +/// +/// PeerId is encoded as base58 string. +pub fn get_peer_id(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let peer_id: PeerId = network.local_peer_id(); + + Ok(success_response(Body::from( + serde_json::to_string(&peer_id.to_base58()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + /// HTTP handle to return the number of peers connected in the client's libp2p service. pub fn get_peer_count( req: Request, From 5a34f86e770dedae20d4c383293bdb8cce722000 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 16:14:51 +1000 Subject: [PATCH 096/186] Address some review comments --- beacon_node/client/src/bootstrapper.rs | 16 ++++++++-------- beacon_node/rest_api/src/beacon.rs | 22 ++++++---------------- beacon_node/rest_api/src/helpers.rs | 15 +++++++++++++++ 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 9843ceec77..19f13e2da8 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -10,7 +10,7 @@ use url::Host; #[derive(Debug)] enum Error { - UrlCannotBeBase, + InvalidUrl, HttpError(HttpError), } @@ -38,7 +38,7 @@ impl Bootstrapper { /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. /// - /// The address is created by querying the HTTP server for it's listening libp2p addresses. + /// The address is created by querying the HTTP server for its listening libp2p addresses. /// Then, we find the first TCP port in those addresses and combine the port with the URL of /// the server. 
/// @@ -124,7 +124,7 @@ fn get_slots_per_epoch(mut url: Url) -> Result { .map(|mut url| { url.push("spec").push("slots_per_epoch"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? @@ -137,7 +137,7 @@ fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result .map(|mut url| { url.push("beacon").push("latest_finalized_checkpoint"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; @@ -149,7 +149,7 @@ fn get_state(mut url: Url, slot: Slot) -> Result, Err .map(|mut url| { url.push("beacon").push("state"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; url.query_pairs_mut() .append_pair("slot", &format!("{}", slot.as_u64())); @@ -165,7 +165,7 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err .map(|mut url| { url.push("beacon").push("block"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; url.query_pairs_mut() .append_pair("slot", &format!("{}", slot.as_u64())); @@ -181,7 +181,7 @@ fn get_enr(mut url: Url) -> Result { .map(|mut url| { url.push("node").push("network").push("enr"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? @@ -194,7 +194,7 @@ fn get_listen_addresses(mut url: Url) -> Result, Error> { .map(|mut url| { url.push("node").push("network").push("listen_addresses"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? 
diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 88427c9a4b..4e3cc02fd2 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -54,14 +54,9 @@ pub fn get_block(req: Request) -> ApiResult ("slot", value) => { let target = parse_slot(&value)?; - beacon_chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| *slot >= target) - .find(|(_root, slot)| *slot == target) - .map(|(root, _slot)| root) - .ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) - })? + block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })? } ("root", value) => parse_root(&value)?, _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), @@ -99,14 +94,9 @@ pub fn get_block_root(req: Request) -> ApiR let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; - let root = beacon_chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| *slot >= target) - .find(|(_root, slot)| *slot == target) - .map(|(root, _slot)| root) - .ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) - })?; + let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })?; let json: String = serde_json::to_string(&root) .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index a65c7c1ac9..5365086df7 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -31,6 +31,21 @@ pub fn parse_root(string: &str) -> Result { } } +/// Returns the root of the `BeaconBlock` in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. 
+/// +/// May return a root for a previous slot, in the case of skip slots. +pub fn block_root_at_slot( + beacon_chain: &BeaconChain, + target: Slot, +) -> Option { + beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) +} + /// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given /// `slot`, if possible. /// From 853344af8a6127a70df2207402a317fc7282b8cd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 16:34:21 +1000 Subject: [PATCH 097/186] Make BeaconChainTypes Send + Sync + 'static --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 8 ++++---- beacon_node/client/src/beacon_chain_types.rs | 6 +++++- beacon_node/client/src/lib.rs | 2 +- beacon_node/client/src/notifier.rs | 6 +----- beacon_node/rest_api/src/lib.rs | 2 +- beacon_node/rest_api/src/network.rs | 14 +++++--------- beacon_node/src/run.rs | 2 +- 8 files changed, 19 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bd7f37fbab..5feefd8417 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,7 +77,7 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } -pub trait BeaconChainTypes { +pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 298c637dbd..bd51f86203 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -54,8 +54,8 @@ where impl BeaconChainTypes for CommonTypes where - L: LmdGhost, - E: EthSpec, + L: LmdGhost + 'static, + E: EthSpec + 'static, { type Store = MemoryStore; 
type SlotClock = TestingSlotClock; @@ -69,8 +69,8 @@ where /// Used for testing. pub struct BeaconChainHarness where - L: LmdGhost, - E: EthSpec, + L: LmdGhost + 'static, + E: EthSpec + 'static, { pub chain: BeaconChain>, pub keypairs: Vec, diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index f2f95226ad..adea8c7b53 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -36,7 +36,11 @@ pub struct ClientType { _phantom_u: PhantomData, } -impl BeaconChainTypes for ClientType { +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec + 'static + Clone, +{ type Store = S; type SlotClock = SystemTimeSlotClock; type LmdGhost = ThreadSafeReducedTree; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 798aedec92..6405e05e71 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -49,7 +49,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf38670..78e50ac79d 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -17,11 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1; /// durations. /// /// Presently unused, but remains for future use. 
-pub fn run( - client: &Client, - executor: TaskExecutor, - exit: Exit, -) { +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { // notification heartbeat let interval = Interval::new( Instant::now(), diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 839aa7abca..354b234031 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -71,7 +71,7 @@ impl From for ApiError { } } -pub fn start_server( +pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 154cd142d4..daded9d3d6 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -7,9 +7,7 @@ use std::sync::Arc; /// HTTP handle to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. -pub fn get_listen_addresses( - req: Request, -) -> ApiResult { +pub fn get_listen_addresses(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -26,7 +24,7 @@ pub fn get_listen_addresses( /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. -pub fn get_enr(req: Request) -> ApiResult { +pub fn get_enr(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -43,7 +41,7 @@ pub fn get_enr(req: Request) /// HTTP handle to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. 
-pub fn get_peer_id(req: Request) -> ApiResult { +pub fn get_peer_id(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -58,9 +56,7 @@ pub fn get_peer_id(req: Request( - req: Request, -) -> ApiResult { +pub fn get_peer_count(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -77,7 +73,7 @@ pub fn get_peer_count( /// HTTP handle to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. -pub fn get_peer_list(req: Request) -> ApiResult { +pub fn get_peer_list(req: Request) -> ApiResult { let network = req .extensions() .get::>>() diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 5066231d55..f88cb7460b 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -118,7 +118,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; From 11dc72a4422e7c164c2d79619b6c92d12ae2ab4b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 17:48:13 +1000 Subject: [PATCH 098/186] Start implementing BeaconChainBuilder --- beacon_node/beacon_chain/Cargo.toml | 1 + .../beacon_chain/src/beacon_chain_builder.rs | 68 +++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 2 + 3 files changed, 71 insertions(+) create mode 100644 beacon_node/beacon_chain/src/beacon_chain_builder.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 1d3fc03b81..31f3412865 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -13,6 +13,7 @@ log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } serde = "1.0" serde_derive = "1.0" +serde_yaml = "0.8" slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } 
slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs new file mode 100644 index 0000000000..a6c77cb63c --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -0,0 +1,68 @@ +use crate::BeaconChainTypes; +use std::fs::File; +use std::path::PathBuf; +use std::time::SystemTime; +use types::{ + test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, +}; + +pub struct BeaconChainBuilder { + genesis_state: BeaconState, + genesis_block: BeaconBlock, + spec: ChainSpec, +} + +impl BeaconChainBuilder { + pub fn recent_genesis(validator_count: usize, spec: ChainSpec) -> Self { + Self::quick_start(recent_genesis_time(), validator_count, spec) + } + + pub fn quick_start(genesis_time: u64, validator_count: usize, spec: ChainSpec) -> Self { + let (mut genesis_state, _keypairs) = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) + .build(); + + genesis_state.genesis_time = genesis_time; + + Self::from_genesis_state(genesis_state, spec) + } + + pub fn yaml_state(file: PathBuf, spec: ChainSpec) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_yaml::from_reader(file) + .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec)) + } + + pub fn from_genesis_state(genesis_state: BeaconState, spec: ChainSpec) -> Self { + Self { + genesis_block: genesis_block(&genesis_state, &spec), + genesis_state, + spec, + } + } +} + +fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { + let mut genesis_block = BeaconBlock::empty(&spec); + + genesis_block.state_root = genesis_state.canonical_root(); + + genesis_block +} + +/// Returns the system time, mod 30 minutes. 
+/// +/// Used for easily creating testnets. +fn recent_genesis_time() -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); + // genesis is now the last 30 minute block. + now - secs_after_last_period +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cc7725dd83..9c833f778d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -3,6 +3,7 @@ extern crate lazy_static; mod beacon_chain; +mod beacon_chain_builder; mod checkpoint; mod errors; mod fork_choice; @@ -16,6 +17,7 @@ pub use self::beacon_chain::{ }; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; +pub use beacon_chain_builder::BeaconChainBuilder; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; From 94d987cb6aaa6fcd7920803c494a00a870f1ffae Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:12:29 +1000 Subject: [PATCH 099/186] Add `/network/listen_port` API endpoint --- beacon_node/client/src/bootstrapper.rs | 24 ++++++------------------ beacon_node/network/src/service.rs | 7 +++++++ beacon_node/rest_api/src/lib.rs | 1 + beacon_node/rest_api/src/network.rs | 15 +++++++++++++++ 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 19f13e2da8..eaaee4aa1c 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -46,7 +46,7 @@ impl Bootstrapper { /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// `/ipv4/172.0.0.1/tcp/9000`. 
pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.first_listening_tcp_port()?; + let tcp_port = self.listen_port().ok()?; let mut multiaddr = Multiaddr::with_capacity(2); @@ -61,17 +61,6 @@ impl Bootstrapper { Some(multiaddr) } - /// Reads the server's listening libp2p addresses and returns the first TCP port protocol it - /// finds, if any. - fn first_listening_tcp_port(&self) -> Option { - self.listen_addresses().ok()?.iter().find_map(|multiaddr| { - multiaddr.iter().find_map(|protocol| match protocol { - Protocol::Tcp(port) => Some(port), - _ => None, - }) - }) - } - /// Returns the IPv4 address of the server URL, unless it contains a FQDN. pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { @@ -86,9 +75,8 @@ impl Bootstrapper { } /// Returns the servers listening libp2p addresses. - pub fn listen_addresses(&self) -> Result, String> { - get_listen_addresses(self.url.clone()) - .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) + pub fn listen_port(&self) -> Result { + get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) } /// Returns the genesis block and state. 
@@ -179,7 +167,7 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err fn get_enr(mut url: Url) -> Result { url.path_segments_mut() .map(|mut url| { - url.push("node").push("network").push("enr"); + url.push("network").push("enr"); }) .map_err(|_| Error::InvalidUrl)?; @@ -189,10 +177,10 @@ fn get_enr(mut url: Url) -> Result { .map_err(Into::into) } -fn get_listen_addresses(mut url: Url) -> Result, Error> { +fn get_listen_port(mut url: Url) -> Result { url.path_segments_mut() .map(|mut url| { - url.push("node").push("network").push("listen_addresses"); + url.push("network").push("listen_port"); }) .map_err(|_| Error::InvalidUrl)?; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index dc7e941409..152f4dc77d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -18,6 +18,7 @@ use tokio::sync::{mpsc, oneshot}; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { libp2p_service: Arc>, + libp2p_port: u16, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, @@ -56,6 +57,7 @@ impl Service { )?; let network_service = Service { libp2p_service, + libp2p_port: config.libp2p_port, _libp2p_exit: libp2p_exit, _network_send: network_send.clone(), _phantom: PhantomData, @@ -87,6 +89,11 @@ impl Service { .collect() } + /// Returns the libp2p port that this node has been configured to listen using. + pub fn listen_port(&self) -> u16 { + self.libp2p_port + } + /// Returns the number of libp2p connected peers. 
pub fn connected_peers(&self) -> usize { self.libp2p_service.lock().swarm.connected_peers() diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 354b234031..a382c49e30 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -134,6 +134,7 @@ pub fn start_server( (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), (&Method::GET, "/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index daded9d3d6..a3e4c5ee72 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -21,6 +21,21 @@ pub fn get_listen_addresses(req: Request) -> ApiResul ))) } +/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// +/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. +pub fn get_listen_port(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + Ok(success_response(Body::from( + serde_json::to_string(&network.listen_port()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?, + ))) +} + /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. 
From 7d11d782992fc8a8780026860a97be56bb0325b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:43:34 +1000 Subject: [PATCH 100/186] Abandon starting the node if libp2p doesn't start --- beacon_node/eth2-libp2p/src/service.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e1e112e2d8..e208dbecac 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -16,7 +16,7 @@ use libp2p::core::{ upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; use libp2p::{core, secio, PeerId, Swarm, Transport}; -use slog::{debug, info, trace, warn}; +use slog::{crit, debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; @@ -69,10 +69,15 @@ impl Service { log_address.push(Protocol::P2p(local_peer_id.clone().into())); info!(log, "Listening on: {}", log_address); } - Err(err) => warn!( - log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err - ), + Err(err) => { + crit!( + log, + "Unable to listen on libp2p address"; + "error" => format!("{:?}", err), + "listen_multiaddr" => format!("{}", listen_multiaddr), + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } }; // attempt to connect to user-input libp2p nodes From a358bbc1b1bc04a852c792fa33f3ca85f77aabbc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:45:31 +1000 Subject: [PATCH 101/186] Update bootstrapper for API changes --- beacon_node/client/src/bootstrapper.rs | 33 ++++++++++++++++++++------ beacon_node/rest_api/src/beacon.rs | 6 ++--- beacon_node/rest_api/src/lib.rs | 4 +++- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index eaaee4aa1c..c94d9a51d8 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ 
b/beacon_node/client/src/bootstrapper.rs @@ -3,9 +3,10 @@ use eth2_libp2p::{ Enr, }; use reqwest::{Error as HttpError, Url}; +use serde::Deserialize; use std::borrow::Cow; use std::net::Ipv4Addr; -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; #[derive(Debug)] @@ -84,9 +85,11 @@ impl Bootstrapper { let genesis_slot = Slot::new(0); let block = get_block(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))?; + .map_err(|e| format!("Unable to get genesis block: {:?}", e))? + .beacon_block; let state = get_state(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))?; + .map_err(|e| format!("Unable to get genesis state: {:?}", e))? + .beacon_state; Ok((state, block)) } @@ -99,9 +102,11 @@ impl Bootstrapper { .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; let block = get_block(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))?; + .map_err(|e| format!("Unable to get finalized block: {:?}", e))? + .beacon_block; let state = get_state(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))?; + .map_err(|e| format!("Unable to get finalized state: {:?}", e))? 
+ .beacon_state; Ok((state, block)) } @@ -132,7 +137,14 @@ fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result Ok(checkpoint.epoch.start_slot(slots_per_epoch)) } -fn get_state(mut url: Url, slot: Slot) -> Result, Error> { +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { url.path_segments_mut() .map(|mut url| { url.push("beacon").push("state"); @@ -148,7 +160,14 @@ fn get_state(mut url: Url, slot: Slot) -> Result, Err .map_err(Into::into) } -fn get_block(mut url: Url, slot: Slot) -> Result, Error> { +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { url.path_segments_mut() .map(|mut url| { url.push("beacon").push("block"); diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 4e3cc02fd2..1c66a2819f 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -8,7 +8,7 @@ use store::Store; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; #[derive(Serialize)] -struct HeadResponse { +pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, pub state_root: Hash256, @@ -35,7 +35,7 @@ pub fn get_head(req: Request) -> ApiResult #[derive(Serialize)] #[serde(bound = "T: EthSpec")] -struct BlockResponse { +pub struct BlockResponse { pub root: Hash256, pub beacon_block: BeaconBlock, } @@ -106,7 +106,7 @@ pub fn get_block_root(req: Request) -> ApiR #[derive(Serialize)] #[serde(bound = "T: EthSpec")] -struct StateResponse { +pub struct StateResponse { pub root: Hash256, pub beacon_state: BeaconState, } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a382c49e30..964dd79982 100644 --- a/beacon_node/rest_api/src/lib.rs +++ 
b/beacon_node/rest_api/src/lib.rs @@ -13,7 +13,6 @@ mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; -pub use config::Config as ApiConfig; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; @@ -24,6 +23,9 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use url_query::UrlQuery; +pub use beacon::{BlockResponse, HeadResponse, StateResponse}; +pub use config::Config as ApiConfig; + #[derive(PartialEq, Debug)] pub enum ApiError { MethodNotAllowed(String), From a8de94ca133ddfb63ced7d02d3432f3166b8bcbb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 13:02:17 +1000 Subject: [PATCH 102/186] Remove unnecessary trait bounds --- beacon_node/beacon_chain/src/test_utils.rs | 4 ++-- beacon_node/client/src/beacon_chain_types.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index bd51f86203..09f4749ea3 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -55,7 +55,7 @@ where impl BeaconChainTypes for CommonTypes where L: LmdGhost + 'static, - E: EthSpec + 'static, + E: EthSpec, { type Store = MemoryStore; type SlotClock = TestingSlotClock; @@ -70,7 +70,7 @@ where pub struct BeaconChainHarness where L: LmdGhost + 'static, - E: EthSpec + 'static, + E: EthSpec, { pub chain: BeaconChain>, pub keypairs: Vec, diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index adea8c7b53..5168c067a9 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -39,7 +39,7 @@ pub struct ClientType { impl BeaconChainTypes for ClientType where S: Store + 'static, - E: EthSpec + 'static + Clone, + E: EthSpec, { type Store = S; type SlotClock = SystemTimeSlotClock; From 
453c8e2255263b5116b8fb7f94a29254e7836e4a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 16:39:32 +1000 Subject: [PATCH 103/186] Re-arrange CLI to suit new "testnet" pattern --- beacon_node/Cargo.toml | 1 + beacon_node/client/src/config.rs | 11 +- beacon_node/src/config.rs | 206 ++++++++++++++++++++++++++++ beacon_node/src/main.rs | 227 ++++++++++--------------------- beacon_node/src/run.rs | 1 - 5 files changed, 280 insertions(+), 166 deletions(-) create mode 100644 beacon_node/src/config.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9124047e45..9ce724c148 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,6 +11,7 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" +rand = "0.7" slog = { version = "^2.2.3" , features = ["max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ea8186dbc9..e1464e5b43 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,4 @@ -use crate::{Bootstrapper, Eth2Config}; +use crate::Bootstrapper; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; @@ -127,15 +127,6 @@ impl Config { self.data_dir = PathBuf::from(dir); }; - if let Some(default_spec) = args.value_of("default-spec") { - match default_spec { - "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, - "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, - "interop" => self.spec_constants = Eth2Config::interop().spec_constants, - _ => {} // not supported - } - } - if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs new file mode 100644 index 0000000000..959edbd607 --- /dev/null +++ b/beacon_node/src/config.rs @@ -0,0 +1,206 @@ +use clap::ArgMatches; +use 
client::{ClientConfig, Eth2Config}; +use eth2_config::{read_from_file, write_to_file}; +use rand::{distributions::Alphanumeric, Rng}; +use slog::{crit, info, Logger}; +use std::fs; +use std::path::PathBuf; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; + +type Result = std::result::Result; +type Config = (ClientConfig, Eth2Config); + +/// Gets the fully-initialized global client and eth2 configuration objects. +pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { + let mut builder = ConfigBuilder::new(matches, log)?; + + match matches.subcommand() { + ("testnet", Some(sub_matches)) => { + if sub_matches.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + builder.update_spec_from_subcommand(&sub_matches)?; + builder.write_configs_to_new_datadir()?; + } + _ => { + info!( + log, + "Resuming from existing datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + // If the `testnet` command was not provided, attempt to load an existing datadir and + // continue with an existing chain. + builder.load_from_datadir()?; + } + }; + + builder.build() +} + +/// Allows for building a set of configurations based upon `clap` arguments. +struct ConfigBuilder<'a> { + matches: &'a ArgMatches<'a>, + log: &'a Logger, + pub data_dir: PathBuf, + eth2_config: Eth2Config, + client_config: ClientConfig, +} + +impl<'a> ConfigBuilder<'a> { + /// Create a new builder with default settings. + pub fn new(matches: &'a ArgMatches, log: &'a Logger) -> Result { + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. 
+ let data_dir: PathBuf = matches + .value_of("datadir") + .map(|string| PathBuf::from(string)) + .or_else(|| { + dirs::home_dir().map(|mut home| { + home.push(DEFAULT_DATA_DIR); + home + }) + }) + .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + + Ok(Self { + matches, + log, + data_dir, + eth2_config: Eth2Config::minimal(), + client_config: ClientConfig::default(), + }) + } + + /// Consumes self, returning the configs. + pub fn build(mut self) -> Result { + self.eth2_config.apply_cli_args(&self.matches)?; + self.client_config + .apply_cli_args(&self.matches, &mut self.log.clone())?; + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + self.client_config.data_dir = self.data_dir; + + Ok((self.client_config, self.eth2_config)) + } + + /// Set the config data_dir to be an random directory. + /// + /// Useful for easily spinning up ephemeral testnets. + pub fn set_random_datadir(&mut self) -> Result<()> { + let random = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(10) + .collect::(); + + let mut s = DEFAULT_DATA_DIR.to_string(); + s.push_str("_random_"); + s.push_str(&random); + + self.data_dir.pop(); + self.data_dir.push(s); + + Ok(()) + } + + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. + /// + /// Returns an error if the `--spec` flag is not present. + pub fn update_spec_from_subcommand(&mut self, sub_matches: &ArgMatches) -> Result<()> { + // Re-initialise the `Eth2Config`. + // + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. 
+ let eth2_config = match sub_matches.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("Unable to determine specification type.".into()), + }; + + self.client_config.spec_constants = sub_matches + .value_of("spec") + .expect("Guarded by prior match statement") + .to_string(); + self.eth2_config = eth2_config; + + Ok(()) + } + + /// Writes the configs in `self` to `self.data_dir`. + /// + /// Returns an error if `self.data_dir` already exists. + pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + // Do not permit creating a new config when the datadir exists. + if self.data_dir.exists() { + return Err( + "Datadir already exists, will not overwrite. Remove the directory or use --datadir." + .into(), + ); + } + + // Create `datadir` and any non-existing parent directories. + fs::create_dir_all(&self.data_dir).map_err(|e| { + crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); + format!("{}", e) + })?; + + // Write the client config to a TOML file in the datadir. + write_to_file( + self.data_dir.join(CLIENT_CONFIG_FILENAME), + &self.client_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + + // Write the eth2 config to a TOML file in the datadir. + write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + + Ok(()) + } + + /// Attempts to load the client and eth2 configs from `self.data_dir`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_from_datadir(&mut self) -> Result<()> { + // Check to ensure the datadir exists. + // + // For now we return an error. In the future we may decide to boot a default (e.g., + // public testnet or mainnet). + if !self.data_dir.exists() { + return Err( + "No datadir found. 
Use the 'testnet' sub-command to select a testnet type.".into(), + ); + } + + self.eth2_config = read_from_file::(self.data_dir.join(ETH2_CONFIG_FILENAME)) + .map_err(|e| format!("Unable to parse {} file: {:?}", ETH2_CONFIG_FILENAME, e))? + .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + + self.client_config = + read_from_file::(self.data_dir.join(CLIENT_CONFIG_FILENAME)) + .map_err(|e| format!("Unable to parse {} file: {:?}", CLIENT_CONFIG_FILENAME, e))? + .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + + Ok(()) + } +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 04366baa7a..12c9b8a017 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,12 +1,10 @@ +mod config; mod run; -use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use clap::{App, Arg, SubCommand}; +use config::get_configs; use env_logger::{Builder, Env}; -use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; -use std::fs; -use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; @@ -31,6 +29,7 @@ fn main() { .value_name("DIR") .help("Data directory for keys and databases.") .takes_value(true) + .global(true) ) .arg( Arg::with_name("logfile") @@ -45,6 +44,7 @@ fn main() { .value_name("NETWORK-DIR") .help("Data directory for network keys.") .takes_value(true) + .global(true) ) /* * Network parameters. @@ -163,24 +163,6 @@ fn main() { .possible_values(&["disk", "memory"]) .default_value("memory"), ) - /* - * Specification/testnet params. - */ - .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. 
This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - ) - .arg( - Arg::with_name("recent-genesis") - .long("recent-genesis") - .short("r") - .help("When present, genesis will be within 30 minutes prior. Only for testing"), - ) /* * Logging. */ @@ -201,14 +183,68 @@ fn main() { .takes_value(true), ) /* - * Bootstrap. + * The "testnet" sub-command. + * + * Allows for creating a new datadir with testnet-specific configs. */ - .arg( - Arg::with_name("bootstrap") - .long("bootstrap") - .value_name("HTTP_SERVER") - .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") - .takes_value(true) + .subcommand(SubCommand::with_name("testnet") + .about("Create a new Lighthouse datadir using a testnet strategy.") + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") + .takes_value(true) + .required(true) + .possible_values(&["mainnet", "minimal", "interop"]) + ) + .arg( + Arg::with_name("random-datadir") + .long("random-datadir") + .short("r") + .help("If present, append a random string to the datadir path. Useful for fast development \ + iteration.") + ) + .arg( + Arg::with_name("force-create") + .long("force-create") + .short("f") + .help("If present, will delete any existing datadir before creating a new one. Cannot be \ + used when specifying --random-datadir (logic error).") + .conflicts_with("random-datadir") + ) + /* + * Testnet sub-commands. 
+ */ + .subcommand(SubCommand::with_name("bootstrap") + .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") + .arg(Arg::with_name("server") + .value_name("HTTP_SERVER") + .required(true) + .help("A HTTP server, with a http:// prefix")) + .arg(Arg::with_name("libp2p-port") + .short("p") + .long("port") + .value_name("TCP_PORT") + .help("A libp2p listen port used to peer with the bootstrap server")) + ) + .subcommand(SubCommand::with_name("recent") + .about("Creates a new genesis state where the genesis time was at the previous \ + 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + ) + .subcommand(SubCommand::with_name("yaml-genesis-state") + .about("Creates a new datadir where the genesis state is read from YAML. Will fail to parse \ + a YAML state that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("file") + .value_name("YAML_FILE") + .required(true) + .help("A YAML file from which to read the state")) + ) ) .get_matches(); @@ -235,143 +271,24 @@ fn main() { _ => drain.filter_level(Level::Trace), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); warn!( log, "Ethereum 2.0 is pre-release. This software is experimental." 
); - let data_dir = match matches - .value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. + // Load the process-wide configuration. // - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path, &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } + // May load this from disk or create a new configuration, depending on the CLI flags supplied. + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(configs) => configs, Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + crit!(log, "Failed to load configuration"; "error" => e); return; } }; - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - - // Initialise the `Eth2Config`. 
- // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. - let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. 
- match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - - // check to ensure the spec constants between the client and eth2_config match - if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); - return; - } - // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index f88cb7460b..e23b5bc72d 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -46,7 +46,6 @@ pub fn run_beacon_node( log, "BeaconNode init"; "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "data_dir" => format!("{:?}", other_client_config.data_dir()), "network_dir" => format!("{:?}", other_client_config.network.network_dir), "spec_constants" => &spec_constants, "db_type" => &other_client_config.db_type, From cdf3ade63fd32bea919c8e7fa847855352569148 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 18:23:58 +1000 Subject: [PATCH 104/186] Add further CLI progress --- beacon_node/client/src/bootstrapper.rs | 8 ++- beacon_node/client/src/config.rs | 72 ++++++++++++-------------- beacon_node/src/config.rs | 41 +++++++++++++-- 3 files changed, 76 insertions(+), 45 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index c94d9a51d8..9baf1dc7ed 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -46,8 +46,12 @@ impl Bootstrapper { /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// 
`/ipv4/172.0.0.1/tcp/9000`. - pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.listen_port().ok()?; + pub fn best_effort_multiaddr(&self, port: Option) -> Option { + let tcp_port = if let Some(port) = port { + port + } else { + self.listen_port().ok()? + }; let mut multiaddr = Multiaddr::with_capacity(2); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index e1464e5b43..e802a93a3d 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -21,14 +21,42 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, - pub genesis_state: GenesisState, + #[serde(skip)] + pub boot_method: BootMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, } -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] +#[derive(Debug, Clone)] +pub enum BootMethod { + /// Resume from an existing database. + Resume, + /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// + /// Set the genesis time to be the start of the previous 30-minute window. + RecentGenesis { validator_count: usize }, + /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + /// secret keys. + Generated { + validator_count: usize, + genesis_time: u64, + }, + /// Load a YAML-encoded genesis state from a file. + Yaml { file: PathBuf }, + /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + HttpBootstrap { + server: String, + port: Option, + }, +} + +impl Default for BootMethod { + fn default() -> Self { + BootMethod::Resume + } +} + pub enum GenesisState { /// Use the mainnet genesis state. 
/// @@ -61,9 +89,7 @@ impl Default for Config { rpc: rpc::RPCConfig::default(), rest_api: rest_api::ApiConfig::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - genesis_state: GenesisState::RecentGenesis { - validator_count: TESTNET_VALIDATOR_COUNT, - }, + boot_method: BootMethod::default(), } } } @@ -140,40 +166,6 @@ impl Config { self.update_logger(log)?; }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = args.value_of("bootstrap") { - do_bootstrapping(self, server.to_string(), &log)?; - } - Ok(()) } } - -/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and -/// adding them to the `config`. -fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { - // Set the genesis state source. - config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - config.network.boot_nodes.push(bootstrapper.enr()?); - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - config.network.libp2p_nodes.push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." 
- ); - } - - Ok(()) -} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 959edbd607..b66a00abba 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use clap::ArgMatches; -use client::{ClientConfig, Eth2Config}; +use client::{Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, Logger}; @@ -30,6 +30,41 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { ); builder.update_spec_from_subcommand(&sub_matches)?; + + match sub_matches.subcommand() { + // The bootstrap testnet method requires inserting a libp2p address into the + // network config. + ("bootstrap", Some(sub_matches)) => { + let server = sub_matches + .value_of("server") + .ok_or_else(|| "No bootstrap server specified".into())?; + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + if let Some(server_multiaddr) = + bootstrapper.best_effort_multiaddr(sub_matches.value_of("libp2p_port")) + { + info!( + log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); + + builder + .client_config + .network + .libp2p_nodes + .push(server_multiaddr); + } else { + warn!( + log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." 
+ ); + }; + } + _ => (), + }; + builder.write_configs_to_new_datadir()?; } _ => { @@ -53,8 +88,8 @@ struct ConfigBuilder<'a> { matches: &'a ArgMatches<'a>, log: &'a Logger, pub data_dir: PathBuf, - eth2_config: Eth2Config, - client_config: ClientConfig, + pub eth2_config: Eth2Config, + pub client_config: ClientConfig, } impl<'a> ConfigBuilder<'a> { From b078385362293fda872ee4dc62d0e1f8888005a8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sat, 24 Aug 2019 01:09:29 +1000 Subject: [PATCH 105/186] Improved syncing compilation issues --- beacon_node/network/src/message_handler.rs | 118 +--- beacon_node/network/src/sync/manager.rs | 696 +++++++++++--------- beacon_node/network/src/sync/mod.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 336 ++++++---- 4 files changed, 622 insertions(+), 530 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index fd10c5aead..7a1a4ad317 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -22,8 +22,6 @@ pub struct MessageHandler { _chain: Arc>, /// The syncing framework. sync: SimpleSync, - /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, /// The `MessageHandler` logger. 
log: slog::Logger, } @@ -52,15 +50,13 @@ impl MessageHandler { trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), &log); + let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, - network_context: NetworkContext::new(network_send, log.clone()), log: log.clone(), }; @@ -81,7 +77,7 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id, &mut self.network_context); + self.sync.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { @@ -112,32 +108,24 @@ impl MessageHandler { /// A new RPC request has been received from the network. fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { - RPCRequest::Hello(hello_message) => self.sync.on_hello_request( - peer_id, - request_id, - hello_message, - &mut self.network_context, - ), + RPCRequest::Hello(hello_message) => { + self.sync + .on_hello_request(peer_id, request_id, hello_message) + } RPCRequest::Goodbye(goodbye_reason) => { debug!( self.log, "PeerGoodbye"; "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), + "reason" => format!("{:?}", goodbye_reason), ); - self.sync.on_disconnect(peer_id), - }, - RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), + self.sync.on_disconnect(peer_id); + } + RPCRequest::BeaconBlocks(request) => self + .sync + .on_beacon_blocks_request(peer_id, request_id, 
request), + RPCRequest::RecentBeaconBlocks(request) => self + .sync + .on_recent_beacon_blocks_request(peer_id, request_id, request), } } @@ -163,20 +151,15 @@ impl MessageHandler { RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response( - peer_id, - hello_message, - &mut self.network_context, - ); + self.sync.on_hello_response(peer_id, hello_message); } RPCResponse::BeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -186,13 +169,12 @@ impl MessageHandler { } } RPCResponse::RecentBeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_recent_beacon_blocks_response( - request_id, peer_id, + request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -217,19 +199,14 @@ impl MessageHandler { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, block, &mut self.network_context); + let _should_forward_on = self.sync.on_block_gossip(peer_id, block); } Err(e) => { debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Ok(attestation) => { - self.sync - .on_attestation_gossip(peer_id, attestation, &mut self.network_context) - } + Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } @@ -331,56 +308,3 @@ impl MessageHandler { 
Vec::from_ssz_bytes(&beacon_blocks) } } - -/// Wraps a Network Channel to employ various RPC/Sync related network functionality. -pub struct NetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, - /// Logger for the `NetworkContext`. - log: slog::Logger, -} - -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { - Self { network_send, log } - } - - pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) - // TODO: disconnect peers. - } - - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. - self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); - } - - //TODO: Handle Error responses - pub fn send_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - rpc_response: RPCErrorResponse, - ) { - self.send_rpc_event( - peer_id, - RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), - ); - } - - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { - self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send RPC message to the network service" - ) - }); - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a4ce544ec3..f5c6694557 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,129 +1,164 @@ -const MAX_BLOCKS_PER_REQUEST: usize = 10; +use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; +use beacon_chain::{BeaconChain, 
BeaconChainTypes, BlockProcessingOutcome}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::PeerId; +use slog::{debug, info, trace, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::ops::{Add, Sub}; +use std::sync::Arc; +use types::{BeaconBlock, EthSpec, Hash256, Slot}; + +const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 10; +const SLOT_IMPORT_TOLERANCE: usize = 10; const PARENT_FAIL_TOLERANCE: usize = 3; -const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +#[derive(PartialEq)] enum BlockRequestsState { QueuedForward, QueuedBackward, Pending(RequestId), Complete, + Failed, } -struct BlockRequests { - target_head_slot: Slot +struct BlockRequests { + target_head_slot: Slot, target_head_root: Hash256, - downloaded_blocks: Vec, - state: State, + downloaded_blocks: Vec>, + state: BlockRequestsState, } -struct ParentRequests { - downloaded_blocks: Vec, - attempts: usize, +struct ParentRequests { + downloaded_blocks: Vec>, + failed_attempts: usize, last_submitted_peer: PeerId, // to downvote the submitting peer. 
state: BlockRequestsState, } -impl BlockRequests { - +impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 fn next_start_slot(&self) -> Option { if !self.downloaded_blocks.is_empty() { match self.state { BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() -1; - Some(downloaded_blocks[last_element_index].slot.add(1)) + let last_element_index = self.downloaded_blocks.len() - 1; + Some(self.downloaded_blocks[last_element_index].slot.add(1)) } BlockRequestsState::QueuedBackward => { let earliest_known_slot = self.downloaded_blocks[0].slot; Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) } + _ => { + // pending/complete/failed + None + } } - } - else { + } else { None } } } +#[derive(PartialEq, Debug, Clone)] enum ManagerState { Syncing, Regular, Stalled, } -enum ImportManagerOutcome { +pub(crate) enum ImportManagerOutcome { Idle, - RequestBlocks{ + RequestBlocks { peer_id: PeerId, request_id: RequestId, request: BeaconBlocksRequest, }, + /// Updates information with peer via requesting another HELLO handshake. + Hello(PeerId), RecentRequest(PeerId, RecentBeaconBlocksRequest), DownvotePeer(PeerId), } - -pub struct ImportManager { +pub struct ImportManager { /// A reference to the underlying beacon chain. 
chain: Arc>, - state: MangerState, - import_queue: HashMap, - parent_queue: Vec, - full_peers: Hashset, + state: ManagerState, + import_queue: HashMap>, + parent_queue: Vec>, + full_peers: HashSet, current_req_id: usize, log: Logger, } -impl ImportManager { +impl ImportManager { + pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + ImportManager { + chain: beacon_chain.clone(), + state: ManagerState::Regular, + import_queue: HashMap::new(), + parent_queue: Vec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + } + } - pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { // TODO: Improve comments. // initially try to download blocks from our current head // then backwards search all the way back to our finalized epoch until we match on a chain // has to be done sequentially to find next slot to start the batch from - + let local = PeerSyncInfo::from(&self.chain); // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync - if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; - "peer" => peer_id, - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local.head_slot, - ); + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); // remove the peer from the queue if it exists - self.import_queue.remove(&peer_id); + self.import_queue.remove(&peer_id); return; } if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { // update the target head slot - if remote.head_slot > requested_block.target_head_slot { + if remote.head_slot > block_requests.target_head_slot { block_requests.target_head_slot = remote.head_slot; } - } else { + } else { let block_requests = BlockRequests { target_head_slot: 
remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: RequestedBlockState::Queued - } + state: BlockRequestsState::QueuedForward, + }; self.import_queue.insert(peer_id, block_requests); } - } - pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + mut blocks: Vec>, + ) { // find the request - let block_requests = match self.import_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, - None => { + let block_requests = match self + .import_queue + .get_mut(&peer_id) + .filter(|r| r.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, + _ => { // No pending request, invalid request_id or coding error warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; @@ -142,100 +177,115 @@ impl ImportManager { if blocks.is_empty() { warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } // Add the newly downloaded blocks to the current list of downloaded blocks. This also // determines if we are syncing forward or backward. 
let syncing_forwards = { - if block_requests.blocks.is_empty() { - block_requests.blocks.push(blocks); + if block_requests.downloaded_blocks.is_empty() { + block_requests.downloaded_blocks.append(&mut blocks); true - } - else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_slot() > blocks[0].slot { - warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); - block_requests.state = RequestedBlockState::Failed; - return; - } - - block_requests.blocks.push(blocks); - true + } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { + // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_start_slot() > Some(blocks[0].slot) { + warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); + block_requests.state = BlockRequestsState::Failed; + return; } - else { false } + + block_requests.downloaded_blocks.append(&mut blocks); + true + } else { + false + } }; - // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head // (target head). Therefore we need to download another sequential batch. // - The latest batch includes blocks that greater than or equal to the target_head slot, - // which means we have caught up to their head. We then check to see if the first + // which means we have caught up to their head. 
We then check to see if the first // block downloaded matches our head. If so, we are on the same chain and can process // the blocks. If not we need to sync back further until we are on the same chain. So // request more blocks. // - We are syncing backwards (from our head slot) and need to check if we are on the same // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - + if syncing_forwards { // does the batch contain the target_head_slot - let last_element_index = block_requests.blocks.len()-1; - if block_requests[last_element_index].slot >= block_requests.target_slot { + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + { // if the batch is on our chain, this is complete and we can then process. // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. 
+ if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } // not on the same chain, request blocks backwards - // binary search, request half the distance between the earliest block and our - // finalized slot - let state = &beacon_chain.head().beacon_state; - let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.blocks[0] { + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } - // Start a backwards sync by requesting earlier blocks + // Start a backwards sync by requesting earlier blocks // There can be duplication in downloaded blocks here if there are a large number // of skip slots. In all cases we at least re-download the earliest known block. // It is unlikely that a backwards sync in required, so we accept this duplication // for now. - block_requests.state = RequestedBlockState::QueuedBackward; + block_requests.state = BlockRequestsState::QueuedBackward; + } else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = BlockRequestsState::QueuedForward; } - else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = RequestedBlockState::QueuedForward; - } - } - else { + } else { // syncing backwards // if the batch is on our chain, this is complete and we can then process. 
// Otherwise continue backwards - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = RequestedBlockState::QueuedBackward; - + block_requests.state = BlockRequestsState::QueuedBackward; } } - pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn recent_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blocks: Vec>, + ) { // find the request - let parent_request = match self.parent_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + let parent_request = match self + .parent_queue + .iter_mut() + .find(|request| request.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, None => { // No pending request, invalid request_id or coding error warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); @@ -245,8 +295,8 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { - parent_request.attempts += 1; - parent_request.state = RequestedBlockState::QueuedForward; + parent_request.failed_attempts += 1; + parent_request.state = BlockRequestsState::QueuedForward; parent_request.last_submitted_peer = peer_id; return; } @@ -256,29 +306,27 @@ impl ImportManager { if blocks.len() != 1 { //TODO: Potentially downvote the peer debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; - "peer_id" => peer_id, - "no_parents" => blocks.len() - ); + "peer_id" => format!("{:?}", peer_id), + "no_parents" => blocks.len() + ); return; } - // queue for processing - parent_request.state = RequestedBlockState::Complete; + parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { //TODO: Remove block state from pending } - pub fn peer_disconnect(peer_id: PeerId) { - self.import_queue.remove(&peer_id); - self.full_peers.remove(&peer_id); + pub fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); self.update_state(); } - pub fn add_full_peer(peer_id: PeerId) { + pub fn add_full_peer(&mut self, peer_id: PeerId) { debug!( self.log, "Fully synced peer added"; "peer" => format!("{:?}", peer_id), @@ -287,32 +335,36 @@ impl ImportManager { self.update_state(); } - pub fn add_unknown_block(&mut self,block: BeaconBlock) { + pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { // if we are not in regular sync mode, ignore this block - if self.state == ManagerState::Regular { + if let ManagerState::Regular = self.state { return; } // make sure this block is not already being searched for // TODO: Potentially store a hashset of blocks for O(1) lookups for parent_req in self.parent_queue.iter() { - if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + if let Some(_) = parent_req + .downloaded_blocks + .iter() + .find(|d_block| d_block == &&block) + { // we are already searching for this block, ignore it return; } } - let req = ParentRequests { + let req = ParentRequests { downloaded_blocks: vec![block], failed_attempts: 0, - state: RequestedBlockState::QueuedBackward - } + last_submitted_peer: peer_id, + state: BlockRequestsState::QueuedBackward, + }; self.parent_queue.push(req); } - pub fn poll() -> ImportManagerOutcome { - + pub fn poll(&mut self) -> ImportManagerOutcome { loop { 
// update the state of the manager self.update_state(); @@ -336,304 +388,340 @@ impl ImportManager { if let (re_run, outcome) = self.process_complete_parent_requests() { if let Some(outcome) = outcome { return outcome; - } - else if !re_run { + } else if !re_run { break; } } } - - return ImportManagerOutcome::Idle; + return ImportManagerOutcome::Idle; } - fn update_state(&mut self) { - let previous_state = self.state; + let previous_state = self.state.clone(); self.state = { if !self.import_queue.is_empty() { ManagerState::Syncing + } else if !self.full_peers.is_empty() { + ManagerState::Regular + } else { + ManagerState::Stalled } - else if !self.full_peers.is_empty() { - ManagerState::Regualar - } - else { - ManagerState::Stalled } }; if self.state != previous_state { - info!(self.log, "Syncing state updated", - "old_state" => format!("{:?}", previous_state) - "new_state" => format!("{:?}", self.state) - ); + info!(self.log, "Syncing state updated"; + "old_state" => format!("{:?}", previous_state), + "new_state" => format!("{:?}", self.state), + ); } } - - - fn process_potential_block_requests(&mut self) -> Option { + fn process_potential_block_requests(&mut self) -> Option { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p // layer and not needed here. - // If any in queued state we submit a request. - + // If any in queued state we submit a request. 
// remove any failed batches self.import_queue.retain(|peer_id, block_request| { - if block_request.state == RequestedBlockState::Failed { - debug!(self.log, "Block import from peer failed", - "peer_id" => peer_id, - "downloaded_blocks" => block_request.downloaded.blocks.len() - ); + if let BlockRequestsState::Failed = block_request.state { + debug!(self.log, "Block import from peer failed"; + "peer_id" => format!("{:?}", peer_id), + "downloaded_blocks" => block_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); + // process queued block requests + for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { + req.state == BlockRequestsState::QueuedForward + || req.state == BlockRequestsState::QueuedBackward + }) { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - for (peer_id, block_requests) in self.import_queue.iter_mut() { - if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - - let request.state = RequestedBlockState::Pending(self.current_req_id); - self.current_req_id +=1; - - let req = BeaconBlocksRequest { - head_block_root: request.target_root, - start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), - count: MAX_BLOCKS_PER_REQUEST, - step: 0 - } - return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); - } + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests + .next_start_slot() + .unwrap_or_else(|| self.chain.best_slot()) + .as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + return Some(ImportManagerOutcome::RequestBlocks { + peer_id: peer_id.clone(), + request, + request_id, + }); } None } fn process_complete_batches(&mut self) -> Option { - - let completed_batches = 
self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + let completed_batches = self + .import_queue + .iter() + .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) + .map(|(peer, _)| peer) + .cloned() + .collect::>(); for peer_id in completed_batches { - let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); - match self.process_blocks(block_requests.downloaded_blocks) { - Ok(()) => { - //TODO: Verify it's impossible to have empty downloaded_blocks - last_element = block_requests.downloaded_blocks.len() -1 - debug!(self.log, "Blocks processed successfully"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - ); - // Re-HELLO to ensure we are up to the latest head - return Some(ImportManagerOutcome::Hello(peer_id)); - } - Err(e) => { - last_element = block_requests.downloaded_blocks.len() -1 - warn!(self.log, "Block processing failed"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - return Some(ImportManagerOutcome::DownvotePeer(peer_id)); - } + let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); + match self.process_blocks(block_requests.downloaded_blocks.clone()) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + let last_element = block_requests.downloaded_blocks.len() - 1; + debug!(self.log, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we 
are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); } + Err(e) => { + let last_element = block_requests.downloaded_blocks.len() - 1; + warn!(self.log, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } } None } - fn process_parent_requests(&mut self) -> Option { - // remove any failed requests self.parent_queue.retain(|parent_request| { - if parent_request.state == RequestedBlockState::Failed { - debug!(self.log, "Parent import failed", - "block" => parent_request.downloaded_blocks[0].hash, - "siblings found" => parent_request.len() - ); + if parent_request.state == BlockRequestsState::Failed { + debug!(self.log, "Parent import failed"; + "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), + "ancestors_found" => parent_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); // check to make sure there are peers to search for the parent from if self.full_peers.is_empty() { - return; + return None; } // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed - continue; - } - else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state == BlockRequestsState::Failed; + continue; + } else if parent_request.state == BlockRequestsState::QueuedForward { parent_request.state = BlockRequestsState::Pending(self.current_req_id); - self.current_req_id +=1; - let parent_hash = + self.current_req_id += 1; + let last_element_index = parent_request.downloaded_blocks.len() - 1; + let parent_hash = 
parent_request.downloaded_blocks[last_element_index].parent_root; let req = RecentBeaconBlocksRequest { block_roots: vec![parent_hash], }; // select a random fully synced peer to attempt to download the parent block - let peer_id = self.full_peers.iter().next().expect("List is not empty"); + let peer_id = self.full_peers.iter().next().expect("List is not empty"); - return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); } } None - } - - - fn process_complete_parent_requests(&mut self) => (bool, Option) { + } + fn process_complete_parent_requests(&mut self) -> (bool, Option) { // flag to determine if there is more process to drive or if the manager can be switched to // an idle state - let mut re_run = false; - - // verify the last added block is the parent of the last requested block - let last_index = parent_requests.downloaded_blocks.len() -1; - let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; - let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); - if block_hash != expected_hash { - //TODO: Potentially downvote the peer - debug!(self.log, "Peer sent invalid parent. Ignoring"; - "peer_id" => peer_id, - "received_block" => block_hash, - "expected_parent" => expected_hash, - ); - return; - } + let mut re_run = false; // Find any parent_requests ready to be processed - for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + for completed_request in self + .parent_queue + .iter_mut() + .filter(|req| req.state == BlockRequestsState::Complete) + { + // verify the last added block is the parent of the last requested block + let last_index = completed_request.downloaded_blocks.len() - 1; + let expected_hash = completed_request.downloaded_blocks[last_index].parent_root; + // Note: the length must be greater than 1 so this cannot panic. 
+ let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root(); + if block_hash != expected_hash { + // remove the head block + let _ = completed_request.downloaded_blocks.pop(); + completed_request.state = BlockRequestsState::QueuedForward; + //TODO: Potentially downvote the peer + let peer = completed_request.last_submitted_peer.clone(); + debug!(self.log, "Peer sent invalid parent. Ignoring"; + "peer_id" => format!("{:?}",peer), + "received_block" => format!("{}", block_hash), + "expected_parent" => format!("{}", expected_hash), + ); + return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + } + // try and process the list of blocks up to the requested block while !completed_request.downloaded_blocks.is_empty() { - let block = completed_request.downloaded_blocks.pop(); - match self.chain_process_block(block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + let block = completed_request + .downloaded_blocks + .pop() + .expect("Block must exist exist"); + match self.chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; break; } - Ok(BlockProcessingOutcome::Processed { _ } => { } - Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again - completed_request.failed_attempts +=1; + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; trace!( self.log, "Invalid parent block"; - "outcome" => format!("{:?}", outcome); + "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, 
Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } - Err(e) => { - completed_request.failed_attempts +=1; + Err(e) => { + completed_request.failed_attempts += 1; warn!( self.log, "Parent processing error"; - "error" => format!("{:?}", e); + "error" => format!("{:?}", e) ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); - } + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } + } } } // remove any full completed and processed parent chains - self.parent_queue.retain(|req| if req.state == BlockRequestsState::Complete { false } else { true }); + self.parent_queue.retain(|req| { + if req.state == BlockRequestsState::Complete { + false + } else { + true + } + }); (re_run, None) - } - - fn process_blocks( - &mut self, - blocks: Vec>, - ) -> Result<(), String> { - + fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { for block in blocks { - let processing_result = self.chain.process_block(block.clone()); + let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- trace!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); - return Err(format!("Block at slot {} has an unknown parent.", block.slot)); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. trace!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Block at slot {} is too far in the future", block.slot)); - } else { - // The block is in the future, but not too far. 
- trace!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), + self.log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), ); } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + return Err(format!( + "Block at slot {} is too far in the future", + block.slot + )); + } else { + // The block is in the future, but not too far. 
+ trace!( + self.log, "QueuedFutureBlock"; + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } } - _ => { - trace!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Invalid block at slot {}", block.slot)); - } + } else { + trace!( + self.log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); } - Ok(()) - } else { - trace!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!("Unexpected block processing error: {:?}", processing_result)); } - } + Ok(()) } } + +fn root_at_slot( + chain: Arc>, + target_slot: Slot, +) -> Option { + chain + .rev_iter_block_roots() + .find(|(_root, slot)| *slot == target_slot) + .map(|(root, _slot)| root) +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index fac1b46eb0..b26d78c147 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,4 +1,4 @@ -mod import_queue; +mod manager; /// Syncing for lighthouse. /// /// Stores the various syncing methods for the beacon chain. 
diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index a7f5ced401..deadf214d6 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,8 +1,9 @@ -use super::import_queue::{ImportQueue, PartialBeaconBlockCompletion}; -use crate::message_handler::NetworkContext; +use super::manager::{ImportManager, ImportManagerOutcome}; +use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, error, info, o, trace, warn}; use ssz::Encode; @@ -10,14 +11,14 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use store::Store; +use tokio::sync::mpsc; use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; - /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. -const FUTURE_SLOT_TOLERANCE: u64 = 1; +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -25,16 +26,13 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. 
#[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, + fork_version: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, } - - - impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -43,7 +41,6 @@ impl From for PeerSyncInfo { finalized_epoch: hello.finalized_epoch, head_root: hello.head_root, head_slot: hello.head_slot, - requested_slot_skip: None, } } } @@ -66,18 +63,24 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. chain: Arc>, - manager: ImportManager, + manager: ImportManager, + network: NetworkContext, log: slog::Logger, } impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. - pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new( + beacon_chain: Arc>, + network_send: mpsc::UnboundedSender, + log: &slog::Logger, + ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); SimpleSync { chain: beacon_chain.clone(), - manager: ImportManager::new(), + manager: ImportManager::new(beacon_chain, log), + network: NetworkContext::new(network_send, log.clone()), log: sync_logger, } } @@ -92,8 +95,9 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + pub fn on_connect(&mut self, peer_id: PeerId) { + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. 
@@ -104,42 +108,31 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, ) { trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. - network.send_rpc_response( + self.network.send_rpc_response( peer_id.clone(), request_id, RPCResponse::Hello(hello_message(&self.chain)), ); - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` response from a peer. - pub fn on_hello_response( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + pub fn on_hello_response(&mut self, peer_id: PeerId, hello: HelloMessage) { trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` message, requesting new blocks if appropriate. /// /// Disconnects the peer if required. - fn process_hello( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -153,12 +146,13 @@ impl SimpleSync { "reason" => "network_id" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.finalized_epoch <= local.finalized_epoch && remote.finalized_root != Hash256::zero() && local.finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) - != Some(remote.latest_finalized_root)) + && (self.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) { // The remotes finalized epoch is less than or greater than ours, but the block root is // different to the one in our chain. 
@@ -169,8 +163,9 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch < local.latest_finalized_epoch { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: // @@ -193,12 +188,12 @@ impl SimpleSync { } else if self .chain .store - .exists::>(&remote.best_root) + .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - self.import_manager.add_full_peer(peer_id); + self.manager.add_full_peer(peer_id); self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
@@ -208,29 +203,45 @@ impl SimpleSync { debug!( self.log, "UsefulPeer"; "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.latest_finalized_epoch, - "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, ); - self.import_manager.add_peer(peer_id, remote); + self.manager.add_peer(peer_id, remote); self.process_sync(); } } - self.proess_sync(&mut self) { + fn process_sync(&mut self) { loop { - match self.import_manager.poll() { - ImportManagerOutcome::RequestBlocks(peer_id, req) { + match self.manager.poll() { + ImportManagerOutcome::Hello(peer_id) => { + trace!( + self.log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + } + ImportManagerOutcome::RequestBlocks { + peer_id, + request_id, + request, + } => { trace!( self.log, "RPC Request"; "method" => "BeaconBlocks", - "count" => req.count, + "id" => request_id, + "count" => request.count, "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); - }, - ImportManagerOutcome::RecentRequest(peer_id, req) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + } + ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( self.log, "RPC Request"; @@ -238,18 +249,20 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); - }, - ImportManagerOutcome::DownvotePeer(peer_id) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + } + ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( self.log, "Peer downvoted"; "peer" => format!("{:?}", peer_id) ); // TODO: Implement reputation - 
network.disconnect(peer_id.clone(), GoodbyeReason::Fault); - }, - SyncManagerState::Idle { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::Fault); + } + ImportManagerOutcome::Idle => { // nothing to do return; } @@ -257,37 +270,26 @@ impl SimpleSync { } } - - /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } - */ - /// Handle a `BeaconBlocks` request from the peer. - pub fn on_beacon_blocks_request( + /// Handle a `RecentBeaconBlocks` request from the peer. + pub fn on_recent_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlocksRequest, - network: &mut NetworkContext, + request: RecentBeaconBlocksRequest, ) { - debug!( - self.log, - "BeaconBlocksRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.count, - "start_slot" => req.start_slot, - ); - - let blocks = Vec> = self - .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) - .filter_map(|root, slot| { + let blocks: Vec> = request + .block_roots + .iter() + .filter_map(|root| { if let Ok(Some(block)) = self.chain.store.get::>(root) { - Some(block.body) + Some(block) } else { debug!( self.log, @@ -301,10 +303,63 @@ impl SimpleSync { }) .collect(); - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); + debug!( + self.log, + "BlockBodiesRequest"; + "peer" => format!("{:?}", peer_id), + "requested" => request.block_roots.len(), + "returned" => blocks.len(), + ); - if roots.len() as u64 != req.count { + self.network.send_rpc_response( + peer_id, + request_id, + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), + ) + } + + /// Handle a `BeaconBlocks` request from the peer. 
+ pub fn on_beacon_blocks_request( + &mut self, + peer_id: PeerId, + request_id: RequestId, + req: BeaconBlocksRequest, + ) { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "count" => req.count, + "start_slot" => req.start_slot, + ); + + let mut blocks: Vec> = self + .chain + .rev_iter_block_roots() + .filter(|(_root, slot)| { + req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + }) + .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) + .filter_map(|(root, _slot)| { + if let Ok(Some(block)) = self.chain.store.get::>(&root) { + Some(block) + } else { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => format!("{:?}", peer_id), + "request_root" => format!("{:}", root), + ); + + None + } + }) + .collect(); + + blocks.reverse(); + blocks.dedup_by_key(|brs| brs.slot); + + if blocks.len() as u64 != req.count { debug!( self.log, "BeaconBlocksRequest"; @@ -313,33 +368,33 @@ impl SimpleSync { "start_slot" => req.start_slot, "current_slot" => self.chain.present_slot(), "requested" => req.count, - "returned" => roots.len(), + "returned" => blocks.len(), ); } - network.send_rpc_response( + self.network.send_rpc_response( peer_id, request_id, RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlocks` response from the peer. 
pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + self.manager + .beacon_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -349,16 +404,17 @@ impl SimpleSync { &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.recent_blocks_response(peer_id, request_id, blocks); + self.manager + .recent_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -368,19 +424,13 @@ impl SimpleSync { /// Attempts to apply to block to the beacon chain. May queue the block for later processing. /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - ) -> bool { - if let Some(outcome) = - self.process_block(peer_id.clone(), block.clone(), network, &"gossip") - { + pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { + if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { BlockProcessingOutcome::Processed { .. 
} => SHOULD_FORWARD_GOSSIP_BLOCK, - BlockProcessingOutcome::ParentUnknown { parent } => { + BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block - self.import_manager.add_unknown_block(block.clone()); + self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { @@ -401,12 +451,7 @@ impl SimpleSync { /// Process a gossip message declaring a new attestation. /// /// Not currently implemented. - pub fn on_attestation_gossip( - &mut self, - _peer_id: PeerId, - msg: Attestation, - _network: &mut NetworkContext, - ) { + pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { match self.chain.process_attestation(msg) { Ok(outcome) => info!( self.log, @@ -420,39 +465,74 @@ impl SimpleSync { } } - -/* - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { - !self - .chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - false - }) - } - */ - /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. 
fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { - let spec = &beacon_chain.spec; let state = &beacon_chain.head().beacon_state; HelloMessage { - network_id: spec.network_id, - //TODO: Correctly define the chain id - chain_id: spec.network_id as u64, - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, - best_root: beacon_chain.head().beacon_block_root, - best_slot: state.slot, + fork_version: state.fork.current_version, + finalized_root: state.finalized_checkpoint.root, + finalized_epoch: state.finalized_checkpoint.epoch, + head_root: beacon_chain.head().beacon_block_root, + head_slot: state.slot, + } +} + +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. +pub struct NetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender, + /// Logger for the `NetworkContext`. + log: slog::Logger, +} + +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { + Self { network_send, log } + } + + pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + // TODO: disconnect peers. + } + + pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { + // Note: There is currently no use of keeping track of requests. However the functionality + // is left here for future revisions. 
+ self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + } + + //TODO: Handle Error responses + pub fn send_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + rpc_response: RPCResponse, + ) { + self.send_rpc_event( + peer_id, + RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), + ); + } + + fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + self.send(peer_id, OutgoingMessage::RPC(rpc_event)) + } + + fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { + self.network_send + .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send RPC message to the network service" + ) + }); } } From 0d56df474a6df70353a89970329f3c08068eef23 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 00:27:47 +1000 Subject: [PATCH 106/186] Main batch sync debugging --- beacon_node/client/src/lib.rs | 6 +- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 20 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 4 +- beacon_node/network/src/message_handler.rs | 6 +- beacon_node/network/src/sync/manager.rs | 240 +++++++++---------- beacon_node/network/src/sync/simple_sync.rs | 99 +++++--- 6 files changed, 219 insertions(+), 156 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 4b64c10705..7e6449a98d 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - pub api_exit_signal: Option, + // pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
@@ -134,6 +134,7 @@ where None }; + /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -151,6 +152,7 @@ where } else { None }; + */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -184,7 +186,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - api_exit_signal, + //api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index f7262118d6..260a00346c 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -171,7 +171,25 @@ impl Decoder for SSZOutboundCodec { }, _ => unreachable!("Cannot negotiate an unknown protocol"), }, - Ok(None) => Ok(None), + Ok(None) => { + // the object sent could be a empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { + "hello" => match self.protocol.version.as_str() { + "1" => Ok(None), // cannot have an empty HELLO message. 
The stream has terminated unexpectedly + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + _ => unreachable!("Cannot negotiate an unknown protocol"), + } + } Err(e) => Err(e), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index a69cd0cda9..07322875ff 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -317,11 +317,11 @@ where RPCEvent::Response(rpc_event.id(), response), ))); } else { - // stream closed early + // stream closed early or nothing was sent return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( rpc_event.id(), - RPCError::Custom("Stream Closed Early".into()), + RPCError::Custom("Stream closed early. 
Empty response".into()), ), ))); } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 7a1a4ad317..c14fc970d7 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,8 +1,7 @@ use crate::error; -use crate::service::{NetworkMessage, OutgoingMessage}; +use crate::service::NetworkMessage; use crate::sync::SimpleSync; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::{ behaviour::PubsubMessage, rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId}, @@ -304,6 +303,9 @@ impl MessageHandler { &self, beacon_blocks: &[u8], ) -> Result>, DecodeError> { + if beacon_blocks.is_empty() { + return Ok(Vec::new()); + } //TODO: Implement faster block verification before decoding entirely Vec::from_ssz_bytes(&beacon_blocks) } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f5c6694557..b81da0991f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -13,14 +13,12 @@ const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: usize = 10; - const PARENT_FAIL_TOLERANCE: usize = 3; const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; #[derive(PartialEq)] enum BlockRequestsState { - QueuedForward, - QueuedBackward, + Queued, Pending(RequestId), Complete, Failed, @@ -31,6 +29,10 @@ struct BlockRequests { target_head_root: Hash256, downloaded_blocks: Vec>, state: BlockRequestsState, + /// Specifies whether the current state is syncing forwards or backwards. + forward_sync: bool, + /// The current `start_slot` of the batched block request. 
+ current_start_slot: Slot, } struct ParentRequests { @@ -43,25 +45,13 @@ struct ParentRequests { impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 - fn next_start_slot(&self) -> Option { - if !self.downloaded_blocks.is_empty() { - match self.state { - BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() - 1; - Some(self.downloaded_blocks[last_element_index].slot.add(1)) - } - BlockRequestsState::QueuedBackward => { - let earliest_known_slot = self.downloaded_blocks[0].slot; - Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) - } - _ => { - // pending/complete/failed - None - } - } + fn update_start_slot(&mut self) { + if self.forward_sync { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); } else { - None + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); } + self.state = BlockRequestsState::Queued; } } @@ -117,7 +107,7 @@ impl ImportManager { let local = PeerSyncInfo::from(&self.chain); - // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; "peer" => format!("{:?}", peer_id), @@ -139,7 +129,9 @@ impl ImportManager { target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: BlockRequestsState::QueuedForward, + state: BlockRequestsState::Queued, + forward_sync: true, + current_start_slot: self.chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); } @@ -165,8 +157,6 @@ impl ImportManager { } }; - // The response should contain at least one block. 
- // // If we are syncing up to a target head block, at least the target head block should be // returned. If we are syncing back to our last finalized block the request should return // at least the last block we received (last known block). In diagram form: @@ -176,33 +166,30 @@ impl ImportManager { // ^finalized slot ^ requested start slot ^ last known block ^ remote head if blocks.is_empty() { - warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; + debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.update_start_slot(); return; } - // Add the newly downloaded blocks to the current list of downloaded blocks. This also - // determines if we are syncing forward or backward. - let syncing_forwards = { - if block_requests.downloaded_blocks.is_empty() { - block_requests.downloaded_blocks.append(&mut blocks); - true - } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { - // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_start_slot() > Some(blocks[0].slot) { - warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); - block_requests.state = BlockRequestsState::Failed; - return; - } - - block_requests.downloaded_blocks.append(&mut blocks); - true - } else { - false - } - }; + // verify the range of received blocks + // Note that the order of blocks is verified in block processing + let last_sent_slot = blocks[blocks.len() - 1].slot; + if block_requests.current_start_slot > blocks[0].slot + || block_requests + .current_start_slot + .add(MAX_BLOCKS_PER_REQUEST) + < last_sent_slot + { + //TODO: Downvote peer - add a reason to failed + 
dbg!(&blocks); + warn!(self.log, "BeaconBlocks response returned out of range blocks"; + "request_id" => request_id, + "response_initial_slot" => blocks[0].slot, + "requested_initial_slot" => block_requests.current_start_slot); + // consider this sync failed + block_requests.state = BlockRequestsState::Failed; + return; + } // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head @@ -216,61 +203,60 @@ impl ImportManager { // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - if syncing_forwards { - // does the batch contain the target_head_slot - let last_element_index = block_requests.downloaded_blocks.len() - 1; - if block_requests.downloaded_blocks[last_element_index].slot - >= block_requests.target_head_slot - { - // if the batch is on our chain, this is complete and we can then process. - // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests.downloaded_blocks[0].slot; - //TODO: Decide which is faster. Reading block from db and comparing or calculating - //the hash tree root and comparing. 
- if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) - { - block_requests.state = BlockRequestsState::Complete; - return; - } - - // not on the same chain, request blocks backwards - let state = &self.chain.head().beacon_state; - let local_finalized_slot = state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { - warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; - return; - } - - // Start a backwards sync by requesting earlier blocks - // There can be duplication in downloaded blocks here if there are a large number - // of skip slots. In all cases we at least re-download the earliest known block. - // It is unlikely that a backwards sync in required, so we accept this duplication - // for now. - block_requests.state = BlockRequestsState::QueuedBackward; - } else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = BlockRequestsState::QueuedForward; - } + if block_requests.forward_sync { + // append blocks if syncing forward + block_requests.downloaded_blocks.append(&mut blocks); } else { - // syncing backwards + // prepend blocks if syncing backwards + block_requests.downloaded_blocks.splice(..0, blocks); + } + + // does the batch contain the target_head_slot + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + || !block_requests.forward_sync + { // if the batch is on our chain, this is complete and we can then process. - // Otherwise continue backwards + // Otherwise start backwards syncing until we reach a common chain. 
let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) + == root_at_slot(&self.chain, earliest_slot) { block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = BlockRequestsState::QueuedBackward; + + // not on the same chain, request blocks backwards + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // if this is a forward sync, then we have reached the head without a common chain + // and we need to start syncing backwards. 
+ if block_requests.forward_sync { + // Start a backwards sync by requesting earlier blocks + block_requests.forward_sync = false; + block_requests.current_start_slot = std::cmp::min( + self.chain.best_slot(), + block_requests.downloaded_blocks[0].slot, + ); + } } + + // update the start slot and re-queue the batch + block_requests.update_start_slot(); } pub fn recent_blocks_response( @@ -296,7 +282,7 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { parent_request.failed_attempts += 1; - parent_request.state = BlockRequestsState::QueuedForward; + parent_request.state = BlockRequestsState::Queued; parent_request.last_submitted_peer = peer_id; return; } @@ -316,7 +302,7 @@ impl ImportManager { parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { + pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { //TODO: Remove block state from pending } @@ -358,13 +344,13 @@ impl ImportManager { downloaded_blocks: vec![block], failed_attempts: 0, last_submitted_peer: peer_id, - state: BlockRequestsState::QueuedBackward, + state: BlockRequestsState::Queued, }; self.parent_queue.push(req); } - pub fn poll(&mut self) -> ImportManagerOutcome { + pub(crate) fn poll(&mut self) -> ImportManagerOutcome { loop { // update the state of the manager self.update_state(); @@ -385,12 +371,11 @@ impl ImportManager { } // process any complete parent lookups - if let (re_run, outcome) = self.process_complete_parent_requests() { - if let Some(outcome) = outcome { - return outcome; - } else if !re_run { - break; - } + let (re_run, outcome) = self.process_complete_parent_requests(); + if let Some(outcome) = outcome { + return outcome; + } else if !re_run { + break; } } @@ -423,9 +408,10 @@ impl ImportManager { // If any in queued state we submit a request. 
// remove any failed batches + let debug_log = &self.log; self.import_queue.retain(|peer_id, block_request| { if let BlockRequestsState::Failed = block_request.state { - debug!(self.log, "Block import from peer failed"; + debug!(debug_log, "Block import from peer failed"; "peer_id" => format!("{:?}", peer_id), "downloaded_blocks" => block_request.downloaded_blocks.len() ); @@ -436,20 +422,18 @@ impl ImportManager { }); // process queued block requests - for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { - req.state == BlockRequestsState::QueuedForward - || req.state == BlockRequestsState::QueuedBackward - }) { + for (peer_id, block_requests) in self + .import_queue + .iter_mut() + .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) + { let request_id = self.current_req_id; block_requests.state = BlockRequestsState::Pending(request_id); self.current_req_id += 1; let request = BeaconBlocksRequest { head_block_root: block_requests.target_head_root, - start_slot: block_requests - .next_start_slot() - .unwrap_or_else(|| self.chain.best_slot()) - .as_u64(), + start_slot: block_requests.current_start_slot.as_u64(), count: MAX_BLOCKS_PER_REQUEST, step: 0, }; @@ -504,9 +488,10 @@ impl ImportManager { fn process_parent_requests(&mut self) -> Option { // remove any failed requests + let debug_log = &self.log; self.parent_queue.retain(|parent_request| { if parent_request.state == BlockRequestsState::Failed { - debug!(self.log, "Parent import failed"; + debug!(debug_log, "Parent import failed"; "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), "ancestors_found" => parent_request.downloaded_blocks.len() ); @@ -524,9 +509,15 @@ impl ImportManager { // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed; + parent_request.state = 
BlockRequestsState::Failed; continue; - } else if parent_request.state == BlockRequestsState::QueuedForward { + } else if parent_request.state == BlockRequestsState::Queued { + // check the depth isn't too large + if parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + parent_request.state = BlockRequestsState::Failed; + continue; + } + parent_request.state = BlockRequestsState::Pending(self.current_req_id); self.current_req_id += 1; let last_element_index = parent_request.downloaded_blocks.len() - 1; @@ -564,7 +555,7 @@ impl ImportManager { if block_hash != expected_hash { // remove the head block let _ = completed_request.downloaded_blocks.pop(); - completed_request.state = BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; //TODO: Potentially downvote the peer let peer = completed_request.last_submitted_peer.clone(); debug!(self.log, "Peer sent invalid parent. Ignoring"; @@ -585,7 +576,7 @@ impl ImportManager { Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; break; } @@ -598,7 +589,7 @@ impl ImportManager { "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -613,7 +604,7 @@ impl ImportManager { self.log, "Parent processing error"; "error" => format!("{:?}", e) ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -691,6 +682,13 @@ impl ImportManager { ); } } + BlockProcessingOutcome::FinalizedSlot => { + trace!( + self.log, "Finalized or earlier block processed"; + 
"outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } _ => { trace!( self.log, "InvalidBlock"; @@ -717,7 +715,7 @@ impl ImportManager { } fn root_at_slot( - chain: Arc>, + chain: &Arc>, target_slot: Slot, ) -> Option { chain diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index deadf214d6..924b2de9b6 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -2,24 +2,22 @@ use super::manager::{ImportManager, ImportManagerOutcome}; use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, error, info, o, trace, warn}; +use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::collections::HashMap; +use std::ops::Sub; use std::sync::Arc; -use std::time::Duration; use store::Store; use tokio::sync::mpsc; -use types::{ - Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, -}; +use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; +/// The number of slots behind our head that we still treat a peer as a fully synced peer. +const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -54,8 +52,8 @@ impl From<&Arc>> for PeerSyncInfo { /// The current syncing state. 
#[derive(PartialEq)] pub enum SyncState { - Idle, - Downloading, + _Idle, + _Downloading, _Stopped, } @@ -97,7 +95,7 @@ impl SimpleSync { /// Sends a `Hello` message to the peer. pub fn on_connect(&mut self, peer_id: PeerId) { self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -193,8 +191,16 @@ impl SimpleSync { { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - self.manager.add_full_peer(peer_id); - self.process_sync(); + if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { + self.manager.add_full_peer(peer_id); + self.process_sync(); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + } } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
// @@ -222,8 +228,11 @@ impl SimpleSync { "method" => "HELLO", "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + self.network.send_rpc_request( + None, + peer_id, + RPCRequest::Hello(hello_message(&self.chain)), + ); } ImportManagerOutcome::RequestBlocks { peer_id, @@ -238,8 +247,11 @@ impl SimpleSync { "count" => request.count, "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + self.network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); } ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( @@ -249,8 +261,11 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + self.network.send_rpc_request( + None, + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(req), + ); } ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( @@ -270,6 +285,7 @@ impl SimpleSync { } } + //TODO: Move to beacon chain fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() @@ -333,36 +349,58 @@ impl SimpleSync { "start_slot" => req.start_slot, ); + //TODO: Optimize this + // Currently for skipped slots, the blocks returned could be less than the requested range. + // In the current implementation we read from the db then filter out out-of-range blocks. + // Improving the db schema to prevent this would be ideal. 
+ let mut blocks: Vec> = self .chain .rev_iter_block_roots() .filter(|(_root, slot)| { - req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() }) .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) .filter_map(|(root, _slot)| { if let Ok(Some(block)) = self.chain.store.get::>(&root) { Some(block) } else { - debug!( + warn!( self.log, - "Peer requested unknown block"; - "peer" => format!("{:?}", peer_id), + "Block in the chain is not in the store"; "request_root" => format!("{:}", root), ); None } }) + .filter(|block| block.slot >= req.start_slot) .collect(); + // TODO: Again find a more elegant way to include genesis if needed + // if the genesis is requested, add it in + if req.start_slot == 0 { + if let Ok(Some(genesis)) = self + .chain + .store + .get::>(&self.chain.genesis_block_root) + { + blocks.push(genesis); + } else { + warn!( + self.log, + "Requested genesis, which is not in the chain store"; + ); + } + } + blocks.reverse(); blocks.dedup_by_key(|brs| brs.slot); if blocks.len() as u64 != req.count { debug!( self.log, - "BeaconBlocksRequest"; + "BeaconBlocksRequest response"; "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, @@ -498,14 +536,19 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. 
- self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + pub fn send_rpc_request( + &mut self, + request_id: Option, + peer_id: PeerId, + rpc_request: RPCRequest, + ) { + // use 0 as the default request id, when an ID is not required. + let request_id = request_id.unwrap_or_else(|| 0); + self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request)); } //TODO: Handle Error responses From 7ee080db6021b2fb4b47056ce0a666020b71b3d9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 08:25:54 +1000 Subject: [PATCH 107/186] Updated syncing algorithm --- beacon_node/client/src/lib.rs | 6 ++---- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 3 +-- beacon_node/eth2-libp2p/src/service.rs | 2 +- beacon_node/network/src/service.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 8 +++++++- beacon_node/rpc/src/attestation.rs | 8 ++++++-- beacon_node/rpc/src/beacon_block.rs | 10 +++++++--- 8 files changed, 27 insertions(+), 16 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7e6449a98d..4b64c10705 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - // pub api_exit_signal: Option, + pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
@@ -134,7 +134,6 @@ where None }; - /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -152,7 +151,6 @@ where } else { None }; - */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -186,7 +184,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - //api_exit_signal, + api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 55081aed58..a379bcead3 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b4822de4c4..29725e0ced 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -16,7 +16,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,7 +188,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. 
- pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) { let message_data = message.to_data(); for topic in topics { self.gossipsub.publish(topic, message_data.clone()); diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 98718445b3..9945b15863 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -148,7 +148,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); + trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index df0404cfaa..4800a7efbc 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -119,7 +119,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, message); + libp2p_service.lock().swarm.publish(&topics, message); } }, Ok(Async::NotReady) => break, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 924b2de9b6..bee9310d37 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -465,9 +465,15 @@ impl SimpleSync { pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { - BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, + BlockProcessingOutcome::Processed { .. 
} => { + trace!(self.log, "Gossipsub block processed"; + "peer_id" => format!("{:?}",peer_id)); + SHOULD_FORWARD_GOSSIP_BLOCK + } BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block + trace!(self.log, "Unknown parent gossip"; + "peer_id" => format!("{:?}",peer_id)); self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index f442e247dd..dff3f8d70a 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::Topic; -use eth2_libp2p::BEACON_ATTESTATION_TOPIC; +use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -144,7 +144,11 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b1a67399e2..92a543ef3c 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; 
@@ -105,8 +105,12 @@ impl BeaconBlockService for BeaconBlockServiceInstance { "block_root" => format!("{}", block_root), ); - // get the network topic to send on - let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. From 66d78387079c187545646bf2047428d872327113 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 09:43:03 +1000 Subject: [PATCH 108/186] Remove GenesisConfig, add BeaconChainStartMethod --- beacon_node/client/src/beacon_chain_types.rs | 14 ++--- beacon_node/client/src/config.rs | 61 ++++++++------------ beacon_node/client/src/lib.rs | 2 +- beacon_node/src/config.rs | 18 ++++-- beacon_node/src/main.rs | 4 +- 5 files changed, 46 insertions(+), 53 deletions(-) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 5168c067a9..37e4a055e8 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,6 +1,6 @@ use crate::bootstrapper::Bootstrapper; use crate::error::Result; -use crate::{config::GenesisState, ClientConfig}; +use crate::{config::BeaconChainStartMethod, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, slot_clock::SystemTimeSlotClock, @@ -59,19 +59,19 @@ where T: BeaconChainTypes, T::LmdGhost: LmdGhost, { - let genesis_state = match &config.genesis_state { - GenesisState::Mainnet => { + let genesis_state = match &config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { crit!(log, "This release does not support mainnet genesis state."); return Err("Mainnet is unsupported".into()); } - GenesisState::RecentGenesis { validator_count } => { + BeaconChainStartMethod::RecentGenesis { 
validator_count } => { generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) } - GenesisState::Generated { + BeaconChainStartMethod::Generated { validator_count, genesis_time, } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - GenesisState::Yaml { file } => { + BeaconChainStartMethod::Yaml { file } => { let file = File::open(file).map_err(|e| { format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) })?; @@ -79,7 +79,7 @@ where serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? } - GenesisState::HttpBootstrap { server } => { + BeaconChainStartMethod::HttpBootstrap { server, .. } => { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index e802a93a3d..1e8f60f6ef 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,15 +1,11 @@ -use crate::Bootstrapper; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, warn, Drain}; +use slog::{info, o, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; -/// The number initial validators when starting the `Minimal`. -const TESTNET_VALIDATOR_COUNT: usize = 16; - /// The number initial validators when starting the `Minimal`. const TESTNET_SPEC_CONSTANTS: &str = "minimal"; @@ -21,63 +17,52 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, + /// Defines how we should initialize a BeaconChain instances. + /// + /// This field is not serialized, there for it will not be written to (or loaded from) config + /// files. It can only be configured via the CLI. 
#[serde(skip)] - pub boot_method: BootMethod, + pub beacon_chain_start_method: BeaconChainStartMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, } +/// Defines how the client should initialize a BeaconChain. +/// +/// In general, there are two methods: +/// - resuming a new chain, or +/// - initializing a new one. #[derive(Debug, Clone)] -pub enum BootMethod { - /// Resume from an existing database. +pub enum BeaconChainStartMethod { + /// Resume from an existing BeaconChain, loaded from the existing local database. Resume, - /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. /// /// Set the genesis time to be the start of the previous 30-minute window. RecentGenesis { validator_count: usize }, - /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { validator_count: usize, genesis_time: u64, }, - /// Load a YAML-encoded genesis state from a file. + /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and + /// finalized states and blocks. HttpBootstrap { server: String, port: Option, }, } -impl Default for BootMethod { +impl Default for BeaconChainStartMethod { fn default() -> Self { - BootMethod::Resume + BeaconChainStartMethod::Resume } } -pub enum GenesisState { - /// Use the mainnet genesis state. - /// - /// Mainnet genesis state is not presently known, so this is a place-holder. 
- Mainnet, - /// Generate a state with `validator_count` validators, all with well-known secret keys. - /// - /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, - /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known - /// secret keys. - Generated { - validator_count: usize, - genesis_time: u64, - }, - /// Load a YAML-encoded genesis state from a file. - Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. - HttpBootstrap { server: String }, -} - impl Default for Config { fn default() -> Self { Self { @@ -86,10 +71,10 @@ impl Default for Config { db_type: "disk".to_string(), db_name: "chain_db".to_string(), network: NetworkConfig::new(), - rpc: rpc::RPCConfig::default(), - rest_api: rest_api::ApiConfig::default(), + rpc: <_>::default(), + rest_api: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - boot_method: BootMethod::default(), + beacon_chain_start_method: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 6405e05e71..3eb5553696 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,7 @@ pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; pub use bootstrapper::Bootstrapper; -pub use config::{Config as ClientConfig, GenesisState}; +pub use config::Config as ClientConfig; pub use eth2_config::Eth2Config; /// Main beacon node client service. 
This provides the connection and initialisation of the clients diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b66a00abba..a97ec3708d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,7 +2,7 @@ use clap::ArgMatches; use client::{Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; -use slog::{crit, info, Logger}; +use slog::{crit, info, warn, Logger}; use std::fs; use std::path::PathBuf; @@ -35,15 +35,16 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { // The bootstrap testnet method requires inserting a libp2p address into the // network config. ("bootstrap", Some(sub_matches)) => { - let server = sub_matches + let server: String = sub_matches .value_of("server") - .ok_or_else(|| "No bootstrap server specified".into())?; + .ok_or_else(|| "No bootstrap server specified")? + .to_string(); let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - if let Some(server_multiaddr) = - bootstrapper.best_effort_multiaddr(sub_matches.value_of("libp2p_port")) - { + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr( + parse_port_option(sub_matches.value_of("libp2p_port")), + ) { info!( log, "Estimated bootstrapper libp2p address"; @@ -83,6 +84,11 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { builder.build() } +/// Decodes an optional string into an optional u16. +fn parse_port_option(o: Option<&str>) -> Option { + o.and_then(|s| s.parse::().ok()) +} + /// Allows for building a set of configurations based upon `clap` arguments. 
struct ConfigBuilder<'a> { matches: &'a ArgMatches<'a>, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 12c9b8a017..d7a4bae795 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -227,7 +227,9 @@ fn main() { .short("p") .long("port") .value_name("TCP_PORT") - .help("A libp2p listen port used to peer with the bootstrap server")) + .help("A libp2p listen port used to peer with the bootstrap server. This flag is useful \ + when port-fowarding is used: you may connect using a different port than \ + the one the server is immediately listening on.")) ) .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ From 9cdcc7d198b9e0e48e89870ef050958e8bb94abd Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:02:54 +1000 Subject: [PATCH 109/186] Update to latest libp2p --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 7517c29804..92c2c80d44 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" From 7fd7aa2cdbe1538232440c22ea8f752ede465bd2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 10:09:51 +1000 Subject: [PATCH 110/186] Tidy ConfigBuilder --- 
beacon_node/src/config.rs | 136 +++++++++++++++++++++----------------- 1 file changed, 77 insertions(+), 59 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index a97ec3708d..c1074da03d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -14,12 +14,12 @@ type Result = std::result::Result; type Config = (ClientConfig, Eth2Config); /// Gets the fully-initialized global client and eth2 configuration objects. -pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { - let mut builder = ConfigBuilder::new(matches, log)?; +pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { + let mut builder = ConfigBuilder::new(cli_args, log)?; - match matches.subcommand() { - ("testnet", Some(sub_matches)) => { - if sub_matches.is_present("random-datadir") { + match cli_args.subcommand() { + ("testnet", Some(sub_cmd_args)) => { + if sub_cmd_args.is_present("random-datadir") { builder.set_random_datadir()?; } @@ -29,39 +29,13 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { "path" => format!("{:?}", builder.data_dir) ); - builder.update_spec_from_subcommand(&sub_matches)?; + builder.update_spec_from_subcommand(&sub_cmd_args)?; - match sub_matches.subcommand() { + match sub_cmd_args.subcommand() { // The bootstrap testnet method requires inserting a libp2p address into the // network config. - ("bootstrap", Some(sub_matches)) => { - let server: String = sub_matches - .value_of("server") - .ok_or_else(|| "No bootstrap server specified")? 
- .to_string(); - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr( - parse_port_option(sub_matches.value_of("libp2p_port")), - ) { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - - builder - .client_config - .network - .libp2p_nodes - .push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); - }; + ("bootstrap", Some(sub_cmd_args)) => { + builder.import_bootstrap_libp2p_address(&sub_cmd_args)?; } _ => (), }; @@ -81,7 +55,7 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { } }; - builder.build() + builder.build(cli_args) } /// Decodes an optional string into an optional u16. @@ -91,21 +65,20 @@ fn parse_port_option(o: Option<&str>) -> Option { /// Allows for building a set of configurations based upon `clap` arguments. struct ConfigBuilder<'a> { - matches: &'a ArgMatches<'a>, log: &'a Logger, pub data_dir: PathBuf, - pub eth2_config: Eth2Config, - pub client_config: ClientConfig, + eth2_config: Eth2Config, + client_config: ClientConfig, } impl<'a> ConfigBuilder<'a> { /// Create a new builder with default settings. - pub fn new(matches: &'a ArgMatches, log: &'a Logger) -> Result { + pub fn new(cli_args: &'a ArgMatches, log: &'a Logger) -> Result { // Read the `--datadir` flag. // // If it's not present, try and find the home directory (`~`) and push the default data // directory onto it. 
- let data_dir: PathBuf = matches + let data_dir: PathBuf = cli_args .value_of("datadir") .map(|string| PathBuf::from(string)) .or_else(|| { @@ -117,7 +90,6 @@ impl<'a> ConfigBuilder<'a> { .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; Ok(Self { - matches, log, data_dir, eth2_config: Eth2Config::minimal(), @@ -125,23 +97,47 @@ impl<'a> ConfigBuilder<'a> { }) } - /// Consumes self, returning the configs. - pub fn build(mut self) -> Result { - self.eth2_config.apply_cli_args(&self.matches)?; - self.client_config - .apply_cli_args(&self.matches, &mut self.log.clone())?; + pub fn set_beacon_chain_start_method(&mut self, cli_args: &ArgMatches) -> Result<()> { + // + } - if self.eth2_config.spec_constants != self.client_config.spec_constants { - crit!(self.log, "Specification constants do not match."; - "client_config" => format!("{}", self.client_config.spec_constants), - "eth2_config" => format!("{}", self.eth2_config.spec_constants) + /// Reads a `server` flag from `cli_args` and attempts to generate a libp2p `Multiaddr` that + /// this client can use to connect to the given `server`. + /// + /// Also reads for a `libp2p_port` flag in `cli_args`, using that as the port for the + /// `Multiaddr`. If `libp2p_port` is not in `cli_args`, attempts to connect to `server` via HTTP + /// and retrieve it's libp2p listen port. + /// + /// Returns an error if the `server` flag is not present in `cli_args`. + pub fn import_bootstrap_libp2p_address(&mut self, cli_args: &ArgMatches) -> Result<()> { + let server: String = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")? 
+ .to_string(); + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + if let Some(server_multiaddr) = + bootstrapper.best_effort_multiaddr(parse_port_option(cli_args.value_of("libp2p_port"))) + { + info!( + self.log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) ); - return Err("Specification constant mismatch".into()); - } - self.client_config.data_dir = self.data_dir; + self.client_config + .network + .libp2p_nodes + .push(server_multiaddr); + } else { + warn!( + self.log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); + }; - Ok((self.client_config, self.eth2_config)) + Ok(()) } /// Set the config data_dir to be an random directory. @@ -165,20 +161,20 @@ impl<'a> ConfigBuilder<'a> { /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// - /// Returns an error if the `--spec` flag is not present. - pub fn update_spec_from_subcommand(&mut self, sub_matches: &ArgMatches) -> Result<()> { + /// Returns an error if the `--spec` flag is not present in the given `cli_args`. + pub fn update_spec_from_subcommand(&mut self, cli_args: &ArgMatches) -> Result<()> { // Re-initialise the `Eth2Config`. // // If a CLI parameter is set, overwrite any config file present. // If a parameter is not set, use either the config file present or default to minimal. 
- let eth2_config = match sub_matches.value_of("spec") { + let eth2_config = match cli_args.value_of("spec") { Some("mainnet") => Eth2Config::mainnet(), Some("minimal") => Eth2Config::minimal(), Some("interop") => Eth2Config::interop(), _ => return Err("Unable to determine specification type.".into()), }; - self.client_config.spec_constants = sub_matches + self.client_config.spec_constants = cli_args .value_of("spec") .expect("Guarded by prior match statement") .to_string(); @@ -244,4 +240,26 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + + /// Consumes self, returning the configs. + /// + /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand + /// cli_args). + pub fn build(mut self, cli_args: &ArgMatches) -> Result { + self.eth2_config.apply_cli_args(cli_args)?; + self.client_config + .apply_cli_args(cli_args, &mut self.log.clone())?; + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + self.client_config.data_dir = self.data_dir; + + Ok((self.client_config, self.eth2_config)) + } } From 1bea1755c46d17fff9c6cea56e55691bf5cfa1b9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:13:17 +1000 Subject: [PATCH 111/186] Remove redundant code --- beacon_node/eth2-libp2p/src/rpc/methods.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 8fef1a75a6..d912bcfa1e 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -89,17 +89,6 @@ pub struct BeaconBlocksRequest { pub step: u64, } -// TODO: Currently handle encoding/decoding of blocks in the message handler. 
Leave this struct -// here in case encoding/decoding of ssz requires an object. -/* -/// Response containing a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlocksResponse { - /// List of requested blocks and associated slots. - pub beacon_blocks: Vec, -} -*/ - /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct RecentBeaconBlocksRequest { From 140c677a38d16a8f51f4b6521ee0f74f1cd1ddca Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 12:14:04 +1000 Subject: [PATCH 112/186] Add much more progress to new CLI setup --- beacon_node/client/src/beacon_chain_types.rs | 7 +- beacon_node/client/src/config.rs | 9 +- beacon_node/client/src/lib.rs | 2 +- beacon_node/src/config.rs | 258 ++++++++++++++----- beacon_node/src/main.rs | 19 +- 5 files changed, 214 insertions(+), 81 deletions(-) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 37e4a055e8..7a57aa4757 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -60,9 +60,10 @@ where T::LmdGhost: LmdGhost, { let genesis_state = match &config.beacon_chain_start_method { - BeaconChainStartMethod::Resume => { - crit!(log, "This release does not support mainnet genesis state."); - return Err("Mainnet is unsupported".into()); + BeaconChainStartMethod::Resume => unimplemented!("No resume code yet"), + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet is not yet specified. 
We're working on it.".into()); } BeaconChainStartMethod::RecentGenesis { validator_count } => { generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1e8f60f6ef..f2725b3e79 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -37,7 +37,9 @@ pub struct Config { pub enum BeaconChainStartMethod { /// Resume from an existing BeaconChain, loaded from the existing local database. Resume, - /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. + /// Resume from an existing BeaconChain, loaded from the existing local database. + Mainnet, + /// Create a new beacon chain that can connect to mainnet. /// /// Set the genesis time to be the start of the previous 30-minute window. RecentGenesis { validator_count: usize }, @@ -51,10 +53,7 @@ pub enum BeaconChainStartMethod { Yaml { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. - HttpBootstrap { - server: String, - port: Option, - }, + HttpBootstrap { server: String, port: Option }, } impl Default for BeaconChainStartMethod { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 3eb5553696..9d3e001faf 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,7 @@ pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; pub use bootstrapper::Bootstrapper; -pub use config::Config as ClientConfig; +pub use config::{BeaconChainStartMethod, Config as ClientConfig}; pub use eth2_config::Eth2Config; /// Main beacon node client service. 
This provides the connection and initialisation of the clients diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c1074da03d..68d905ed22 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,10 +1,10 @@ use clap::ArgMatches; -use client::{Bootstrapper, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; @@ -19,29 +19,9 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { - if sub_cmd_args.is_present("random-datadir") { - builder.set_random_datadir()?; - } - - info!( - log, - "Creating new datadir"; - "path" => format!("{:?}", builder.data_dir) - ); - - builder.update_spec_from_subcommand(&sub_cmd_args)?; - - match sub_cmd_args.subcommand() { - // The bootstrap testnet method requires inserting a libp2p address into the - // network config. - ("bootstrap", Some(sub_cmd_args)) => { - builder.import_bootstrap_libp2p_address(&sub_cmd_args)?; - } - _ => (), - }; - - builder.write_configs_to_new_datadir()?; + process_testnet_subcommand(&mut builder, sub_cmd_args, log)? } + // No sub-command assumes a resume operation. _ => { info!( log, @@ -49,6 +29,20 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { "path" => format!("{:?}", builder.data_dir) ); + // If no primary subcommand was given, start the beacon chain from an existing + // database. 
+ builder.set_beacon_chain_start_method(BeaconChainStartMethod::Resume); + + // Whilst there is no large testnet or mainnet force the user to specify how they want + // to start a new chain (e.g., from a genesis YAML file, another node, etc). + if !builder.data_dir.exists() { + return Err( + "No datadir found. To start a new beacon chain, see `testnet --help`. \ + Use `--datadir` to specify a different directory" + .into(), + ); + } + // If the `testnet` command was not provided, attempt to load an existing datadir and // continue with an existing chain. builder.load_from_datadir()?; @@ -58,9 +52,62 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { builder.build(cli_args) } -/// Decodes an optional string into an optional u16. -fn parse_port_option(o: Option<&str>) -> Option { - o.and_then(|s| s.parse::().ok()) +/// Process the `testnet` CLI subcommand arguments, updating the `builder`. +fn process_testnet_subcommand( + builder: &mut ConfigBuilder, + cli_args: &ArgMatches, + log: &Logger, +) -> Result<()> { + if cli_args.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + + info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + builder.update_spec_from_subcommand(&cli_args)?; + + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) + match cli_args.subcommand() { + ("bootstrap", Some(cli_args)) => { + let server = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")?; + let port: Option = cli_args + .value_of("port") + .and_then(|s| s.parse::().ok()); + + builder.import_bootstrap_libp2p_address(server, port)?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { + server: server.to_string(), + port, + }) + } + ("recent", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count 
specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { + validator_count, + }) + } + _ => return Err("No testnet method specified. See 'testnet --help'.".into()), + }; + + builder.write_configs_to_new_datadir()?; + + Ok(()) } /// Allows for building a set of configurations based upon `clap` arguments. @@ -97,29 +144,65 @@ impl<'a> ConfigBuilder<'a> { }) } - pub fn set_beacon_chain_start_method(&mut self, cli_args: &ArgMatches) -> Result<()> { - // + /// Clears any configuration files that would interfere with writing new configs. + /// + /// Moves the following files in `data_dir` into a backup directory: + /// + /// - Client config + /// - Eth2 config + /// - The entire database directory + pub fn clean_datadir(&mut self) -> Result<()> { + let backup_dir = { + let mut s = String::from("backup_"); + s.push_str(&random_string(6)); + self.data_dir.join(s) + }; + + fs::create_dir_all(&backup_dir) + .map_err(|e| format!("Unable to create config backup dir: {:?}", e))?; + + let move_to_backup_dir = |path: &Path| -> Result<()> { + let file_name = path + .file_name() + .ok_or_else(|| "Invalid path found during datadir clean (no filename).")?; + + let mut new = path.to_path_buf(); + new.pop(); + new.push(backup_dir.clone()); + new.push(file_name); + + let _ = fs::rename(path, new); + + Ok(()) + }; + + move_to_backup_dir(&self.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.data_dir.join(ETH2_CONFIG_FILENAME))?; + + if let Some(db_path) = self.client_config.db_path() { + move_to_backup_dir(&db_path)?; + } + + Ok(()) } - /// Reads a `server` flag from `cli_args` and attempts to generate a libp2p `Multiaddr` that - /// this client can use to connect to the given `server`. - /// - /// Also reads for a `libp2p_port` flag in `cli_args`, using that as the port for the - /// `Multiaddr`. 
If `libp2p_port` is not in `cli_args`, attempts to connect to `server` via HTTP - /// and retrieve it's libp2p listen port. - /// - /// Returns an error if the `server` flag is not present in `cli_args`. - pub fn import_bootstrap_libp2p_address(&mut self, cli_args: &ArgMatches) -> Result<()> { - let server: String = cli_args - .value_of("server") - .ok_or_else(|| "No bootstrap server specified")? - .to_string(); + /// Sets the method for starting the beacon chain. + pub fn set_beacon_chain_start_method(&mut self, method: BeaconChainStartMethod) { + self.client_config.beacon_chain_start_method = method; + } + /// Import the libp2p address for `server` into the list of bootnodes in `self`. + /// + /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, + /// attempts to connect to the `server` via HTTP and retrieve it's libp2p listen port. + pub fn import_bootstrap_libp2p_address( + &mut self, + server: &str, + port: Option, + ) -> Result<()> { let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - if let Some(server_multiaddr) = - bootstrapper.best_effort_multiaddr(parse_port_option(cli_args.value_of("libp2p_port"))) - { + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr(port) { info!( self.log, "Estimated bootstrapper libp2p address"; @@ -132,9 +215,9 @@ impl<'a> ConfigBuilder<'a> { .push(server_multiaddr); } else { warn!( - self.log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); + self.log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); }; Ok(()) @@ -144,14 +227,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Useful for easily spinning up ephemeral testnets. 
pub fn set_random_datadir(&mut self) -> Result<()> { - let random = rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(10) - .collect::(); - let mut s = DEFAULT_DATA_DIR.to_string(); s.push_str("_random_"); - s.push_str(&random); + s.push_str(&random_string(6)); self.data_dir.pop(); self.data_dir.push(s); @@ -187,12 +265,15 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if `self.data_dir` already exists. pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + let db_exists = self + .client_config + .db_path() + .map(|d| d.exists()) + .unwrap_or_else(|| false); + // Do not permit creating a new config when the datadir exists. - if self.data_dir.exists() { - return Err( - "Datadir already exists, will not overwrite. Remove the directory or use --datadir." - .into(), - ); + if db_exists { + return Err("Database already exists. See `-f` in `testnet --help`".into()); } // Create `datadir` and any non-existing parent directories. @@ -201,16 +282,35 @@ impl<'a> ConfigBuilder<'a> { format!("{}", e) })?; - // Write the client config to a TOML file in the datadir. - write_to_file( - self.data_dir.join(CLIENT_CONFIG_FILENAME), - &self.client_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + let client_config_file = self.data_dir.join(CLIENT_CONFIG_FILENAME); + if client_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + CLIENT_CONFIG_FILENAME + )); + } else { + // Write the onfig to a TOML file in the datadir. + write_to_file( + self.data_dir.join(CLIENT_CONFIG_FILENAME), + &self.client_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + } - // Write the eth2 config to a TOML file in the datadir. 
- write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + let eth2_config_file = self.data_dir.join(ETH2_CONFIG_FILENAME); + if eth2_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + ETH2_CONFIG_FILENAME + )); + } else { + // Write the config to a TOML file in the datadir. + write_to_file( + self.data_dir.join(ETH2_CONFIG_FILENAME), + &self.client_config, + ) .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + } Ok(()) } @@ -225,7 +325,22 @@ impl<'a> ConfigBuilder<'a> { // public testnet or mainnet). if !self.data_dir.exists() { return Err( - "No datadir found. Use the 'testnet' sub-command to select a testnet type.".into(), + "No datadir found. Either create a new testnet or specify a different `--datadir`." + .into(), + ); + } + + // If there is a path to a databse in the config, ensure it exists. + if !self + .client_config + .db_path() + .map(|path| path.exists()) + .unwrap_or_else(|| true) + { + return Err( + "No database found in datadir. Use the 'testnet -f' sub-command to overwrite the \ + existing datadir, or specify a different `--datadir`." + .into(), ); } @@ -263,3 +378,10 @@ impl<'a> ConfigBuilder<'a> { Ok((self.client_config, self.eth2_config)) } } + +fn random_string(len: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .collect::() +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index d7a4bae795..4430db1287 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -161,7 +161,7 @@ fn main() { .help("Type of database to use.") .takes_value(true) .possible_values(&["disk", "memory"]) - .default_value("memory"), + .default_value("disk"), ) /* * Logging. 
@@ -207,15 +207,20 @@ fn main() { iteration.") ) .arg( - Arg::with_name("force-create") - .long("force-create") + Arg::with_name("force") + .long("force") .short("f") - .help("If present, will delete any existing datadir before creating a new one. Cannot be \ + .help("If present, will backup any existing config files before creating new ones. Cannot be \ used when specifying --random-datadir (logic error).") .conflicts_with("random-datadir") ) /* * Testnet sub-commands. + * + * `boostrap` + * + * Start a new node by downloading genesis and network info from another node via the + * HTTP API. */ .subcommand(SubCommand::with_name("bootstrap") .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") @@ -231,6 +236,12 @@ fn main() { when port-fowarding is used: you may connect using a different port than \ the one the server is immediately listening on.")) ) + /* + * `recent` + * + * Start a new node, with a specified number of validators with a genesis time in the last + * 30-minutes. 
+ */ .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") From cf435d96536567414141ccf3c1bfaa9b292cb523 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 14:45:49 +1000 Subject: [PATCH 113/186] Refactor beacon chain start code --- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 3 + beacon_node/beacon_chain/src/beacon_chain.rs | 8 +- .../beacon_chain/src/beacon_chain_builder.rs | 98 ++++++++-- .../src/bootstrapper.rs | 0 beacon_node/beacon_chain/src/lib.rs | 2 + beacon_node/beacon_chain/src/test_utils.rs | 20 +-- beacon_node/client/Cargo.toml | 2 - beacon_node/client/src/beacon_chain_types.rs | 170 ------------------ beacon_node/client/src/lib.rs | 74 ++++++-- beacon_node/src/config.rs | 3 +- beacon_node/src/run.rs | 7 +- 12 files changed, 161 insertions(+), 227 deletions(-) rename beacon_node/{client => beacon_chain}/src/bootstrapper.rs (100%) delete mode 100644 beacon_node/client/src/beacon_chain_types.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9ce724c148..5efb734239 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } +beacon_chain = { path = "beacon_chain" } types = { path = "../eth2/types" } store = { path = "./store" } client = { path = "client" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 31f3412865..018ea19766 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -11,9 +11,11 @@ lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } +reqwest = "0.9" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8" +eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = 
"^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } @@ -22,6 +24,7 @@ eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } +url = "1.2" lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5feefd8417..d79d8c3589 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -114,7 +114,6 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. pub fn from_genesis( store: Arc, - slot_clock: T::SlotClock, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -147,6 +146,13 @@ impl BeaconChain { "genesis_block_root" => format!("{}", genesis_block_root), ); + // Slot clock + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ); + Ok(Self { spec, slot_clock, diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index a6c77cb63c..79c74b0068 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,49 +1,115 @@ -use crate::BeaconChainTypes; +use super::bootstrapper::Bootstrapper; +use crate::{BeaconChain, BeaconChainTypes}; +use slog::Logger; use std::fs::File; use std::path::PathBuf; +use std::sync::Arc; use std::time::SystemTime; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; +use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec}; + +enum BuildStrategy { + FromGenesis { + genesis_state: Box>, + genesis_block: Box>, + }, + LoadFromStore, +} pub struct BeaconChainBuilder { - genesis_state: 
BeaconState, - genesis_block: BeaconBlock, + build_strategy: BuildStrategy, spec: ChainSpec, + log: Logger, } impl BeaconChainBuilder { - pub fn recent_genesis(validator_count: usize, spec: ChainSpec) -> Self { - Self::quick_start(recent_genesis_time(), validator_count, spec) + pub fn recent_genesis(validator_count: usize, spec: ChainSpec, log: Logger) -> Self { + Self::quick_start(recent_genesis_time(), validator_count, spec, log) } - pub fn quick_start(genesis_time: u64, validator_count: usize, spec: ChainSpec) -> Self { + pub fn quick_start( + genesis_time: u64, + validator_count: usize, + spec: ChainSpec, + log: Logger, + ) -> Self { let (mut genesis_state, _keypairs) = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) .build(); genesis_state.genesis_time = genesis_time; - Self::from_genesis_state(genesis_state, spec) + Self::from_genesis_state(genesis_state, spec, log) } - pub fn yaml_state(file: PathBuf, spec: ChainSpec) -> Result { + pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; - Ok(Self::from_genesis_state(genesis_state, spec)) + Ok(Self::from_genesis_state(genesis_state, spec, log)) } - pub fn from_genesis_state(genesis_state: BeaconState, spec: ChainSpec) -> Self { - Self { - genesis_block: genesis_block(&genesis_state, &spec), - genesis_state, + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { + let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; + + let (genesis_state, genesis_block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; + + Ok(Self { + build_strategy: 
BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block), + genesis_state: Box::new(genesis_state), + }, spec, + log, + }) + } + + fn from_genesis_state( + genesis_state: BeaconState, + spec: ChainSpec, + log: Logger, + ) -> Self { + Self { + build_strategy: BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block(&genesis_state, &spec)), + genesis_state: Box::new(genesis_state), + }, + spec, + log, } } + + pub fn from_store(spec: ChainSpec, log: Logger) -> Self { + Self { + build_strategy: BuildStrategy::LoadFromStore, + spec, + log, + } + } + + pub fn build(self, store: Arc) -> Result, String> { + Ok(match self.build_strategy { + BuildStrategy::LoadFromStore => BeaconChain::from_store(store, self.spec, self.log) + .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? + .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))?, + BuildStrategy::FromGenesis { + genesis_block, + genesis_state, + } => BeaconChain::from_genesis( + store, + genesis_state.as_ref().clone(), + genesis_block.as_ref().clone(), + self.spec, + self.log, + ) + .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, + }) + } } fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/beacon_chain/src/bootstrapper.rs similarity index 100% rename from beacon_node/client/src/bootstrapper.rs rename to beacon_node/beacon_chain/src/bootstrapper.rs diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9c833f778d..560da65197 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -4,6 +4,7 @@ extern crate lazy_static; mod beacon_chain; mod beacon_chain_builder; +mod bootstrapper; mod checkpoint; mod errors; mod fork_choice; @@ -18,6 +19,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, 
BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; +pub use bootstrapper::Bootstrapper; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 09f4749ea3..29696b771a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use lmd_ghost::LmdGhost; use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::SlotClock; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; @@ -114,22 +113,9 @@ where let builder = NullLoggerBuilder; let log = builder.build().expect("logger should build"); - // Slot clock - let slot_clock = TestingSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - let chain = BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChain::from_genesis(store, genesis_state, genesis_block, spec.clone(), log) + .expect("Terminate if beacon chain generation fails"); Self { chain, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9b5a9cf42c..05c58cc8ba 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,5 +27,3 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" -reqwest = "0.9" -url = "1.2" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs deleted file mode 100644 index 7a57aa4757..0000000000 --- a/beacon_node/client/src/beacon_chain_types.rs +++ /dev/null @@ -1,170 +0,0 @@ -use crate::bootstrapper::Bootstrapper; -use crate::error::Result; -use crate::{config::BeaconChainStartMethod, ClientConfig}; -use 
beacon_chain::{ - lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, - slot_clock::SystemTimeSlotClock, - store::Store, - BeaconChain, BeaconChainTypes, -}; -use slog::{crit, info, Logger}; -use slot_clock::SlotClock; -use std::fs::File; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::SystemTime; -use tree_hash::TreeHash; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; - -/// Provides a new, initialized `BeaconChain` -pub trait InitialiseBeaconChain { - fn initialise_beacon_chain( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, - ) -> Result> { - maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log) - } -} - -#[derive(Clone)] -pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, -} - -impl BeaconChainTypes for ClientType -where - S: Store + 'static, - E: EthSpec, -{ - type Store = S; - type SlotClock = SystemTimeSlotClock; - type LmdGhost = ThreadSafeReducedTree; - type EthSpec = E; -} -impl InitialiseBeaconChain for ClientType {} - -/// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. -fn maybe_load_from_store_for_testnet( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, -) -> Result> -where - T: BeaconChainTypes, - T::LmdGhost: LmdGhost, -{ - let genesis_state = match &config.beacon_chain_start_method { - BeaconChainStartMethod::Resume => unimplemented!("No resume code yet"), - BeaconChainStartMethod::Mainnet => { - crit!(log, "No mainnet beacon chain startup specification."); - return Err("Mainnet is not yet specified. 
We're working on it.".into()); - } - BeaconChainStartMethod::RecentGenesis { validator_count } => { - generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) - } - BeaconChainStartMethod::Generated { - validator_count, - genesis_time, - } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - BeaconChainStartMethod::Yaml { file } => { - let file = File::open(file).map_err(|e| { - format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) - })?; - - serde_yaml::from_reader(file) - .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? - } - BeaconChainStartMethod::HttpBootstrap { server, .. } => { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) - .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - - let (state, _block) = bootstrapper - .genesis() - .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - - state - } - }; - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - let genesis_block_root = genesis_block.canonical_root(); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - // Try load an existing `BeaconChain` from the store. If unable, create a new one. - if let Ok(Some(beacon_chain)) = - BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) - { - // Here we check to ensure that the `BeaconChain` loaded from store has the expected - // genesis block. - // - // Without this check, it's possible that there will be an existing DB with a `BeaconChain` - // that has different parameters than provided to this executable. 
- if beacon_chain.genesis_block_root == genesis_block_root { - info!( - log, - "Loaded BeaconChain from store"; - "slot" => beacon_chain.head().beacon_state.slot, - "best_slot" => beacon_chain.best_slot(), - ); - - Ok(beacon_chain) - } else { - crit!( - log, - "The BeaconChain loaded from disk has an incorrect genesis root. \ - This may be caused by an old database in located in datadir." - ); - Err("Incorrect genesis root".into()) - } - } else { - BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec, - log.clone(), - ) - .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into()) - } -} - -fn generate_testnet_genesis_state( - validator_count: usize, - genesis_time: u64, - spec: &ChainSpec, -) -> BeaconState { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec) - .build(); - - genesis_state.genesis_time = genesis_time; - - genesis_state -} - -/// Returns the system time, mod 30 minutes. -/// -/// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. 
- now - secs_after_last_period -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 9d3e001faf..e2baf22d5f 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,31 +1,47 @@ extern crate slog; -mod beacon_chain_types; -mod bootstrapper; mod config; pub mod error; pub mod notifier; -use beacon_chain::BeaconChain; +use beacon_chain::{ + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, + BeaconChainBuilder, +}; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use slog::{error, info, o}; +use slog::{crit, error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; pub use beacon_chain::BeaconChainTypes; -pub use beacon_chain_types::ClientType; -pub use beacon_chain_types::InitialiseBeaconChain; -pub use bootstrapper::Bootstrapper; pub use config::{BeaconChainStartMethod, Config as ClientConfig}; pub use eth2_config::Eth2Config; +#[derive(Clone)] +pub struct ClientType { + _phantom_t: PhantomData, + _phantom_u: PhantomData, +} + +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec, +{ + type Store = S; + type SlotClock = SystemTimeSlotClock; + type LmdGhost = ThreadSafeReducedTree; + type EthSpec = E; +} + /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. pub struct Client { @@ -49,7 +65,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. 
pub fn new( @@ -62,13 +78,41 @@ where let store = Arc::new(store); let seconds_per_slot = eth2_config.spec.seconds_per_slot; - // Load a `BeaconChain` from the store, or create a new one if it does not exist. - let beacon_chain = Arc::new(T::initialise_beacon_chain( - store, - &client_config, - eth2_config.spec.clone(), - log.clone(), - )?); + let spec = ð2_config.spec.clone(); + + let beacon_chain_builder = match &client_config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { + BeaconChainBuilder::from_store(spec.clone(), log.clone()) + } + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet is not yet specified. We're working on it.".into()); + } + BeaconChainStartMethod::RecentGenesis { validator_count } => { + BeaconChainBuilder::recent_genesis(*validator_count, spec.clone(), log.clone()) + } + BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + } => BeaconChainBuilder::quick_start( + *genesis_time, + *validator_count, + spec.clone(), + log.clone(), + ), + BeaconChainStartMethod::Yaml { file } => { + BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::HttpBootstrap { server, .. } => { + BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? 
+ } + }; + + let beacon_chain: Arc> = Arc::new( + beacon_chain_builder + .build(store) + .map_err(error::Error::from)?, + ); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 68d905ed22..9fac9b49a4 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,6 @@ +use beacon_chain::Bootstrapper; use clap::ArgMatches; -use client::{BeaconChainStartMethod, Bootstrapper, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index e23b5bc72d..620cb64bb5 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,7 +1,4 @@ -use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config, - InitialiseBeaconChain, -}; +use client::{error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; @@ -117,7 +114,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; From b58aa1d1481b4b7104032c48e30a5de99aed7a20 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:47:03 +1000 Subject: [PATCH 114/186] Add custom config options to testnet sub-cmd --- beacon_node/src/config.rs | 61 ++++++++++++++++++++++++++++----------- beacon_node/src/main.rs | 20 +++++++++++-- 2 files changed, 62 insertions(+), 19 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9fac9b49a4..c8a9299a58 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -67,14 +67,28 @@ fn 
process_testnet_subcommand( builder.clean_datadir()?; } + if let Some(path_string) = cli_args.value_of("eth2-config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; + builder.load_eth2_config(path)?; + } else { + builder.update_spec_from_subcommand(&cli_args)?; + } + + if let Some(path_string) = cli_args.value_of("config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse config path: {:?}", e))?; + builder.load_client_config(path)?; + } + info!( log, "Creating new datadir"; "path" => format!("{:?}", builder.data_dir) ); - builder.update_spec_from_subcommand(&cli_args)?; - // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { @@ -82,7 +96,7 @@ fn process_testnet_subcommand( .value_of("server") .ok_or_else(|| "No bootstrap server specified")?; let port: Option = cli_args - .value_of("port") + .value_of("libp2p-port") .and_then(|s| s.parse::().ok()); builder.import_bootstrap_libp2p_address(server, port)?; @@ -306,11 +320,8 @@ impl<'a> ConfigBuilder<'a> { )); } else { // Write the config to a TOML file in the datadir. - write_to_file( - self.data_dir.join(ETH2_CONFIG_FILENAME), - &self.client_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; } Ok(()) @@ -339,20 +350,36 @@ impl<'a> ConfigBuilder<'a> { .unwrap_or_else(|| true) { return Err( - "No database found in datadir. Use the 'testnet -f' sub-command to overwrite the \ - existing datadir, or specify a different `--datadir`." + "No database found in datadir. Use 'testnet -f' to overwrite the existing \ + datadir, or specify a different `--datadir`." 
.into(), ); } - self.eth2_config = read_from_file::(self.data_dir.join(ETH2_CONFIG_FILENAME)) - .map_err(|e| format!("Unable to parse {} file: {:?}", ETH2_CONFIG_FILENAME, e))? - .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + self.load_eth2_config(self.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.data_dir.join(CLIENT_CONFIG_FILENAME))?; - self.client_config = - read_from_file::(self.data_dir.join(CLIENT_CONFIG_FILENAME)) - .map_err(|e| format!("Unable to parse {} file: {:?}", CLIENT_CONFIG_FILENAME, e))? - .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + Ok(()) + } + + /// Attempts to load the client config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { + self.client_config = read_from_file::(path) + .map_err(|e| format!("Unable to parse ClientConfig file: {:?}", e))? + .ok_or_else(|| "ClientConfig file does not exist".to_string())?; + + Ok(()) + } + + /// Attempts to load the eth2 config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { + self.eth2_config = read_from_file::(path) + .map_err(|e| format!("Unable to parse Eth2Config file: {:?}", e))? 
+ .ok_or_else(|| "Eth2Config file does not exist".to_string())?; Ok(()) } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 4430db1287..a9659362ca 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -198,6 +198,22 @@ fn main() { .takes_value(true) .required(true) .possible_values(&["mainnet", "minimal", "interop"]) + .default_value("minimal") + ) + .arg( + Arg::with_name("eth2-config") + .long("eth2-config") + .value_name("TOML_FILE") + .help("A existing eth2_spec TOML file (e.g., eth2_spec.toml).") + .takes_value(true) + .conflicts_with("spec") + ) + .arg( + Arg::with_name("config") + .long("config") + .value_name("TOML_FILE") + .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") + .takes_value(true) ) .arg( Arg::with_name("random-datadir") @@ -210,8 +226,8 @@ fn main() { Arg::with_name("force") .long("force") .short("f") - .help("If present, will backup any existing config files before creating new ones. Cannot be \ - used when specifying --random-datadir (logic error).") + .help("If present, will create new config and database files and move the any existing to a \ + backup directory.") .conflicts_with("random-datadir") ) /* From bab1f2b06423445e4aa72958bb293c0f65afb190 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:51:11 +1000 Subject: [PATCH 115/186] Rename CLI flag --- beacon_node/src/config.rs | 4 ++-- beacon_node/src/main.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c8a9299a58..0aa2d29bd3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -76,10 +76,10 @@ fn process_testnet_subcommand( builder.update_spec_from_subcommand(&cli_args)?; } - if let Some(path_string) = cli_args.value_of("config") { + if let Some(path_string) = cli_args.value_of("client-config") { let path = path_string .parse::() - .map_err(|e| format!("Unable to parse config path: {:?}", e))?; + .map_err(|e| 
format!("Unable to parse client config path: {:?}", e))?; builder.load_client_config(path)?; } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index a9659362ca..243e4b7160 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -209,8 +209,8 @@ fn main() { .conflicts_with("spec") ) .arg( - Arg::with_name("config") - .long("config") + Arg::with_name("client-config") + .long("client-config") .value_name("TOML_FILE") .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") .takes_value(true) From 39be2ed1d24f53b5494e53a89cf00f6b1023dd0f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:57:40 +1000 Subject: [PATCH 116/186] Improve CLI error messages --- beacon_node/src/config.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0aa2d29bd3..2c928ad449 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -366,9 +366,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if any files are not found or are invalid. pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { - self.client_config = read_from_file::(path) - .map_err(|e| format!("Unable to parse ClientConfig file: {:?}", e))? - .ok_or_else(|| "ClientConfig file does not exist".to_string())?; + self.client_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; Ok(()) } @@ -377,9 +377,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if any files are not found or are invalid. pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { - self.eth2_config = read_from_file::(path) - .map_err(|e| format!("Unable to parse Eth2Config file: {:?}", e))? 
- .ok_or_else(|| "Eth2Config file does not exist".to_string())?; + self.eth2_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; Ok(()) } From 901393b6642e5f01971d04fa79cd7ccfb4dac9ef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 16:02:05 +1000 Subject: [PATCH 117/186] Clean datadir after config files have been loaded --- beacon_node/src/config.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 2c928ad449..f47a2ddb06 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -63,10 +63,6 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } - if cli_args.is_present("force") { - builder.clean_datadir()?; - } - if let Some(path_string) = cli_args.value_of("eth2-config") { let path = path_string .parse::() @@ -83,6 +79,10 @@ fn process_testnet_subcommand( builder.load_client_config(path)?; } + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + info!( log, "Creating new datadir"; From 6875ae8af510ea2fe4bc86671f28770344368def Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 00:04:15 +1000 Subject: [PATCH 118/186] Pull Eth2Config during bootstrap --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/bootstrapper.rs | 19 +++++++++++++++++++ beacon_node/client/src/lib.rs | 1 + beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/lib.rs | 7 +++++++ beacon_node/rest_api/src/spec.rs | 14 ++++++++++++++ beacon_node/src/config.rs | 20 ++++++++++++++++++++ 7 files changed, 63 insertions(+) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 018ea19766..f6763d1671 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner ", "Age Manning Result { + 
get_eth2_config(self.url.clone()).map_err(|e| format!("Unable to get Eth2Config: {:?}", e)) + } + /// Returns the servers ENR address. pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) @@ -129,6 +135,19 @@ fn get_slots_per_epoch(mut url: Url) -> Result { .map_err(Into::into) } +fn get_eth2_config(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("eth2_config"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { url.path_segments_mut() .map(|mut url| { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 0bb30d0aff..2612fd6489 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -162,6 +162,7 @@ where beacon_chain.clone(), network.clone(), client_config.db_path().expect("unable to read datadir"), + eth2_config.clone(), &log, ) { Ok(s) => Some(s), diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index cac196d9cb..5303dc8bdc 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -27,5 +27,6 @@ exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" +eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 964dd79982..b1137c2493 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -13,6 +13,7 @@ mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; +use eth2_config::Eth2Config; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; @@ -79,6 
+80,7 @@ pub fn start_server( beacon_chain: Arc>, network_service: Arc>, db_path: PathBuf, + eth2_config: Eth2Config, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -100,12 +102,14 @@ pub fn start_server( // Clone our stateful objects, for use in service closure. let server_log = log.clone(); let server_bc = beacon_chain.clone(); + let eth2_config = Arc::new(eth2_config); let service = move || { let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); let network_service = network_service.clone(); + let eth2_config = eth2_config.clone(); // Create a simple handler for the router, inject our stateful objects into the request. service_fn_ok(move |mut req| { @@ -118,6 +122,8 @@ pub fn start_server( req.extensions_mut().insert::(db_path.clone()); req.extensions_mut() .insert::>>(network_service.clone()); + req.extensions_mut() + .insert::>(eth2_config.clone()); let path = req.uri().path().to_string(); @@ -144,6 +150,7 @@ pub fn start_server( (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), + (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), }; diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index d0c8e4368d..86d1c227d3 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,6 +1,7 @@ use super::{success_response, ApiResult}; use crate::ApiError; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_config::Eth2Config; use hyper::{Body, Request}; use std::sync::Arc; use types::EthSpec; @@ -18,6 +19,19 @@ pub fn get_spec(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } +/// HTTP handler to return the full Eth2Config object. 
+pub fn get_eth2_config(req: Request) -> ApiResult { + let eth2_config = req + .extensions() + .get::>() + .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; + + let json: String = serde_json::to_string(eth2_config.as_ref()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Eth2Config: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + /// HTTP handler to return the full spec object. pub fn get_slots_per_epoch(_req: Request) -> ApiResult { let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f47a2ddb06..e76bd48fa7 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -63,7 +63,13 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); + if let Some(path_string) = cli_args.value_of("eth2-config") { + if is_bootstrap { + return Err("Cannot supply --eth2-config when using bootsrap".to_string()); + } + let path = path_string .parse::() .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; @@ -100,6 +106,7 @@ fn process_testnet_subcommand( .and_then(|s| s.parse::().ok()); builder.import_bootstrap_libp2p_address(server, port)?; + builder.import_bootstrap_eth2_config(server)?; builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { server: server.to_string(), @@ -252,6 +259,19 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + /// Imports an `Eth2Config` from `server`, returning an error if this fails. 
+ pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + self.update_eth2_config(bootstrapper.eth2_config()?); + + Ok(()) + } + + fn update_eth2_config(&mut self, eth2_config: Eth2Config) { + self.eth2_config = eth2_config; + } + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// /// Returns an error if the `--spec` flag is not present in the given `cli_args`. From 7f6b700b983429f4c67b1592bc76b4fd2486716a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 00:05:25 +1000 Subject: [PATCH 119/186] Remove old git merge relic --- beacon_node/src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 797217af03..aba44e6fe5 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -182,7 +182,6 @@ fn main() { .takes_value(true), ) /* -<<<<<<< HEAD * The "testnet" sub-command. * * Allows for creating a new datadir with testnet-specific configs. From ed6c39e25a7ee7fae51ef4d20522ac171a2202aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 11:19:50 +1000 Subject: [PATCH 120/186] Add log for fork choice integrity in beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 ++++++++++---- beacon_node/beacon_chain/src/fork_choice.rs | 8 ++++++ eth2/lmd_ghost/src/lib.rs | 6 ++++ eth2/lmd_ghost/src/reduced_tree.rs | 30 ++++++-------------- 4 files changed, 38 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5feefd8417..0fc71fe7b9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -739,8 +739,19 @@ impl BeaconChain { } else { // Provide the attestation to fork choice, updating the validator latest messages but // _without_ finding and updating the head. 
- self.fork_choice - .process_attestation(&state, &attestation, block)?; + if let Err(e) = self + .fork_choice + .process_attestation(&state, &attestation, block) + { + error!( + self.log, + "Add attestation to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), + "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), + "error" => format!("{:?}", e) + ); + return Err(e.into()); + } // Provide the valid attestation to op pool, which may choose to retain the // attestation for inclusion in a future block. @@ -947,10 +958,10 @@ impl BeaconChain { if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( self.log, - "fork choice failed to process_block"; - "error" => format!("{:?}", e), + "Add block to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), "block_root" => format!("{}", block_root), - "block_slot" => format!("{}", block.slot) + "error" => format!("{:?}", e), ) } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 77fdaacdc5..26084e04a7 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -199,6 +199,14 @@ impl ForkChoice { self.backend.latest_message(validator_index) } + /// Runs an integrity verification function on the underlying fork choice algorithm. + /// + /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, + /// `Err(description)` otherwise. + pub fn verify_integrity(&self) -> core::result::Result<(), String> { + self.backend.verify_integrity() + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. 
/// diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index 95cd0679c1..167cd36eaf 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -46,4 +46,10 @@ pub trait LmdGhost: Send + Sync { /// Returns the latest message for a given validator index. fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; + + /// Runs an integrity verification function on fork choice algorithm. + /// + /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, + /// `Err(description)` otherwise. + fn verify_integrity(&self) -> Result<()>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index deda02e1fd..cd3a38c463 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -43,16 +43,6 @@ impl fmt::Debug for ThreadSafeReducedTree { } } -impl ThreadSafeReducedTree -where - T: Store, - E: EthSpec, -{ - pub fn verify_integrity(&self) -> std::result::Result<(), String> { - self.core.read().verify_integrity() - } -} - impl LmdGhost for ThreadSafeReducedTree where T: Store, @@ -80,7 +70,7 @@ where fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { self.core .write() - .add_weightless_node(block.slot, block_hash) + .maybe_add_weightless_node(block.slot, block_hash) .map_err(|e| format!("process_block failed: {:?}", e)) } @@ -113,6 +103,10 @@ where fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.core.read().latest_message(validator_index) } + + fn verify_integrity(&self) -> std::result::Result<(), String> { + self.core.read().verify_integrity() + } } struct ReducedTree { @@ -163,15 +157,7 @@ where /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree). /// Any nodes which are not a descendant of `new_root` will be removed from the store. 
pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> { - if !self.nodes.contains_key(&new_root) { - let node = Node { - block_hash: new_root, - voters: vec![], - ..Node::default() - }; - - self.add_node(node)?; - } + self.maybe_add_weightless_node(new_slot, new_root)?; self.retain_subtree(self.root.0, new_root)?; @@ -247,7 +233,7 @@ where // // In this case, we add a weightless node at `start_block_root`. if !self.nodes.contains_key(&start_block_root) { - self.add_weightless_node(start_block_slot, start_block_root)?; + self.maybe_add_weightless_node(start_block_slot, start_block_root)?; }; let _root_weight = self.update_weight(start_block_root, weight_fn)?; @@ -430,7 +416,7 @@ where Ok(()) } - fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { + fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { if slot > self.root_slot() && !self.nodes.contains_key(&hash) { let node = Node { block_hash: hash, From 6bb3a651893960679bf1de3190dd2ed484a34710 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 18:09:31 +1000 Subject: [PATCH 121/186] Guard reduced tree from errors --- eth2/lmd_ghost/src/reduced_tree.rs | 107 +++++++++++++++-------------- 1 file changed, 57 insertions(+), 50 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index cd3a38c463..a388d2c383 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -311,51 +311,53 @@ where /// become redundant and removed from the reduced tree. 
fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { if let Some(vote) = *self.latest_votes.get(validator_index) { - self.get_mut_node(vote.hash)?.remove_voter(validator_index); - let node = self.get_node(vote.hash)?.clone(); + if self.nodes.contains_key(&vote.hash) { + self.get_mut_node(vote.hash)?.remove_voter(validator_index); + let node = self.get_node(vote.hash)?.clone(); - if let Some(parent_hash) = node.parent_hash { - if node.has_votes() || node.children.len() > 1 { - // A node with votes or more than one child is never removed. - } else if node.children.len() == 1 { - // A node which has only one child may be removed. - // - // Load the child of the node and set it's parent to be the parent of this - // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0])?; - child.parent_hash = node.parent_hash; + if let Some(parent_hash) = node.parent_hash { + if node.has_votes() || node.children.len() > 1 { + // A node with votes or more than one child is never removed. + } else if node.children.len() == 1 { + // A node which has only one child may be removed. + // + // Load the child of the node and set it's parent to be the parent of this + // node (viz., graft the node's child to the node's parent) + let child = self.get_mut_node(node.children[0])?; + child.parent_hash = node.parent_hash; - // Graft the parent of this node to it's child. - if let Some(parent_hash) = node.parent_hash { - let parent = self.get_mut_node(parent_hash)?; - parent.replace_child(node.block_hash, node.children[0])?; + // Graft the parent of this node to it's child. + if let Some(parent_hash) = node.parent_hash { + let parent = self.get_mut_node(parent_hash)?; + parent.replace_child(node.block_hash, node.children[0])?; + } + + self.nodes.remove(&vote.hash); + } else if node.children.is_empty() { + // Remove the to-be-deleted node from it's parent. 
+ if let Some(parent_hash) = node.parent_hash { + self.get_mut_node(parent_hash)? + .remove_child(node.block_hash)?; + } + + self.nodes.remove(&vote.hash); + + // A node which has no children may be deleted and potentially it's parent + // too. + self.maybe_delete_node(parent_hash)?; + } else { + // It is impossible for a node to have a number of children that is not 0, 1 or + // greater than one. + // + // This code is strictly unnecessary, however we keep it for readability. + unreachable!(); } - - self.nodes.remove(&vote.hash); - } else if node.children.is_empty() { - // Remove the to-be-deleted node from it's parent. - if let Some(parent_hash) = node.parent_hash { - self.get_mut_node(parent_hash)? - .remove_child(node.block_hash)?; - } - - self.nodes.remove(&vote.hash); - - // A node which has no children may be deleted and potentially it's parent - // too. - self.maybe_delete_node(parent_hash)?; } else { - // It is impossible for a node to have a number of children that is not 0, 1 or - // greater than one. - // - // This code is strictly unnecessary, however we keep it for readability. - unreachable!(); + // A node without a parent is the genesis/finalized node and should never be removed. } - } else { - // A node without a parent is the genesis/finalized node and should never be removed. - } - self.latest_votes.insert(validator_index, Some(vote)); + self.latest_votes.insert(validator_index, Some(vote)); + } } Ok(()) @@ -370,25 +372,30 @@ where /// - it does not have any votes. 
fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { let should_delete = { - let node = self.get_node(hash)?.clone(); + if let Ok(node) = self.get_node(hash) { + let node = node.clone(); - if let Some(parent_hash) = node.parent_hash { - if (node.children.len() == 1) && !node.has_votes() { - let child_hash = node.children[0]; + if let Some(parent_hash) = node.parent_hash { + if (node.children.len() == 1) && !node.has_votes() { + let child_hash = node.children[0]; - // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); + // Graft the single descendant `node` to the `parent` of node. + self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); - // Detach `node` from `parent`, replacing it with `child`. - self.get_mut_node(parent_hash)? - .replace_child(hash, child_hash)?; + // Detach `node` from `parent`, replacing it with `child`. + self.get_mut_node(parent_hash)? + .replace_child(hash, child_hash)?; - true + true + } else { + false + } } else { + // A node without a parent is the genesis node and should not be deleted. false } } else { - // A node without a parent is the genesis node and should not be deleted. + // No need to delete a node that does not exist. false } }; From 7bfe02be1cde9c08bb24b3b8ed5b0c5689d96327 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 12:46:18 +1000 Subject: [PATCH 122/186] Refactor slot clock. 
--- eth2/utils/slot_clock/src/lib.rs | 19 +- eth2/utils/slot_clock/src/metrics.rs | 7 +- .../slot_clock/src/system_time_slot_clock.rs | 191 +++++++----------- .../slot_clock/src/testing_slot_clock.rs | 37 ++-- 4 files changed, 98 insertions(+), 156 deletions(-) diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 871743c9e6..988f3d322c 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,24 +5,19 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::Duration; +use std::time::{Duration, Instant}; -pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; -pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; +pub use crate::system_time_slot_clock::SystemTimeSlotClock; +pub use crate::testing_slot_clock::TestingSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { - type Error; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; - /// Create a new `SlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self; + fn present_slot(&self) -> Option; - fn present_slot(&self) -> Result, Self::Error>; + fn duration_to_next_slot(&self) -> Option; - fn duration_to_next_slot(&self) -> Result, Self::Error>; - - fn slot_duration_millis(&self) -> u64; + fn slot_duration(&self) -> Duration; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index e0d3923e00..1abd93c488 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -18,7 +18,7 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. 
pub fn scrape_for_metrics(clock: &U) { let present_slot = match clock.present_slot() { - Ok(Some(slot)) => slot, + Some(slot) => slot, _ => Slot::new(0), }; @@ -28,5 +28,8 @@ pub fn scrape_for_metrics(clock: &U) { present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); - set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); + set_gauge( + &MILLISECONDS_PER_SLOT, + clock.slot_duration().as_millis() as i64, + ); } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index c493a8be83..88c9c0e63e 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -1,99 +1,68 @@ use super::SlotClock; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use types::Slot; pub use std::time::SystemTimeError; -#[derive(Debug, PartialEq)] -pub enum Error { - SlotDurationIsZero, - SystemTimeError(String), -} - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { genesis_slot: Slot, - genesis_seconds: u64, - slot_duration_seconds: u64, + genesis: Instant, + slot_duration: Duration, } impl SlotClock for SystemTimeSlotClock { - type Error = Error; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self { + if slot_duration.as_millis() == 0 { + panic!("SystemTimeSlotClock cannot have a < 1ms slot duration."); + } - /// Create a new `SystemTimeSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. 
- fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self { Self { genesis_slot, - genesis_seconds, - slot_duration_seconds, + genesis, + slot_duration, } } - fn present_slot(&self) -> Result, Error> { - if self.slot_duration_seconds == 0 { - return Err(Error::SlotDurationIsZero); - } + fn present_slot(&self) -> Option { + let now = Instant::now(); - let syslot_time = SystemTime::now(); - let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; - let duration_since_genesis = - duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds)); - - match duration_since_genesis { - None => Ok(None), - Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d) - .and_then(|s| Some(s + self.genesis_slot))), + if now < self.genesis { + None + } else { + let slot = Slot::from( + (now.duration_since(self.genesis).as_millis() / self.slot_duration.as_millis()) + as u64, + ); + Some(slot + self.genesis_slot) } } - fn duration_to_next_slot(&self) -> Result, Error> { - duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) + fn duration_to_next_slot(&self) -> Option { + let now = Instant::now(); + if now < self.genesis { + None + } else { + let duration_since_genesis = now - self.genesis; + let millis_since_genesis = duration_since_genesis.as_millis(); + let millis_per_slot = self.slot_duration.as_millis(); + + let current_slot = millis_since_genesis / millis_per_slot; + let next_slot = current_slot + 1; + + let next_slot = + self.genesis + Duration::from_millis((next_slot * millis_per_slot) as u64); + + Some(next_slot.duration_since(now)) + } } - fn slot_duration_millis(&self) -> u64 { - self.slot_duration_seconds * 1000 + fn slot_duration(&self) -> Duration { + self.slot_duration } } -impl From for Error { - fn from(e: SystemTimeError) -> Error { - Error::SystemTimeError(format!("{:?}", e)) - } -} - -fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option { - 
Some(Slot::new( - duration.as_secs().checked_div(slot_duration_seconds)?, - )) -} -// calculate the duration to the next slot -fn duration_to_next_slot( - genesis_time: u64, - seconds_per_slot: u64, -) -> Result, Error> { - let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; - let genesis_time = Duration::from_secs(genesis_time); - - if now < genesis_time { - return Ok(None); - } - - let since_genesis = now - genesis_time; - - let elapsed_slots = since_genesis.as_secs() / seconds_per_slot; - - let next_slot_start_seconds = (elapsed_slots + 1) - .checked_mul(seconds_per_slot) - .expect("Next slot time should not overflow u64"); - - let time_to_next_slot = Duration::from_secs(next_slot_start_seconds) - since_genesis; - - Ok(Some(time_to_next_slot)) -} - #[cfg(test)] mod tests { use super::*; @@ -104,71 +73,51 @@ mod tests { */ #[test] fn test_slot_now() { - let slot_time = 100; let genesis_slot = Slot::new(0); - let now = SystemTime::now(); - let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); + let prior_genesis = + |seconds_prior: u64| Instant::now() - Duration::from_secs(seconds_prior); - let genesis = since_epoch.as_secs() - slot_time * 89; + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); + assert_eq!(clock.present_slot(), Some(Slot::new(0))); - let clock = SystemTimeSlotClock { + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); + assert_eq!(clock.present_slot(), Some(Slot::new(5))); + + let clock = SystemTimeSlotClock::new( genesis_slot, - genesis_seconds: genesis, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89))); + Instant::now() - Duration::from_millis(500), + Duration::from_secs(1), + ); + assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); - let clock = SystemTimeSlotClock { + let clock = 
SystemTimeSlotClock::new( genesis_slot, - genesis_seconds: since_epoch.as_secs(), - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0))); - - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(42))); + Instant::now() - Duration::from_millis(1_500), + Duration::from_secs(1), + ); + assert_eq!(clock.present_slot(), Some(Slot::new(1))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } #[test] - fn test_slot_from_duration() { - let slot_time = 100; - - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(0)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(10)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(100)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(101)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - Some(Slot::new(10)) - ); + #[should_panic] + fn zero_seconds() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_secs(0)); } #[test] - fn test_slot_from_duration_slot_time_zero() { - let slot_time = 0; + #[should_panic] + fn zero_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_millis(0)); + } - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(0)), None); - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(10)), None); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - None - ); + #[test] + #[should_panic] + fn less_than_one_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_nanos(999)); } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs 
b/eth2/utils/slot_clock/src/testing_slot_clock.rs index f741d3b87a..0b65b15694 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -1,12 +1,11 @@ use super::SlotClock; use std::sync::RwLock; -use std::time::Duration; +use std::time::{Duration, Instant}; use types::Slot; -#[derive(Debug, PartialEq)] -pub enum Error {} - -/// Determines the present slot based upon the present system time. +/// A slot clock where the slot is manually set instead of being determined by the system time. +/// +/// Useful for testing scenarios. pub struct TestingSlotClock { slot: RwLock, } @@ -17,32 +16,30 @@ impl TestingSlotClock { } pub fn advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().unwrap().as_u64() + 1) + self.set_slot(self.present_slot().unwrap().as_u64() + 1) } } impl SlotClock for TestingSlotClock { - type Error = Error; - - /// Create a new `TestingSlotClock` at `genesis_slot`. - fn new(genesis_slot: Slot, _genesis_seconds: u64, _slot_duration_seconds: u64) -> Self { + fn new(genesis_slot: Slot, _genesis: Instant, _slot_duration: Duration) -> Self { TestingSlotClock { slot: RwLock::new(genesis_slot), } } - fn present_slot(&self) -> Result, Error> { + fn present_slot(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Ok(Some(slot)) + Some(slot) } /// Always returns a duration of 1 second. - fn duration_to_next_slot(&self) -> Result, Error> { - Ok(Some(Duration::from_secs(1))) + fn duration_to_next_slot(&self) -> Option { + Some(Duration::from_secs(1)) } - fn slot_duration_millis(&self) -> u64 { - 0 + /// Always returns a slot duration of 0 seconds. 
+ fn slot_duration(&self) -> Duration { + Duration::from_secs(0) } } @@ -52,11 +49,9 @@ mod tests { #[test] fn test_slot_now() { - let null = 0; - - let clock = TestingSlotClock::new(Slot::new(10), null, null); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); + let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); + assert_eq!(clock.present_slot(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); + assert_eq!(clock.present_slot(), Some(Slot::new(123))); } } From bcd53a8b10f46488eb41eafb110cf8a5576de446 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 13:25:55 +1000 Subject: [PATCH 123/186] Migrate codebase across to new SlotClock API --- beacon_node/beacon_chain/src/beacon_chain.rs | 61 ++++++++++---------- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 18 ++++-- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/rest_api/src/helpers.rs | 4 +- eth2/utils/slot_clock/src/lib.rs | 20 ++++++- validator_client/src/error.rs | 7 --- validator_client/src/service.rs | 27 ++++----- 8 files changed, 78 insertions(+), 62 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0fc71fe7b9..9283d22310 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -23,6 +23,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; +use std::time::Duration; use store::iter::{BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; @@ -173,11 +174,12 @@ impl BeaconChain { Ok(Some(p)) => p, }; - let slot_clock = T::SlotClock::new( + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, p.state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + 
.ok_or_else(|| Error::SlotClockDidNotStart)?; let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; @@ -216,6 +218,20 @@ impl BeaconChain { Ok(()) } + /// Reads the slot clock, returns `Err` if the slot is unavailable. + /// + /// The slot might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative slot). + /// + /// This is distinct to `present_slot`, which simply reads the latest state. If a + /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, + /// `self.state` should undergo per slot processing. + pub fn present_slot(&self) -> Result { + self.slot_clock + .present_slot() + .ok_or_else(|| Error::UnableToReadSlot) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -326,10 +342,10 @@ impl BeaconChain { pub fn catchup_state(&self) -> Result<(), Error> { let spec = &self.spec; - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; + let present_slot = self + .slot_clock + .present_slot() + .ok_or_else(|| Error::UnableToReadSlot)?; if self.state.read().slot < present_slot { let mut state = self.state.write(); @@ -369,26 +385,10 @@ impl BeaconChain { None } - /// Reads the slot clock, returns `None` if the slot is unavailable. - /// - /// The slot might be unavailable due to an error with the system clock, or if the present time - /// is before genesis (i.e., a negative slot). - /// - /// This is distinct to `present_slot`, which simply reads the latest state. If a - /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, - /// `self.state` should undergo per slot processing. 
- pub fn read_slot_clock(&self) -> Option { - match self.slot_clock.present_slot() { - Ok(Some(some_slot)) => Some(some_slot), - Ok(None) => None, - _ => None, - } - } - /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. pub fn slots_since_genesis(&self) -> Option { - let now = self.read_slot_clock()?; + let now = self.slot_clock.present_slot()?; let genesis_slot = self.spec.genesis_slot; if now < genesis_slot { @@ -398,6 +398,7 @@ impl BeaconChain { } } + /* /// Returns slot of the present state. /// /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If @@ -406,6 +407,7 @@ impl BeaconChain { pub fn present_slot(&self) -> Slot { self.state.read().slot } + */ /// Returns the block proposer for a given slot. /// @@ -840,7 +842,8 @@ impl BeaconChain { } let present_slot = self - .read_slot_clock() + .slot_clock + .present_slot() .ok_or_else(|| Error::UnableToReadSlot)?; if block.slot > present_slot { @@ -1004,7 +1007,8 @@ impl BeaconChain { ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); let slot = self - .read_slot_clock() + .slot_clock + .present_slot() .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; self.produce_block_on_state(state, slot, randao_reveal) @@ -1181,10 +1185,7 @@ impl BeaconChain { *self.state.write() = { let mut state = self.canonical_head.read().beacon_state.clone(); - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; + let present_slot = self.present_slot()?; // If required, transition the new state to the present slot. 
for _ in state.slot.as_u64()..present_slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 22df90397e..8541a0d0b3 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -25,6 +25,7 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + SlotClockDidNotStart, UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 09f4749ea3..4d6e56b041 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -6,6 +6,7 @@ use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; +use std::time::Duration; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; @@ -115,11 +116,12 @@ where let log = builder.build().expect("logger should build"); // Slot clock - let slot_clock = TestingSlotClock::new( + let slot_clock = TestingSlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + .expect("Slot clock should start"); let chain = BeaconChain::from_genesis( store, @@ -164,7 +166,9 @@ where let mut state = { // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { - BlockStrategy::OnCanonicalHead => self.chain.read_slot_clock().unwrap() - 1, + BlockStrategy::OnCanonicalHead => { + self.chain.present_slot().expect("should have a slot") - 1 + } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; @@ -173,14 +177,16 @@ where // Determine the first slot where a block should be built. 
let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => self.chain.read_slot_clock().unwrap(), + BlockStrategy::OnCanonicalHead => { + self.chain.present_slot().expect("should have a slot") + } BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.read_slot_clock().expect("should have a slot") < slot { + while self.chain.present_slot().expect("should have a slot") < slot { self.advance_slot(); } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1f..49196facc1 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -387,7 +387,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), + "current_slot" => format!("{:?}", self.chain.present_slot()), "requested" => req.count, "returned" => blocks.len(), ); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 5365086df7..0f47200e9d 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -88,8 +88,8 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let current_slot = beacon_chain - .read_slot_clock() - .ok_or_else(|| ApiError::ServerError("Unable to read slot clock".to_string()))?; + .present_slot() + .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: // diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 988f3d322c..5986191dc1 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use 
std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use crate::testing_slot_clock::TestingSlotClock; @@ -13,6 +13,24 @@ pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { + fn from_eth2_genesis( + genesis_slot: Slot, + genesis_seconds: u64, + slot_duration: Duration, + ) -> Option { + let duration_between_now_and_unix_epoch = + SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let duration_between_unix_epoch_and_genesis = Duration::from_secs(genesis_seconds); + + if duration_between_now_and_unix_epoch < duration_between_unix_epoch_and_genesis { + None + } else { + let genesis_instant = Instant::now() + - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis); + Some(Self::new(genesis_slot, genesis_instant, slot_duration)) + } + } + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; fn present_slot(&self) -> Option; diff --git a/validator_client/src/error.rs b/validator_client/src/error.rs index 97500f900b..e13f7ded51 100644 --- a/validator_client/src/error.rs +++ b/validator_client/src/error.rs @@ -1,16 +1,9 @@ -use slot_clock; - use error_chain::error_chain; error_chain! 
{ links { } errors { - SlotClockError(e: slot_clock::SystemTimeSlotClockError) { - description("Error reading system time"), - display("SlotClockError: '{:?}'", e) - } - SystemTimeError(t: String ) { description("Error reading system time"), display("SystemTimeError: '{}'", t) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3ddb96e4c2..62a782da93 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -13,7 +13,6 @@ use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer}; use crate::config::Config as ValidatorConfig; use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap}; use crate::error as error_chain; -use crate::error::ErrorKind; use crate::signer::Signer; use bls::Keypair; use eth2_config::Eth2Config; @@ -159,17 +158,19 @@ impl Service(|| { + "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() + })?; let current_slot = slot_clock .present_slot() - .map_err(ErrorKind::SlotClockError)? .ok_or_else::(|| { - "Genesis is not in the past. Exiting.".into() + "Genesis has not yet occurred. Exiting.".into() })?; /* Generate the duties manager */ @@ -244,7 +245,6 @@ impl Service(|| { "Genesis is not in the past. Exiting.".into() })?; @@ -291,15 +291,12 @@ impl Service error_chain::Result<()> { - let current_slot = match self.slot_clock.present_slot() { - Err(e) => { - error!(self.log, "SystemTimeError {:?}", e); - return Err("Could not read system time".into()); - } - Ok(slot) => slot.ok_or_else::(|| { + let current_slot = self + .slot_clock + .present_slot() + .ok_or_else::(|| { "Genesis is not in the past. 
Exiting.".into() - })?, - }; + })?; let current_epoch = current_slot.epoch(self.slots_per_epoch); From e9e912323e7a1327f6f6b26e64d7429fa9311a29 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 13:56:00 +1000 Subject: [PATCH 124/186] Restrict fork choice iterators to the root --- eth2/lmd_ghost/src/reduced_tree.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index a388d2c383..73fab13bfd 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -470,6 +470,7 @@ where // descendant of both `node` and `prev_in_tree`. if self .iter_ancestors(child_hash)? + .take_while(|(_, slot)| *slot >= self.root_slot()) .any(|(ancestor, _slot)| ancestor == node.block_hash) { let child = self.get_mut_node(child_hash)?; @@ -555,6 +556,7 @@ where fn find_prev_in_tree(&mut self, hash: Hash256) -> Option { self.iter_ancestors(hash) .ok()? + .take_while(|(_, slot)| *slot >= self.root_slot()) .find(|(root, _slot)| self.nodes.contains_key(root)) .and_then(|(root, _slot)| Some(root)) } @@ -562,8 +564,12 @@ where /// For the two given block roots (`a_root` and `b_root`), find the first block they share in /// the tree. Viz, find the block that these two distinct blocks forked from. fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - let mut a_iter = self.iter_ancestors(a_root)?; - let mut b_iter = self.iter_ancestors(b_root)?; + let mut a_iter = self + .iter_ancestors(a_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); + let mut b_iter = self + .iter_ancestors(b_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. 
From 7d03806107db9c2f6ad0984682ae0d7f652fb563 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 14:26:30 +1000 Subject: [PATCH 125/186] Upgrade codebase to new SlotClock API --- beacon_node/beacon_chain/src/beacon_chain.rs | 39 +++++++------------ beacon_node/beacon_chain/src/test_utils.rs | 8 ++-- beacon_node/client/src/lib.rs | 10 +++-- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/rest_api/src/helpers.rs | 2 +- .../builders/testing_beacon_state_builder.rs | 4 +- eth2/utils/slot_clock/src/lib.rs | 2 +- eth2/utils/slot_clock/src/metrics.rs | 2 +- .../slot_clock/src/system_time_slot_clock.rs | 10 ++--- .../slot_clock/src/testing_slot_clock.rs | 8 ++-- validator_client/src/service.rs | 10 ++--- 11 files changed, 43 insertions(+), 54 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ad4d5414e..67e4646c6f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -148,11 +148,12 @@ impl BeaconChain { ); // Slot clock - let slot_clock = T::SlotClock::new( + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + .ok_or_else(|| Error::SlotClockDidNotStart)?; Ok(Self { spec, @@ -224,18 +225,13 @@ impl BeaconChain { Ok(()) } - /// Reads the slot clock, returns `Err` if the slot is unavailable. + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is + /// unavailable. /// /// The slot might be unavailable due to an error with the system clock, or if the present time /// is before genesis (i.e., a negative slot). - /// - /// This is distinct to `present_slot`, which simply reads the latest state. If a - /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, - /// `self.state` should undergo per slot processing. 
- pub fn present_slot(&self) -> Result { - self.slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot) + pub fn slot(&self) -> Result { + self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) } /// Returns the beacon block body for each beacon block root in `roots`. @@ -348,10 +344,7 @@ impl BeaconChain { pub fn catchup_state(&self) -> Result<(), Error> { let spec = &self.spec; - let present_slot = self - .slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot)?; + let present_slot = self.slot()?; if self.state.read().slot < present_slot { let mut state = self.state.write(); @@ -394,7 +387,7 @@ impl BeaconChain { /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. pub fn slots_since_genesis(&self) -> Option { - let now = self.slot_clock.present_slot()?; + let now = self.slot().ok()?; let genesis_slot = self.spec.genesis_slot; if now < genesis_slot { @@ -847,10 +840,7 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } - let present_slot = self - .slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot)?; + let present_slot = self.slot()?; if block.slot > present_slot { return Ok(BlockProcessingOutcome::FutureSlot { @@ -1013,9 +1003,8 @@ impl BeaconChain { ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); let slot = self - .slot_clock - .present_slot() - .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; + .slot() + .map_err(|_| BlockProductionError::UnableToReadSlot)?; self.produce_block_on_state(state, slot, randao_reveal) } @@ -1191,7 +1180,7 @@ impl BeaconChain { *self.state.write() = { let mut state = self.canonical_head.read().beacon_state.clone(); - let present_slot = self.present_slot()?; + let present_slot = self.slot()?; // If required, transition the new state to the present slot. 
for _ in state.slot.as_u64()..present_slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6ab657b087..c45a22fd85 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -151,7 +151,7 @@ where // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { BlockStrategy::OnCanonicalHead => { - self.chain.present_slot().expect("should have a slot") - 1 + self.chain.slot().expect("should have a slot") - 1 } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; @@ -161,16 +161,14 @@ where // Determine the first slot where a block should be built. let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => { - self.chain.present_slot().expect("should have a slot") - } + BlockStrategy::OnCanonicalHead => self.chain.slot().expect("should have a slot"), BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.present_slot().expect("should have a slot") < slot { + while self.chain.slot().expect("should have a slot") < slot { self.advance_slot(); } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 2612fd6489..004353d38b 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -114,7 +114,7 @@ where .map_err(error::Error::from)?, ); - if beacon_chain.read_slot_clock().is_none() { + if beacon_chain.slot().is_err() { panic!("Cannot start client before genesis!") } @@ -124,7 +124,9 @@ where // blocks and we're basically useless. 
{ let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); + let wall_clock_slot = beacon_chain + .slot() + .expect("Cannot start client before genesis"); let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); info!( log, @@ -176,7 +178,7 @@ where }; let (slot_timer_exit_signal, exit) = exit_future::signal(); - if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { + if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after @@ -223,7 +225,7 @@ impl Drop for Client { fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { // Only attempt to `catchup_state` if we can read the slot clock. - if let Some(current_slot) = chain.read_slot_clock() { + if let Ok(current_slot) = chain.slot() { let state_catchup_result = chain.catchup_state(); let best_slot = chain.head().beacon_block.slot; diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 49196facc1..d3ed2f3e4f 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -387,7 +387,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => format!("{:?}", self.chain.present_slot()), + "current_slot" => format!("{:?}", self.chain.slot()), "requested" => req.count, "returned" => blocks.len(), ); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 0f47200e9d..aeaf5ad6e8 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -88,7 +88,7 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let 
current_slot = beacon_chain - .present_slot() + .slot() .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 98f8409538..4f8a2d9240 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -123,8 +123,10 @@ impl TestingBeaconStateBuilder { .collect::>() .into(); + let genesis_time = 1567052589; // 29 August, 2019; + let mut state = BeaconState::new( - spec.min_genesis_time, + genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 5986191dc1..fd3bf029be 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -33,7 +33,7 @@ pub trait SlotClock: Send + Sync + Sized { fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; - fn present_slot(&self) -> Option; + fn now(&self) -> Option; fn duration_to_next_slot(&self) -> Option; diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index 1abd93c488..d1de491d00 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -17,7 +17,7 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. 
pub fn scrape_for_metrics(clock: &U) { - let present_slot = match clock.present_slot() { + let present_slot = match clock.now() { Some(slot) => slot, _ => Slot::new(0), }; diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 88c9c0e63e..0d4a52ef64 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -25,7 +25,7 @@ impl SlotClock for SystemTimeSlotClock { } } - fn present_slot(&self) -> Option { + fn now(&self) -> Option { let now = Instant::now(); if now < self.genesis { @@ -80,18 +80,18 @@ mod tests { let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); - assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert_eq!(clock.now(), Some(Slot::new(0))); let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); - assert_eq!(clock.present_slot(), Some(Slot::new(5))); + assert_eq!(clock.now(), Some(Slot::new(5))); let clock = SystemTimeSlotClock::new( genesis_slot, Instant::now() - Duration::from_millis(500), Duration::from_secs(1), ); - assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert_eq!(clock.now(), Some(Slot::new(0))); assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); let clock = SystemTimeSlotClock::new( @@ -99,7 +99,7 @@ mod tests { Instant::now() - Duration::from_millis(1_500), Duration::from_secs(1), ); - assert_eq!(clock.present_slot(), Some(Slot::new(1))); + assert_eq!(clock.now(), Some(Slot::new(1))); assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index 0b65b15694..d90cb157aa 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -16,7 +16,7 @@ impl TestingSlotClock { } pub fn 
advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().as_u64() + 1) + self.set_slot(self.now().unwrap().as_u64() + 1) } } @@ -27,7 +27,7 @@ impl SlotClock for TestingSlotClock { } } - fn present_slot(&self) -> Option { + fn now(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); Some(slot) } @@ -50,8 +50,8 @@ mod tests { #[test] fn test_slot_now() { let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); - assert_eq!(clock.present_slot(), Some(Slot::new(10))); + assert_eq!(clock.now(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Some(Slot::new(123))); + assert_eq!(clock.now(), Some(Slot::new(123))); } } diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 62a782da93..68a9132656 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -167,11 +167,9 @@ impl Service(|| { - "Genesis has not yet occurred. Exiting.".into() - })?; + let current_slot = slot_clock.now().ok_or_else::(|| { + "Genesis has not yet occurred. Exiting.".into() + })?; /* Generate the duties manager */ @@ -293,7 +291,7 @@ impl Service error_chain::Result<()> { let current_slot = self .slot_clock - .present_slot() + .now() .ok_or_else::(|| { "Genesis is not in the past. 
Exiting.".into() })?; From 8cfbe8bbfba1b0d9371455959a5260f9675f767c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 14:32:21 +1000 Subject: [PATCH 126/186] Change seconds_per_slot to milliseconds_per_slot --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++------------- beacon_node/client/src/lib.rs | 4 ++-- eth2/types/src/chain_spec.rs | 6 +++--- validator_client/src/service.rs | 4 ++-- 4 files changed, 9 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 67e4646c6f..fb2f8ea6a9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -151,7 +151,7 @@ impl BeaconChain { let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - Duration::from_secs(spec.seconds_per_slot), + Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -184,7 +184,7 @@ impl BeaconChain { let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, p.state.genesis_time, - Duration::from_secs(spec.seconds_per_slot), + Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -397,17 +397,6 @@ impl BeaconChain { } } - /* - /// Returns slot of the present state. - /// - /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If - /// `self.state` has not been transitioned it is possible for the system clock to be on a - /// different slot to what is returned from this call. - pub fn present_slot(&self) -> Slot { - self.state.read().slot - } - */ - /// Returns the block proposer for a given slot. 
/// /// Information is read from the present `beacon_state` shuffling, only information from the diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 004353d38b..67528e2f96 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -76,7 +76,7 @@ where executor: &TaskExecutor, ) -> error::Result { let store = Arc::new(store); - let seconds_per_slot = eth2_config.spec.seconds_per_slot; + let milliseconds_per_slot = eth2_config.spec.milliseconds_per_slot; let spec = ð2_config.spec.clone(); @@ -182,7 +182,7 @@ where // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(seconds_per_slot); + let slot_duration = Duration::from_millis(milliseconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 9dec626d44..d59e0db0ac 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -58,7 +58,7 @@ pub struct ChainSpec { /* * Time parameters */ - pub seconds_per_slot: u64, + pub milliseconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, @@ -158,7 +158,7 @@ impl ChainSpec { /* * Time parameters */ - seconds_per_slot: 6, + milliseconds_per_slot: 6_000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, @@ -221,7 +221,7 @@ impl ChainSpec { let boot_nodes = vec![]; Self { - seconds_per_slot: 12, + milliseconds_per_slot: 12_000, target_committee_size: 4, shuffle_round_count: 10, network_id: 13, diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 68a9132656..bd694668bf 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -161,7 
+161,7 @@ impl Service(|| { "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() @@ -250,7 +250,7 @@ impl Service Date: Thu, 29 Aug 2019 14:59:32 +1000 Subject: [PATCH 127/186] Allow for customizable recent genesis window --- .../beacon_chain/src/beacon_chain_builder.rs | 15 ++++++++++----- beacon_node/client/src/config.rs | 5 ++++- beacon_node/client/src/lib.rs | 12 +++++++++--- beacon_node/src/config.rs | 7 +++++++ beacon_node/src/main.rs | 8 +++++++- 5 files changed, 37 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 79c74b0068..223d99d8dc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -22,8 +22,13 @@ pub struct BeaconChainBuilder { } impl BeaconChainBuilder { - pub fn recent_genesis(validator_count: usize, spec: ChainSpec, log: Logger) -> Self { - Self::quick_start(recent_genesis_time(), validator_count, spec, log) + pub fn recent_genesis( + validator_count: usize, + minutes: u64, + spec: ChainSpec, + log: Logger, + ) -> Self { + Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) } pub fn quick_start( @@ -123,12 +128,12 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - /// Returns the system time, mod 30 minutes. /// /// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { +fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. + let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); + // genesis is now the last 15 minute block. 
now - secs_after_last_period } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f2725b3e79..3aed26881f 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -42,7 +42,10 @@ pub enum BeaconChainStartMethod { /// Create a new beacon chain that can connect to mainnet. /// /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, + RecentGenesis { + validator_count: usize, + minutes: u64, + }, /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 67528e2f96..4554ff9a1a 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -88,9 +88,15 @@ where crit!(log, "No mainnet beacon chain startup specification."); return Err("Mainnet is not yet specified. We're working on it.".into()); } - BeaconChainStartMethod::RecentGenesis { validator_count } => { - BeaconChainBuilder::recent_genesis(*validator_count, spec.clone(), log.clone()) - } + BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + } => BeaconChainBuilder::recent_genesis( + *validator_count, + *minutes, + spec.clone(), + log.clone(), + ), BeaconChainStartMethod::Generated { validator_count, genesis_time, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e76bd48fa7..7c471e8acf 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -120,8 +120,15 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + let minutes = cli_args + .value_of("minutes") + .ok_or_else(|| "No recent genesis minutes supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { validator_count, + minutes, }) } _ => return Err("No testnet method specified. See 'testnet --help'.".into()), diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index aba44e6fe5..5bfb712151 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -259,11 +259,17 @@ fn main() { */ .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ - 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") + MINUTES boundary (e.g., when MINUTES == 30; 12:00, 12:30, 13:00, etc.)") .arg(Arg::with_name("validator_count") .value_name("VALIDATOR_COUNT") .required(true) .help("The number of validators in the genesis state")) + .arg(Arg::with_name("minutes") + .short("m") + .value_name("MINUTES") + .required(true) + .default_value("15") + .help("The maximum number of minutes that will have elapsed before genesis")) ) .subcommand(SubCommand::with_name("yaml-genesis-state") .about("Creates a new datadir where the genesis state is read from YAML. 
Will fail to parse \ From 75ac21604f7f3d10b0d323a216f3f2cc4c00dc0f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 15:03:52 +1000 Subject: [PATCH 128/186] Add long minutes CLI flag --- beacon_node/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5bfb712151..8ab20a4813 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -265,6 +265,7 @@ fn main() { .required(true) .help("The number of validators in the genesis state")) .arg(Arg::with_name("minutes") + .long("minutes") .short("m") .value_name("MINUTES") .required(true) From 682081ef072c82a21896f0bee78723464a65a4bf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 19:14:52 +1000 Subject: [PATCH 129/186] Add first pass at removing speculative state --- beacon_node/beacon_chain/src/beacon_chain.rs | 301 ++++++++++++------ beacon_node/beacon_chain/src/errors.rs | 2 + .../src/persisted_beacon_chain.rs | 3 +- beacon_node/beacon_chain/src/test_utils.rs | 1 - beacon_node/beacon_chain/tests/tests.rs | 4 +- beacon_node/client/src/lib.rs | 35 +- beacon_node/rest_api/src/validator.rs | 7 +- beacon_node/rpc/src/attestation.rs | 48 +-- beacon_node/rpc/src/beacon_block.rs | 4 +- beacon_node/rpc/src/validator.rs | 5 +- 10 files changed, 225 insertions(+), 185 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f552dbd272..56923ab6af 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5,7 +5,6 @@ use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; -use log::trace; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; @@ -77,6 +76,20 @@ pub enum 
AttestationProcessingOutcome { Invalid(AttestationValidationError), } +pub enum StateCow<'a, T: EthSpec> { + Borrowed(RwLockReadGuard<'a, CheckPoint>), + Owned(BeaconState), +} + +impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { + fn as_ref(&self) -> &BeaconState { + match self { + StateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, + StateCow::Owned(state) => &state, + } + } +} + pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -97,10 +110,6 @@ pub struct BeaconChain { pub op_pool: OperationPool, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, - /// The same state from `self.canonical_head`, but updated at the start of each slot with a - /// skip slot if no block is received. This is effectively a cache that avoids repeating calls - /// to `per_slot_processing`. - state: RwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical @@ -158,7 +167,6 @@ impl BeaconChain { spec, slot_clock, op_pool: OperationPool::new(), - state: RwLock::new(genesis_state), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), @@ -180,9 +188,11 @@ impl BeaconChain { Ok(Some(p)) => p, }; + let state = &p.canonical_head.beacon_state; + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, - p.state.genesis_time, + state.genesis_time, Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -190,7 +200,7 @@ impl BeaconChain { let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; - let op_pool = p.op_pool.into_operation_pool(&p.state, &spec); + let op_pool = p.op_pool.into_operation_pool(state, &spec); Ok(Some(BeaconChain { 
spec, @@ -198,7 +208,6 @@ impl BeaconChain { fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, canonical_head: RwLock::new(p.canonical_head), - state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, store, log, @@ -213,7 +222,6 @@ impl BeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), genesis_block_root: self.genesis_block_root, - state: self.state.read().clone(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -233,6 +241,16 @@ impl BeaconChain { self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) } + /// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is + /// unavailable. + /// + /// The epoch might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative epoch). + pub fn epoch(&self) -> Result { + self.slot() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -318,12 +336,6 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } - /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been - /// updated to match the current slot clock. - pub fn speculative_state(&self) -> Result>, Error> { - Ok(self.state.read()) - } - /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the /// fork-choice rule). /// @@ -334,43 +346,74 @@ impl BeaconChain { self.canonical_head.read() } + /// Returns the `BeaconState` at the given slot. + /// + /// May return: + /// + /// - A new state loaded from the database (for states prior to the head) + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). 
+ /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when the state is not found in the database or there is an error skipping + /// to a future state. + pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + let head_state = &self.head().beacon_state; + + if slot == head_state.slot { + Ok(StateCow::Borrowed(self.head())) + } else if slot > head_state.slot { + let head_state_slot = head_state.slot; + let mut state = head_state.clone(); + drop(head_state); + while state.slot < slot { + match per_slot_processing(&mut state, &self.spec) { + Ok(()) => (), + Err(e) => { + warn!( + self.log, + "Unable to load state at slot"; + "error" => format!("{:?}", e), + "head_slot" => head_state_slot, + "requested_slot" => slot + ); + return Err(Error::NoStateForSlot(slot)); + } + }; + } + Ok(StateCow::Owned(state)) + } else { + let state_root = self + .rev_iter_state_roots() + .find(|(_root, s)| *s == slot) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::NoStateForSlot(slot))?; + + Ok(StateCow::Owned( + self.store + .get(&state_root)? + .ok_or_else(|| Error::NoStateForSlot(slot))?, + )) + } + } + + /// Returns the `BeaconState` the current slot (viz., `self.slot()`). + /// + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). + /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when there is an error skipping to a future state or the slot clock cannot + /// be read. + pub fn state_now(&self) -> Result, Error> { + self.state_at_slot(self.slot()?) + } + /// Returns the slot of the highest block in the canonical chain. pub fn best_slot(&self) -> Slot { self.canonical_head.read().beacon_block.slot } - /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`. 
- pub fn catchup_state(&self) -> Result<(), Error> { - let spec = &self.spec; - - let present_slot = self.slot()?; - - if self.state.read().slot < present_slot { - let mut state = self.state.write(); - - // If required, transition the new state to the present slot. - for _ in state.slot.as_u64()..present_slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - per_slot_processing(&mut *state, spec)?; - } - - state.build_all_caches(spec)?; - } - - Ok(()) - } - - /// Build all of the caches on the current state. - /// - /// Ideally this shouldn't be required, however we leave it here for testing. - pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> { - self.state.write().build_all_caches(&self.spec)?; - - Ok(()) - } - /// Returns the validator index (if any) for the given public key. /// /// Information is retrieved from the present `beacon_state.validators`. @@ -401,18 +444,19 @@ impl BeaconChain { /// Information is read from the present `beacon_state` shuffling, only information from the /// present epoch is available. pub fn block_proposer(&self, slot: Slot) -> Result { - // Ensures that the present state has been advanced to the present slot, skipping slots if - // blocks are not present. - self.catchup_state()?; + let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; - // TODO: permit lookups of the proposer at any slot. - let index = self.state.read().get_beacon_proposer_index( - slot, - RelativeEpoch::Current, - &self.spec, - )?; + let state = if epoch(slot) == epoch(head_state.slot) { + StateCow::Borrowed(self.head()) + } else { + self.state_at_slot(slot)? + }; - Ok(index) + state + .as_ref() + .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) + .map_err(Into::into) } /// Returns the attestation slot and shard for a given validator index. 
@@ -422,14 +466,19 @@ impl BeaconChain { pub fn validator_attestation_slot_and_shard( &self, validator_index: usize, - ) -> Result, BeaconStateError> { - trace!( - "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}", - validator_index - ); - if let Some(attestation_duty) = self - .state - .read() + epoch: Epoch, + ) -> Result, Error> { + let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; + + let state = if epoch == as_epoch(head_state.slot) { + StateCow::Borrowed(self.head()) + } else { + self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? + }; + + if let Some(attestation_duty) = state + .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? { Ok(Some((attestation_duty.slot, attestation_duty.shard))) @@ -438,15 +487,25 @@ impl BeaconChain { } } - /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. + /// Produce an `AttestationData` that is valid for the given `slot` `shard`. /// - /// Attests to the canonical chain. - pub fn produce_attestation_data(&self, shard: u64) -> Result { - let state = self.state.read(); + /// Always attests to the canonical chain. + pub fn produce_attestation_data( + &self, + shard: u64, + slot: Slot, + ) -> Result { + let state = self.state_at_slot(slot)?; + let head_block_root = self.head().beacon_block_root; let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) + self.produce_attestation_data_for_block( + shard, + head_block_root, + head_block_slot, + state.as_ref(), + ) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -765,14 +824,38 @@ impl BeaconChain { /// Accept some exit and queue it for inclusion in an appropriate block. 
pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { - self.op_pool - .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self + .op_pool + .insert_voluntary_exit(exit, state.as_ref(), &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process voluntary exit"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - self.op_pool - .insert_transfer(transfer, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self + .op_pool + .insert_transfer(transfer, state.as_ref(), &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process transfer"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. @@ -780,8 +863,21 @@ impl BeaconChain { &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - self.op_pool - .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_proposer_slashing(proposer_slashing, state.as_ref(), &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process proposer slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some attester slashing and queue it for inclusion in an appropriate block. 
@@ -789,8 +885,21 @@ impl BeaconChain { &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - self.op_pool - .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_attester_slashing(attester_slashing, state.as_ref(), &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process attester slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some block and attempt to add it to block DAG. @@ -804,8 +913,8 @@ impl BeaconChain { let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self - .state - .read() + .head() + .beacon_state .finalized_checkpoint .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -987,20 +1096,24 @@ impl BeaconChain { Ok(BlockProcessingOutcome::Processed { block_root }) } - /// Produce a new block at the present slot. + /// Produce a new block at the given `slot`. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. pub fn produce_block( &self, randao_reveal: Signature, + slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - let state = self.state.read().clone(); + let state = self + .state_at_slot(slot) + .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; + let slot = self .slot() .map_err(|_| BlockProductionError::UnableToReadSlot)?; - self.produce_block_on_state(state, slot, randao_reveal) + self.produce_block_on_state(state.as_ref().clone(), slot, randao_reveal) } /// Produce a block for some `slot` upon the given `state`. @@ -1169,29 +1282,15 @@ impl BeaconChain { } /// Update the canonical head to `new_head`. 
- fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + fn update_canonical_head(&self, mut new_head: CheckPoint) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + new_head.beacon_state.build_all_caches(&self.spec)?; + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; - // Update the always-at-the-present-slot state we keep around for performance gains. - *self.state.write() = { - let mut state = self.canonical_head.read().beacon_state.clone(); - - let present_slot = self.slot()?; - - // If required, transition the new state to the present slot. - for _ in state.slot.as_u64()..present_slot.as_u64() { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_all_caches(&self.spec)?; - - state - }; - // Save `self` to `self.store`. self.persist()?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 75dbb655f3..cd8d6aad63 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -24,6 +24,7 @@ pub enum BeaconChainError { new_epoch: Epoch, }, SlotClockDidNotStart, + NoStateForSlot(Slot), UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), @@ -44,6 +45,7 @@ easy_from_to!(SlotProcessingError, BeaconChainError); pub enum BlockProductionError { UnableToGetBlockRootFromState, UnableToReadSlot, + UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), BeaconStateError(BeaconStateError), diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 8b9f78dc5b..a85f78ac82 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -3,7 +3,7 @@ use operation_pool::PersistedOperationPool; use 
ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; -use types::{BeaconState, Hash256}; +use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; @@ -13,7 +13,6 @@ pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, - pub state: BeaconState, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 52e1ec8dee..1006fabf53 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -130,7 +130,6 @@ where /// Does not produce blocks or attestations. pub fn advance_slot(&self) { self.chain.slot_clock.advance_slot(); - self.chain.catchup_state().expect("should catchup state"); } /// Extend the `BeaconChain` with some blocks and attestations. 
Returns the root of the diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 22b667f159..ba7f7bf84b 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -322,7 +322,9 @@ fn roundtrip_operation_pool() { let p: PersistedBeaconChain> = harness.chain.store.get(&key).unwrap().unwrap(); - let restored_op_pool = p.op_pool.into_operation_pool(&p.state, &harness.spec); + let restored_op_pool = p + .op_pool + .into_operation_pool(&p.canonical_head.beacon_state, &harness.spec); assert_eq!(harness.chain.op_pool, restored_op_pool); } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 4554ff9a1a..9876e96723 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -143,7 +143,6 @@ where "catchup_distance" => wall_clock_slot - state_slot, ); } - do_state_catchup(&beacon_chain, &log); let network_config = &client_config.network; let (network, network_send) = @@ -199,7 +198,7 @@ where exit.until( interval .for_each(move |_| { - do_state_catchup(&chain, &log); + log_new_slot(&chain, &log); Ok(()) }) @@ -229,35 +228,19 @@ impl Drop for Client { } } -fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { - // Only attempt to `catchup_state` if we can read the slot clock. 
+fn log_new_slot(chain: &Arc>, log: &slog::Logger) { + let best_slot = chain.head().beacon_block.slot; + let latest_block_root = chain.head().beacon_block_root; + if let Ok(current_slot) = chain.slot() { - let state_catchup_result = chain.catchup_state(); - - let best_slot = chain.head().beacon_block.slot; - let latest_block_root = chain.head().beacon_block_root; - - let common = o!( + info!( + log, + "Slot start"; "skip_slots" => current_slot.saturating_sub(best_slot), "best_block_root" => format!("{}", latest_block_root), "best_block_slot" => best_slot, "slot" => current_slot, - ); - - if let Err(e) = state_catchup_result { - error!( - log, - "State catchup failed"; - "error" => format!("{:?}", e), - common - ) - } else { - info!( - log, - "Slot start"; - common - ) - } + ) } else { error!( log, diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 4294f9c20b..365b7e5521 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -39,12 +39,7 @@ pub fn get_validator_duties(req: Request) - .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - let head_state = beacon_chain - .speculative_state() - .expect("This is legacy code and should be removed."); + let head_state = &beacon_chain.head().beacon_state; // Parse and check query parameters let query = UrlQuery::from_request(&req)?; diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 68d3829ee7..f4b49049ae 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -14,7 +14,7 @@ use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; -use types::Attestation; +use types::{Attestation, Slot}; 
#[derive(Clone)] pub struct AttestationServiceInstance { @@ -37,49 +37,13 @@ impl AttestationService for AttestationServiceInstance { req.get_slot() ); - // verify the slot, drop lock on state afterwards - { - let slot_requested = req.get_slot(); - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); - - // Start by performing some checks - // Check that the AttestationData is for the current slot (otherwise it will not be valid) - if slot_requested > state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::OutOfRange, - Some( - "AttestationData request for a slot that is in the future.".to_string(), - ), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - // currently cannot handle past slots. TODO: Handle this case - else if slot_requested < state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("AttestationData request for a slot that is in the past.".to_string()), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - } - // Then get the AttestationData from the beacon chain let shard = req.get_shard(); - let attestation_data = match self.chain.produce_attestation_data(shard) { + let slot_requested = req.get_slot(); + let attestation_data = match self + .chain + .produce_attestation_data(shard, Slot::from(slot_requested)) + { Ok(v) => v, Err(e) => { // Could not produce an attestation diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 92a543ef3c..b7332b395e 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -35,7 +35,7 @@ impl BeaconBlockService for 
BeaconBlockServiceInstance { // decode the request // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +51,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 080c828a78..fd6d7f3d16 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -30,10 +30,7 @@ impl ValidatorService for ValidatorServiceInstance { let spec = &self.chain.spec; // TODO: this whole module is legacy and not maintained well. 
- let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); + let state = &self.chain.head().beacon_state; let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From ae114889c1e49729205d9377ce41652eac6b2500 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 00:24:46 +1000 Subject: [PATCH 130/186] Fix bugs from removing speculative state --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 +++++++++++++-- beacon_node/rpc/src/validator.rs | 28 ++++++++++++++++++-- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 56923ab6af..f6cef7dacb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -90,6 +90,15 @@ impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { } } +impl<'a, T: EthSpec> StateCow<'a, T> { + pub fn as_mut_ref(&mut self) -> Option<&mut BeaconState> { + match self { + StateCow::Borrowed(_) => None, + StateCow::Owned(ref mut state) => Some(state), + } + } +} + pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -447,12 +456,16 @@ impl BeaconChain { let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); let head_state = &self.head().beacon_state; - let state = if epoch(slot) == epoch(head_state.slot) { + let mut state = if epoch(slot) == epoch(head_state.slot) { StateCow::Borrowed(self.head()) } else { self.state_at_slot(slot)? 
}; + if let Some(state) = state.as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + state .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) @@ -471,12 +484,16 @@ impl BeaconChain { let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); let head_state = &self.head().beacon_state; - let state = if epoch == as_epoch(head_state.slot) { + let mut state = if epoch == as_epoch(head_state.slot) { StateCow::Borrowed(self.head()) } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? }; + if let Some(state) = state.as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + if let Some(attestation_duty) = state .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index fd6d7f3d16..afe1733189 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -28,9 +28,33 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); + let slot = if let Ok(slot) = self.chain.slot() { + slot + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No slot for chain".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + let state_cow = if let Ok(state) = self.chain.state_at_slot(slot) { + state + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No state".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + let state = state_cow.as_ref(); + let spec = &self.chain.spec; - // TODO: this whole module is 
legacy and not maintained well. - let state = &self.chain.head().beacon_state; let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From ea562595ed4e8b53385357b812236c0289e44ce2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 00:46:25 +1000 Subject: [PATCH 131/186] Fix bugs with gRPC API --- beacon_node/rpc/src/validator.rs | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index afe1733189..7f33e0c3a6 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -40,8 +40,8 @@ impl ValidatorService for ValidatorServiceInstance { .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); return ctx.spawn(f); }; - let state_cow = if let Ok(state) = self.chain.state_at_slot(slot) { - state + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { + state.as_ref().clone() } else { let log_clone = self.log.clone(); let f = sink @@ -52,33 +52,16 @@ impl ValidatorService for ValidatorServiceInstance { .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); return ctx.spawn(f); }; - let state = state_cow.as_ref(); - let spec = &self.chain.spec; + let _ = state.build_all_caches(&self.chain.spec); + let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); - let relative_epoch = - match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch) - { - Ok(v) => v, - Err(e) => { - // incorrect epoch - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some(format!("Invalid epoch: {:?}", e)), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - let validator_proposers: Result, 
_> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) + .map(|slot| self.chain.block_proposer(slot)) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, From 8060cd8f5cdc5deff8ff5f312ed108d7cb8bb131 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 01:14:57 +1000 Subject: [PATCH 132/186] Change RPC slot behaviour --- beacon_node/rpc/src/beacon_block.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b7332b395e..f6be6207f4 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -35,7 +35,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { // decode the request // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let requested_slot = Slot::from(req.get_slot()); + let _requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +51,22 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { + let slot = match self.chain.slot() { + Ok(slot) => slot, + Err(_) => { + // decode error, incorrect signature + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::InvalidArgument, + Some("No slot from chain".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + } + }; + + let produced_block = match self.chain.produce_block(randao_reveal, slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block From 31bbb0f5739d21c37454adc4e17e0562c2668122 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 01:51:18 +1000 Subject: [PATCH 
133/186] Modify RPC duties endpoint --- beacon_node/rpc/src/validator.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 7f33e0c3a6..2a1ad45f4d 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -28,18 +28,9 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); - let slot = if let Ok(slot) = self.chain.slot() { - slot - } else { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some("No slot for chain".to_string()), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - }; + let epoch = Epoch::from(req.get_epoch()); + let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { state.as_ref().clone() } else { @@ -55,7 +46,6 @@ impl ValidatorService for ValidatorServiceInstance { let _ = state.build_all_caches(&self.chain.spec); - let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From 336510634038ea0b8c42afb545896e399ade60d9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:04:15 +1000 Subject: [PATCH 134/186] Fix bug with block production --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 +++++++++++++---- beacon_node/beacon_chain/src/errors.rs | 2 ++ beacon_node/rpc/src/beacon_block.rs | 25 ++++++-------------- beacon_node/rpc/src/validator.rs | 10 +++++++- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f6cef7dacb..afc7a992ae 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs 
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -466,6 +466,14 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } + if epoch(state.as_ref().slot) != epoch(slot) { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in proposer lookup: state: {}, requested: {}", + epoch(state.as_ref().slot), + epoch(slot) + ))); + } + state .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) @@ -494,6 +502,14 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } + if as_epoch(state.as_ref().slot) != epoch { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", + as_epoch(state.as_ref().slot), + epoch + ))); + } + if let Some(attestation_duty) = state .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? @@ -1123,13 +1139,9 @@ impl BeaconChain { slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self - .state_at_slot(slot) + .state_at_slot(slot - 1) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - let slot = self - .slot() - .map_err(|_| BlockProductionError::UnableToReadSlot)?; - self.produce_block_on_state(state.as_ref().clone(), slot, randao_reveal) } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index cd8d6aad63..5ef68f2cdd 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -37,6 +37,8 @@ pub enum BeaconChainError { beacon_block_root: Hash256, }, AttestationValidationError(AttestationValidationError), + /// Returned when an internal check fails, indicating corrupt data. 
+ InvariantViolated(String), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index f6be6207f4..346d7e263e 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -34,8 +34,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req)); // decode the request - // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,22 +50,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let slot = match self.chain.slot() { - Ok(slot) => slot, - Err(_) => { - // decode error, incorrect signature - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("No slot from chain".to_string()), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - - let produced_block = match self.chain.produce_block(randao_reveal, slot) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block @@ -82,6 +66,11 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; + assert_eq!( + produced_block.slot, requested_slot, + "should produce at the requested slot" + ); + let mut block = BeaconBlockProto::new(); block.set_ssz(ssz_encode(&produced_block)); diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 2a1ad45f4d..84995ca504 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -46,12 +46,20 @@ impl ValidatorService for 
ValidatorServiceInstance { let _ = state.build_all_caches(&self.chain.spec); + assert_eq!( + state.current_epoch(), + epoch, + "Retrieved state should be from the same epoch" + ); + let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); let validator_proposers: Result, _> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| self.chain.block_proposer(slot)) + .map(|slot| { + state.get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.chain.spec) + }) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, From a474061ec75c0858943264053ef05eefd7edd132 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:14:13 +1000 Subject: [PATCH 135/186] Disable sig verification when filling blocks --- eth2/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf38072..3e1c0ece1e 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -134,7 +134,7 @@ impl OperationPool { verify_attestation_for_block_inclusion( state, attestation, - VerifySignatures::True, + VerifySignatures::False, spec, ) .is_ok() From 2e11faf7631af03408bb45315fc0e8a749f3befe Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:19:52 +1000 Subject: [PATCH 136/186] Re-enable signature verification on attn incl --- eth2/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 3e1c0ece1e..0badf38072 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -134,7 +134,7 @@ impl OperationPool { verify_attestation_for_block_inclusion( state, attestation, - VerifySignatures::False, + VerifySignatures::True, spec, ) .is_ok() From 25f2e212c307627ef169a93557ad1527d08e0151 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 13:30:07 
+1000 Subject: [PATCH 137/186] Update to latest interop keypair spec --- .../generate_deterministic_keypairs.rs | 15 +- eth2/utils/bls/src/public_key.rs | 4 + eth2/utils/bls/src/secret_key.rs | 4 + eth2/utils/eth2_interop_keypairs/Cargo.toml | 8 + eth2/utils/eth2_interop_keypairs/src/lib.rs | 138 +++++------------- .../utils/eth2_interop_keypairs/tests/test.rs | 64 ++++++++ 6 files changed, 121 insertions(+), 112 deletions(-) create mode 100644 eth2/utils/eth2_interop_keypairs/tests/test.rs diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index 172b142ef9..a687eb978f 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_interop_keypairs::be_private_key; +use eth2_interop_keypairs::keypair; use log::debug; use rayon::prelude::*; @@ -15,8 +15,8 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { let keypairs: Vec = (0..validator_count) .collect::>() - .par_iter() - .map(|&i| generate_deterministic_keypair(i)) + .into_par_iter() + .map(generate_deterministic_keypair) .collect(); keypairs @@ -26,8 +26,9 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { /// /// This is used for testing only, and not to be used in production! 
pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { - let sk = SecretKey::from_bytes(&be_private_key(validator_index)) - .expect("be_private_key always returns valid keys"); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } + let raw = keypair(validator_index); + Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + } } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index e03b17686a..4b5abb58e3 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -20,6 +20,10 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self(raw) + } + /// Returns the underlying signature. pub fn as_raw(&self) -> &RawPublicKey { &self.0 diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 12f9a713b3..54da0fa0f3 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -20,6 +20,10 @@ impl SecretKey { SecretKey(RawSecretKey::random(&mut rand::thread_rng())) } + pub fn from_raw(raw: RawSecretKey) -> Self { + Self(raw) + } + /// Returns the underlying point as compressed bytes. 
fn as_bytes(&self) -> Vec { self.as_raw().as_bytes() diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index e1c4dab040..31f9718cd4 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -7,5 +7,13 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" } + +[dev-dependencies] +base64 = "0.10" +serde = "1.0" +serde_derive = "1.0" +serde_yaml = "0.8" diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 8ba2b9eba3..4c1320723a 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -5,126 +5,54 @@ //! keys generated here are **not secret** and are **not for production use**. //! //! Note: these keys have not been tested against a reference implementation, yet. +#[macro_use] +extern crate lazy_static; use eth2_hashing::hash; +use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; -fn hash_big_int_le(uint: BigUint) -> BigUint { - let mut preimage = uint.to_bytes_le(); - preimage.resize(32, 0_u8); - BigUint::from_bytes_le(&hash(&preimage)) +lazy_static! 
{ + static ref CURVE_ORDER: BigUint = + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + .parse::() + .expect("Curve order should be valid"); } -fn private_key(validator_index: usize) -> BigUint { - let mut key = BigUint::from(validator_index); - loop { - key = hash_big_int_le(key); - if key.bits() <= CURVE_ORDER_BITS { - break key; - } - } -} - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in big-endian bytes. -pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_be(); - - let mut out = [0; PRIVATE_KEY_BYTES]; - out[PRIVATE_KEY_BYTES - vec.len()..PRIVATE_KEY_BYTES].copy_from_slice(&vec); - out -} - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in little-endian bytes. pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_le(); + let preimage = { + let mut bytes = [0; HASH_BYTES]; + let index = validator_index.to_le_bytes(); + bytes[0..index.len()].copy_from_slice(&index); + bytes + }; - let mut out = [0; PRIVATE_KEY_BYTES]; - out[0..vec.len()].copy_from_slice(&vec); - out + let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER; + + let mut bytes = [0; PRIVATE_KEY_BYTES]; + let privkey_bytes = privkey.to_bytes_le(); + bytes[0..privkey_bytes.len()].copy_from_slice(&privkey_bytes); + bytes } -#[cfg(test)] -mod tests { - use super::*; +pub fn keypair(validator_index: usize) -> Keypair { + let bytes = le_private_key(validator_index); - fn flip(vec: &[u8]) -> Vec { - let len = vec.len(); - let mut out = vec![0; len]; - for i in 0..len { - out[len - 1 - i] = vec[i]; - } - out - } + let sk = + SecretKey::from_bytes(&swap_bytes(bytes.to_vec())).expect("Should be valid private key"); - fn pad_le_bls(mut vec: Vec) -> Vec { - 
vec.resize(PRIVATE_KEY_BYTES, 0_u8); - vec - } - - fn pad_be_bls(mut vec: Vec) -> Vec { - let mut out = vec![0; PRIVATE_KEY_BYTES - vec.len()]; - out.append(&mut vec); - out - } - - fn pad_le_hash(index: usize) -> Vec { - let mut vec = index.to_le_bytes().to_vec(); - vec.resize(HASH_BYTES, 0_u8); - vec - } - - fn multihash(index: usize, rounds: usize) -> Vec { - let mut vec = pad_le_hash(index); - for _ in 0..rounds { - vec = hash(&vec); - } - vec - } - - fn compare(validator_index: usize, preimage: &[u8]) { - assert_eq!( - &le_private_key(validator_index)[..], - &pad_le_bls(hash(preimage))[..] - ); - assert_eq!( - &be_private_key(validator_index)[..], - &pad_be_bls(flip(&hash(preimage)))[..] - ); - } - - #[test] - fn consistency() { - for i in 0..256 { - let le = BigUint::from_bytes_le(&le_private_key(i)); - let be = BigUint::from_bytes_be(&be_private_key(i)); - assert_eq!(le, be); - } - } - - #[test] - fn non_repeats() { - // These indices only need one hash to be in the curve order. - compare(0, &pad_le_hash(0)); - compare(3, &pad_le_hash(3)); - } - - #[test] - fn repeats() { - // Index 5 needs 5x hashes to get into the curve order. 
- compare(5, &multihash(5, 5)); - } - - #[test] - fn doesnt_panic() { - for i in 0..256 { - be_private_key(i); - le_private_key(i); - } + Keypair { + pk: PublicKey::from_secret_key(&sk), + sk, } } + +fn swap_bytes(input: Vec) -> Vec { + let mut output = vec![]; + input.into_iter().rev().for_each(|byte| output.push(byte)); + output +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/test.rs new file mode 100644 index 0000000000..45f128db61 --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/test.rs @@ -0,0 +1,64 @@ +#![cfg(test)] +use eth2_interop_keypairs::{keypair, le_private_key}; +use num_bigint::BigUint; + +#[test] +fn reference_private_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "16808672146709759238327133555736750089977066230599028589193936481731504400486", + "37006103240406073079686739739280712467525465637222501547219594975923976982528", + "22330876536127119444572216874798222843352868708084730796787004036811744442455", + "17048462031355941381150076874414096388968985457797372268770826099852902060945", + "28647806952216650698330424381872693846361470773871570637461872359310549743691", + "2416304019107052589452838695606585506736351107897780798170812672519914514344", + "7300215445567548136411883691093515822872548648751398235557229381530420545683", + "26495790445032093722332687600112008700915252495659977774957922313678954054133", + "2908643403277969554503670470854573663206729491025062456164283925661321952518", + "19554639423851580804889717218680781396599791537051606512605582393920758869044", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let bytes = le_private_key(i); + let num = BigUint::from_bytes_le(&bytes); + assert_eq!(&num.to_str_radix(10), reference) + }); +} + +#[test] +fn reference_public_keys() { + // Sourced from: + // + 
// https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "qZp27XeW974i1bfoXe63xWd+iOUR4LM3YY+MTrYTSbS/LRU/ZJ97UzWf6LlKOORM", + "uJvrxpl2lyajGMjplxvTFxKXxhrqSmV4p6T5S1R9y6W6wWqJEItrah/jaV0ah0oL", + "o6MrD4tN24PxoKhT2B3XJd/ld9T0w9uOzlLOKwJuyoSBXBp+jpKk3j11VzO/fkqb", + "iMFB33fNnY16cadcgmxBqcnwPG7hsYDz54UvaigAmd7TUbWNZuZTr45CgWpNj1Mu", + "gSg7eiDhykYOvZu9dwBdVXNwyrsfmkT1MMTExmIw9nX434tMKBiFGqfXeoDKWkpe", + "qwvdoPhfhC9DG+rM8SUL8f17pRtBAP1kNktkAf2oW7AGmz5xW1iBloTn/AsQpyo0", + "mXfxyLcxqNVVgUa/uGyuomQ088WHi1ib8oCkLJFZ5wDp3w5AhilsILAR0ueMJ9Nz", + "qNTHwneVpyWWExfvWVOnAy7W2Dc524sOinI1PRuLRDlCf376LInKoDzJ8o+Muris", + "ptMQ27+rmiJFD1mZP4ekzl22Ij87Xx8w0sTscYki1ADgs8d0HejlmWD3JBGg7hCn", + "mJNBPAAoOj+e2f2YRd2hzqOCKNIlZ/lUHczDV+VKLWpuIEEDySVky8BfSQWsfEk6", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let pair = keypair(i); + let reference = base64::decode(reference).expect("Reference should be valid base64"); + + assert_eq!( + reference.len(), + 48, + "Reference should be 48 bytes (public key size)" + ); + + assert_eq!(pair.pk.as_bytes(), reference); + }); +} From e154b30232a9ca5db31cd1d4331d17a49a840ff7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 25 Jun 2019 18:46:57 +1000 Subject: [PATCH 138/186] merkle_proof: implement tree construction Plus QuickCheck tests! 
--- eth2/utils/merkle_proof/Cargo.toml | 5 + eth2/utils/merkle_proof/src/lib.rs | 193 ++++++++++++++++++++++++++++- 2 files changed, 193 insertions(+), 5 deletions(-) diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 6ef6cc0aac..5ffb6af532 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -7,3 +7,8 @@ edition = "2018" [dependencies] ethereum-types = "0.6" eth2_hashing = { path = "../eth2_hashing" } +lazy_static = "1.3.0" + +[dev-dependencies] +quickcheck = "0.8" +quickcheck_macros = "0.8" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index bc8bcea127..73a972c759 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,138 @@ +#[macro_use] +extern crate lazy_static; + use eth2_hashing::hash; use ethereum_types::H256; +const MAX_TREE_DEPTH: usize = 32; +const EMPTY_SLICE: &[H256] = &[]; + +lazy_static! { + /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. + static ref ZERO_HASHES: Vec = { + let mut hashes = vec![H256::from([0; 32]); MAX_TREE_DEPTH + 1]; + + for i in 0..MAX_TREE_DEPTH { + hashes[i + 1] = hash_concat(hashes[i], hashes[i]); + } + + hashes + }; + + /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. + static ref ZERO_NODES: Vec = { + (0..MAX_TREE_DEPTH + 1).map(MerkleTree::Zero).collect() + }; +} + +/// Right-sparse Merkle tree. +/// +/// Efficiently represents a Merkle tree of fixed depth where only the first N +/// indices are populated by non-zero leaves (perfect for the deposit contract tree). +#[derive(Debug)] +pub enum MerkleTree { + /// Leaf node with the hash of its content. + Leaf(H256), + /// Internal node with hash, left subtree and right subtree. + Node(H256, Box, Box), + /// Zero subtree of a given depth. + /// + /// It represents a Merkle tree of 2^depth zero leaves. 
+ Zero(usize), +} + +impl MerkleTree { + /// Create a new Merkle tree from a list of leaves and a fixed depth. + pub fn create(leaves: &[H256], depth: usize) -> Self { + use MerkleTree::*; + + if leaves.is_empty() { + return Zero(depth); + } + + match depth { + 0 => { + debug_assert_eq!(leaves.len(), 1); + Leaf(leaves[0]) + } + _ => { + // Split leaves into left and right subtrees + let subtree_capacity = 2usize.pow(depth as u32 - 1); + let (left_leaves, right_leaves) = if leaves.len() <= subtree_capacity { + (leaves, EMPTY_SLICE) + } else { + leaves.split_at(subtree_capacity) + }; + + let left_subtree = MerkleTree::create(left_leaves, depth - 1); + let right_subtree = MerkleTree::create(right_leaves, depth - 1); + let hash = hash_concat(left_subtree.hash(), right_subtree.hash()); + + Node(hash, Box::new(left_subtree), Box::new(right_subtree)) + } + } + } + + /// Retrieve the root hash of this Merkle tree. + pub fn hash(&self) -> H256 { + match *self { + MerkleTree::Leaf(h) => h, + MerkleTree::Node(h, _, _) => h, + MerkleTree::Zero(depth) => ZERO_HASHES[depth], + } + } + + /// Get a reference to the left and right subtrees if they exist. + pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { + match *self { + MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Node(_, ref l, ref r) => Some((l, r)), + MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), + } + } + + /// Is this Merkle tree a leaf? + pub fn is_leaf(&self) -> bool { + match self { + MerkleTree::Leaf(_) => true, + _ => false, + } + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth`. 
+ pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + let mut proof = vec![]; + let mut current_node = self; + let mut current_depth = depth; + while current_depth > 0 { + let ith_bit = (index >> (current_depth - 1)) & 0x01; + // Note: unwrap is safe because leaves are only ever constructed at depth == 0. + let (left, right) = current_node.left_and_right_branches().unwrap(); + + // Go right, include the left branch in the proof. + if ith_bit == 1 { + proof.push(left.hash()); + current_node = right; + } else { + proof.push(right.hash()); + current_node = left; + } + current_depth -= 1; + } + + debug_assert_eq!(proof.len(), depth); + debug_assert!(current_node.is_leaf()); + + // Put proof in bottom-up order. + proof.reverse(); + + (current_node.hash(), proof) + } +} + /// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. /// /// The `branch` argument is the main component of the proof: it should be a list of internal @@ -46,15 +178,66 @@ fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { vec1 } +/// Compute the hash of two other hashes concatenated. +fn hash_concat(h1: H256, h2: H256) -> H256 { + H256::from_slice(&hash(&concat( + h1.as_bytes().to_vec(), + h2.as_bytes().to_vec(), + ))) +} + #[cfg(test)] mod tests { use super::*; + use quickcheck::TestResult; + use quickcheck_macros::quickcheck; - fn hash_concat(h1: H256, h2: H256) -> H256 { - H256::from_slice(&hash(&concat( - h1.as_bytes().to_vec(), - h2.as_bytes().to_vec(), - ))) + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. + /// 2. Generate valid proofs for all of the leaves of this MerkleTree. 
+ #[quickcheck] + fn quickcheck_create_and_verify(int_leaves: Vec, depth: usize) -> TestResult { + if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { + return TestResult::discard(); + } + + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).into_iter().all(|i| { + let (leaf, branch) = merkle_tree.generate_proof(i, depth); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + TestResult::from_bool(proofs_ok) + } + + #[test] + fn sparse_zero_correct() { + let depth = 2; + let zero = H256::from([0x00; 32]); + let dense_tree = MerkleTree::create(&[zero, zero, zero, zero], depth); + let sparse_tree = MerkleTree::create(&[], depth); + assert_eq!(dense_tree.hash(), sparse_tree.hash()); + } + + #[test] + fn create_small_example() { + // Construct a small merkle tree manually and check that it's consistent with + // the MerkleTree type. 
+ let leaf_b00 = H256::from([0xAA; 32]); + let leaf_b01 = H256::from([0xBB; 32]); + let leaf_b10 = H256::from([0xCC; 32]); + let leaf_b11 = H256::from([0xDD; 32]); + + let node_b0x = hash_concat(leaf_b00, leaf_b01); + let node_b1x = hash_concat(leaf_b10, leaf_b11); + + let root = hash_concat(node_b0x, node_b1x); + + let tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); + assert_eq!(tree.hash(), root); } #[test] From 6234adc0d6801b92811383f686f59a1fad8985bc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 15:33:34 +1000 Subject: [PATCH 139/186] Add interop-spec genesis procedure --- beacon_node/beacon_chain/Cargo.toml | 2 + .../beacon_chain/src/beacon_chain_builder.rs | 174 +++++++++++++++++- beacon_node/client/src/lib.rs | 4 +- eth2/operation_pool/src/lib.rs | 6 +- eth2/types/src/slot_epoch_macros.rs | 2 +- .../builders/testing_beacon_state_builder.rs | 2 +- eth2/utils/bls/src/fake_public_key.rs | 8 + 7 files changed, 181 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 56cf7eed68..3378e6a349 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] eth2_config = { path = "../../eth2/utils/eth2_config" } +merkle_proof = { path = "../../eth2/utils/merkle_proof" } store = { path = "../store" } parking_lot = "0.7" lazy_static = "1.3.0" @@ -21,6 +22,7 @@ eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } +eth2_hashing = { path = "../../eth2/utils/eth2_hashing" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 223d99d8dc..8a51900481 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,11 +1,20 @@ use super::bootstrapper::Bootstrapper; use crate::{BeaconChain, BeaconChainTypes}; +use eth2_hashing::hash; +use merkle_proof::MerkleTree; +use rayon::prelude::*; use slog::Logger; +use ssz::Encode; +use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; -use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec}; +use tree_hash::{SignedRoot, TreeHash}; +use types::{ + test_utils::generate_deterministic_keypairs, BeaconBlock, BeaconState, ChainSpec, Deposit, + DepositData, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, +}; enum BuildStrategy { FromGenesis { @@ -27,7 +36,7 @@ impl BeaconChainBuilder { minutes: u64, spec: ChainSpec, log: Logger, - ) -> Self { + ) -> Result { Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) } @@ -36,14 +45,10 @@ impl BeaconChainBuilder { validator_count: usize, spec: ChainSpec, log: Logger, - ) -> Self { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) - .build(); + ) -> Result { + let genesis_state = interop_genesis_state(validator_count, genesis_time, &spec)?; - genesis_state.genesis_time = genesis_time; - - Self::from_genesis_state(genesis_state, spec, log) + Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { @@ -125,6 +130,95 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - genesis_block } +fn interop_genesis_state( + validator_count: usize, + genesis_time: u64, + spec: &ChainSpec, +) -> Result, String> { + let keypairs = generate_deterministic_keypairs(validator_count); + let eth1_block_hash = Hash256::from_slice(&[42; 32]); + let eth1_timestamp = 
2_u64.pow(40); + let amount = spec.max_effective_balance; + dbg!(amount); + + let withdrawal_credentials = |pubkey: &PublicKey| { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) + }; + + let datas = keypairs + .into_par_iter() + .map(|keypair| { + let mut data = DepositData { + withdrawal_credentials: withdrawal_credentials(&keypair.pk), + pubkey: keypair.pk.into(), + amount, + signature: Signature::empty_signature().into(), + }; + + let domain = spec.get_domain( + spec.genesis_slot.epoch(T::slots_per_epoch()), + Domain::Deposit, + &Fork::default(), + ); + data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); + + data + }) + .collect::>(); + + let deposit_root_leaves = datas + .par_iter() + .map(|data| Hash256::from_slice(&data.tree_hash_root())) + .collect::>(); + + let mut proofs = vec![]; + for i in 1..=deposit_root_leaves.len() { + // Note: this implementation is not so efficient. + // + // If `MerkleTree` had a push method, we could just build one tree and sample it instead of + // rebuilding the tree for each deposit. 
+ let tree = MerkleTree::create( + &deposit_root_leaves[0..i], + spec.deposit_contract_tree_depth as usize, + ); + + let (_, mut proof) = tree.generate_proof(i - 1, spec.deposit_contract_tree_depth as usize); + proof.push(Hash256::from_slice(&int_to_bytes32(i))); + + assert_eq!( + proof.len(), + spec.deposit_contract_tree_depth as usize + 1, + "Deposit proof should be correct len" + ); + + proofs.push(proof); + } + + let deposits = datas + .into_par_iter() + .zip(proofs.into_par_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect::>(); + + let mut state = + initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + state.genesis_time = genesis_time; + + Ok(state) +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: usize) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + /// Returns the system time, mod 30 minutes. /// /// Used for easily creating testnets. @@ -134,6 +228,66 @@ fn recent_genesis_time(minutes: u64) -> u64 { .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); - // genesis is now the last 15 minute block. 
now - secs_after_last_period } + +#[cfg(test)] +mod test { + use super::*; + use types::{EthSpec, MinimalEthSpec}; + + type TestEthSpec = MinimalEthSpec; + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let state = interop_genesis_state::(validator_count, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 9876e96723..c7558dd5e1 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -96,7 +96,7 @@ where *minutes, spec.clone(), log.clone(), - ), + )?, BeaconChainStartMethod::Generated { validator_count, genesis_time, @@ -105,7 +105,7 @@ where *validator_count, spec.clone(), log.clone(), - ), + )?, BeaconChainStartMethod::Yaml { file } => { BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? 
} diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf38072..bb64c3ca26 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -16,9 +16,9 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_attestation_for_block_inclusion, - verify_attestation_for_state, verify_attester_slashing, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, - verify_transfer_time_independent_only, VerifySignatures, + verify_attester_slashing, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 084ff98e7c..62ca6b3af3 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -182,7 +182,7 @@ macro_rules! impl_display { &self, record: &slog::Record, key: slog::Key, - serializer: &mut slog::Serializer, + serializer: &mut dyn slog::Serializer, ) -> slog::Result { slog::Value::serialize(&self.0, record, key, serializer) } diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 4f8a2d9240..cf8c9ec8ea 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -94,7 +94,7 @@ impl TestingBeaconStateBuilder { /// Creates the builder from an existing set of keypairs. 
pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); - let starting_balance = 32_000_000_000; + let starting_balance = spec.max_effective_balance; debug!( "Building {} Validator objects from keypairs...", diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index e8dafaca6c..82b1c707f2 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -1,5 +1,6 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; use milagro_bls::G1Point; +use milagro_bls::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; @@ -24,6 +25,13 @@ impl FakePublicKey { Self::zero() } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self { + bytes: raw.clone().as_bytes(), + point: G1Point::new(), + } + } + /// Creates a new all-zero's public key pub fn zero() -> Self { Self { From 9ffb6d0fe141f7919f7122f47d7b6a4c1b3a600a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 15:49:06 +1000 Subject: [PATCH 140/186] Fix fake_crypto test fails --- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 4 ++++ eth2/state_processing/src/per_block_processing/tests.rs | 1 + eth2/state_processing/tests/tests.rs | 2 ++ eth2/utils/bls/src/public_key_bytes.rs | 1 + eth2/utils/bls/src/signature_bytes.rs | 1 + 5 files changed, 9 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 8a51900481..a569fe8338 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -130,6 +130,10 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - genesis_block } +/// Builds a genesis state as defined by the Eth2 interop procedure (see below). 
+/// +/// Reference: +/// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state( validator_count: usize, genesis_time: u64, diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index cf64dc85e1..f419d5fae7 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -1,4 +1,5 @@ #![cfg(all(test, not(feature = "fake_crypto")))] + use super::block_processing_builder::BlockProcessingBuilder; use super::errors::*; use crate::{per_block_processing, BlockSignatureStrategy}; diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 43b66f3edf..a7390c8505 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "fake_crypto"))] + use state_processing::{ per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy, }; diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs index f757351403..afdbcb2701 100644 --- a/eth2/utils/bls/src/public_key_bytes.rs +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -31,6 +31,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_public_key() { let mut public_key_bytes = [0; BLS_PUBLIC_KEY_BYTE_SIZE]; public_key_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs index a30cecb4d5..b89c0f0d11 100644 --- a/eth2/utils/bls/src/signature_bytes.rs +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -32,6 +32,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_signature() { let mut signature_bytes = [0; BLS_SIG_BYTE_SIZE]; signature_bytes[0] = 255; //a_flag1 == b_flag1 == 
c_flag1 == 1 and x1 = 0 shouldn't be allowed From a6e6827337cb62041b1b3e64480854e70bd3016b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 16:48:22 +1000 Subject: [PATCH 141/186] Remove pub const from interop keypairs --- eth2/utils/eth2_interop_keypairs/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 4c1320723a..490477eb33 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -12,7 +12,6 @@ use eth2_hashing::hash; use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; -pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; From 96fb3be2c753aeb9e4926c6b9bc99a374cfc115a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 17:02:57 +1000 Subject: [PATCH 142/186] Swap endianness of test keys --- eth2/utils/eth2_interop_keypairs/src/lib.rs | 40 +++++++++++-------- .../utils/eth2_interop_keypairs/tests/test.rs | 6 +-- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 490477eb33..ac610ee776 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -1,10 +1,21 @@ //! Produces the "deterministic" validator private keys used for inter-operability testing for //! Ethereum 2.0 clients. //! -//! Each private key is the first hash in the sha2 hash-chain that is less than 2^255. As such, -//! keys generated here are **not secret** and are **not for production use**. +//! Each private key is the sha2 hash of the validator index (little-endian, padded to 32 bytes), +//! modulo the BLS-381 curve order. //! -//! Note: these keys have not been tested against a reference implementation, yet. +//! 
Keys generated here are **not secret** and are **not for production use**. It is trivial to +//! know the secret key for any validator. +//! +//!## Reference +//! +//! Reference implementation: +//! +//! https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen.py +//! +//! +//! This implementation passes the [reference implementation +//! tests](https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml). #[macro_use] extern crate lazy_static; @@ -22,7 +33,9 @@ lazy_static! { .expect("Curve order should be valid"); } -pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { +/// Return a G1 point for the given `validator_index`, encoded as a compressed point in +/// big-endian byte-ordering. +pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { let preimage = { let mut bytes = [0; HASH_BYTES]; let index = validator_index.to_le_bytes(); @@ -33,25 +46,20 @@ pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER; let mut bytes = [0; PRIVATE_KEY_BYTES]; - let privkey_bytes = privkey.to_bytes_le(); - bytes[0..privkey_bytes.len()].copy_from_slice(&privkey_bytes); + let privkey_bytes = privkey.to_bytes_be(); + bytes[PRIVATE_KEY_BYTES - privkey_bytes.len()..].copy_from_slice(&privkey_bytes); bytes } +/// Return a public and private keypair for a given `validator_index`. 
pub fn keypair(validator_index: usize) -> Keypair { - let bytes = le_private_key(validator_index); - - let sk = - SecretKey::from_bytes(&swap_bytes(bytes.to_vec())).expect("Should be valid private key"); + let sk = SecretKey::from_bytes(&be_private_key(validator_index)).expect(&format!( + "Should build valid private key for validator index {}", + validator_index + )); Keypair { pk: PublicKey::from_secret_key(&sk), sk, } } - -fn swap_bytes(input: Vec) -> Vec { - let mut output = vec![]; - input.into_iter().rev().for_each(|byte| output.push(byte)); - output -} diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/test.rs index 45f128db61..0d89eaa4dc 100644 --- a/eth2/utils/eth2_interop_keypairs/tests/test.rs +++ b/eth2/utils/eth2_interop_keypairs/tests/test.rs @@ -1,5 +1,5 @@ #![cfg(test)] -use eth2_interop_keypairs::{keypair, le_private_key}; +use eth2_interop_keypairs::{be_private_key, keypair}; use num_bigint::BigUint; #[test] @@ -23,8 +23,8 @@ fn reference_private_keys() { .into_iter() .enumerate() .for_each(|(i, reference)| { - let bytes = le_private_key(i); - let num = BigUint::from_bytes_le(&bytes); + let bytes = be_private_key(i); + let num = BigUint::from_bytes_be(&bytes); assert_eq!(&num.to_str_radix(10), reference) }); } From 6ba093d14fd263073902e70b6ced458d45256e6b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 17:06:59 +1000 Subject: [PATCH 143/186] Add warning when disconnecting peer --- beacon_node/network/src/sync/simple_sync.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index d3ed2f3e4f..222b4c7fc1 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -525,6 +525,13 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + warn!( + &self.log, + "Disconnecting peer"; + "reason" => 
format!("{:?}", reason), + "peer_id" => format!("{:?}", peer_id), + ); + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } From 5f0509be501585f1043111df720f157441e8567a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 31 Aug 2019 12:34:27 +1000 Subject: [PATCH 144/186] Improve and extend CLI interface --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 +++-- beacon_node/client/src/lib.rs | 84 ++++++++++++-------- beacon_node/src/config.rs | 38 ++++++++- beacon_node/src/main.rs | 42 +++++++++- beacon_node/src/run.rs | 7 +- 5 files changed, 141 insertions(+), 49 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index afc7a992ae..6380d03b3e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -158,12 +158,6 @@ impl BeaconChain { genesis_state_root, )); - info!(log, "BeaconChain init"; - "genesis_validator_count" => genesis_state.validators.len(), - "genesis_state_root" => format!("{}", genesis_state_root), - "genesis_block_root" => format!("{}", genesis_block_root), - ); - // Slot clock let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, @@ -172,6 +166,12 @@ impl BeaconChain { ) .ok_or_else(|| Error::SlotClockDidNotStart)?; + info!(log, "Beacon chain initialized from genesis"; + "validator_count" => genesis_state.validators.len(), + "state_root" => format!("{}", genesis_state_root), + "block_root" => format!("{}", genesis_block_root), + ); + Ok(Self { spec, slot_clock, @@ -211,6 +211,13 @@ impl BeaconChain { let op_pool = p.op_pool.into_operation_pool(state, &spec); + info!(log, "Beacon chain initialized from store"; + "head_root" => format!("{}", p.canonical_head.beacon_block_root), + "head_epoch" => format!("{}", p.canonical_head.beacon_block.slot.epoch(T::EthSpec::slots_per_epoch())), + "finalized_root" => format!("{}", last_finalized_root), + "finalized_epoch" => 
format!("{}", last_finalized_block.slot.epoch(T::EthSpec::slots_per_epoch())), + ); + Ok(Some(BeaconChain { spec, slot_clock, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index c7558dd5e1..766d12c561 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -82,34 +82,70 @@ where let beacon_chain_builder = match &client_config.beacon_chain_start_method { BeaconChainStartMethod::Resume => { + info!( + log, + "Starting beacon chain"; + "method" => "resume" + ); BeaconChainBuilder::from_store(spec.clone(), log.clone()) } BeaconChainStartMethod::Mainnet => { crit!(log, "No mainnet beacon chain startup specification."); - return Err("Mainnet is not yet specified. We're working on it.".into()); + return Err("Mainnet launch is not yet announced.".into()); } BeaconChainStartMethod::RecentGenesis { validator_count, minutes, - } => BeaconChainBuilder::recent_genesis( - *validator_count, - *minutes, - spec.clone(), - log.clone(), - )?, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "minutes" => minutes, + "method" => "recent" + ); + BeaconChainBuilder::recent_genesis( + *validator_count, + *minutes, + spec.clone(), + log.clone(), + )? + } BeaconChainStartMethod::Generated { validator_count, genesis_time, - } => BeaconChainBuilder::quick_start( - *genesis_time, - *validator_count, - spec.clone(), - log.clone(), - )?, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "genesis_time" => genesis_time, + "method" => "quick" + ); + BeaconChainBuilder::quick_start( + *genesis_time, + *validator_count, + spec.clone(), + log.clone(), + )? + } BeaconChainStartMethod::Yaml { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "yaml" + ); BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? } - BeaconChainStartMethod::HttpBootstrap { server, .. 
} => { + BeaconChainStartMethod::HttpBootstrap { server, port } => { + info!( + log, + "Starting beacon chain"; + "port" => port, + "server" => server, + "method" => "bootstrap" + ); BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? } }; @@ -124,26 +160,6 @@ where panic!("Cannot start client before genesis!") } - // Block starting the client until we have caught the state up to the current slot. - // - // If we don't block here we create an initial scenario where we're unable to process any - // blocks and we're basically useless. - { - let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain - .slot() - .expect("Cannot start client before genesis"); - let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); - info!( - log, - "BeaconState cache init"; - "state_slot" => state_slot, - "wall_clock_slot" => wall_clock_slot, - "slots_since_genesis" => slots_since_genesis, - "catchup_distance" => wall_clock_slot - state_slot, - ); - } - let network_config = &client_config.network; let (network, network_send) = NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7c471e8acf..c4fa5eebcd 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -15,6 +15,12 @@ type Result = std::result::Result; type Config = (ClientConfig, Eth2Config); /// Gets the fully-initialized global client and eth2 configuration objects. +/// +/// The top-level `clap` arguments should be provied as `cli_args`. +/// +/// The output of this function depends primarily upon the given `cli_args`, however it's behaviour +/// may be influenced by other external services like the contents of the file system or the +/// response of some remote server. 
pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { let mut builder = ConfigBuilder::new(cli_args, log)?; @@ -95,7 +101,7 @@ fn process_testnet_subcommand( "path" => format!("{:?}", builder.data_dir) ); - // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { let server = cli_args @@ -131,6 +137,24 @@ fn process_testnet_subcommand( minutes, }) } + ("quick", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let genesis_time = cli_args + .value_of("genesis_time") + .ok_or_else(|| "No genesis time supplied")? + .parse::() + .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + }) + } _ => return Err("No testnet method specified. 
See 'testnet --help'.".into()), }; @@ -420,6 +444,18 @@ impl<'a> ConfigBuilder<'a> { self.client_config .apply_cli_args(cli_args, &mut self.log.clone())?; + if let Some(bump) = cli_args.value_of("port-bump") { + let bump = bump + .parse::() + .map_err(|e| format!("Unable to parse port bump: {}", e))?; + + self.client_config.network.libp2p_port += bump; + self.client_config.network.discovery_port += bump; + self.client_config.rpc.port += bump; + self.client_config.rpc.port += bump; + self.client_config.rest_api.port += bump; + } + if self.eth2_config.spec_constants != self.client_config.spec_constants { crit!(self.log, "Specification constants do not match."; "client_config" => format!("{}", self.client_config.spec_constants), diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 8ab20a4813..02e30b660d 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -48,18 +48,29 @@ fn main() { /* * Network parameters. */ + .arg( + Arg::with_name("port-bump") + .long("port-bump") + .short("b") + .value_name("INCREMENT") + .help("Sets all listening TCP/UDP ports to default values, but with each port increased by \ + INCREMENT. Useful when starting multiple nodes on a single machine. Using increments \ + in multiples of 10 is recommended.") + .takes_value(true), + ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") - .takes_value(true), + .takes_value(true) ) .arg( Arg::with_name("port") .long("port") .value_name("PORT") .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -81,6 +92,7 @@ fn main() { .long("disc-port") .value_name("PORT") .help("The discovery UDP port.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -125,6 +137,7 @@ fn main() { Arg::with_name("rpc-port") .long("rpc-port") .help("Listen port for RPC endpoint.") + .conflicts_with("port-bump") .takes_value(true), ) /* Client related arguments */ @@ -147,6 +160,7 @@ fn main() { .long("api-port") .value_name("APIPORT") .help("Set the listen TCP port for the RESTful HTTP API server.") + .conflicts_with("port-bump") .takes_value(true), ) @@ -230,8 +244,6 @@ fn main() { .conflicts_with("random-datadir") ) /* - * Testnet sub-commands. - * * `boostrap` * * Start a new node by downloading genesis and network info from another node via the @@ -272,7 +284,29 @@ fn main() { .default_value("15") .help("The maximum number of minutes that will have elapsed before genesis")) ) - .subcommand(SubCommand::with_name("yaml-genesis-state") + /* + * `quick` + * + * Start a new node, specifying the number of validators and genesis time + */ + .subcommand(SubCommand::with_name("quick") + .about("Creates a new genesis state from the specified validator count and genesis time. \ + Compatible with the `quick-start genesis` defined in the eth2.0-pm repo.") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("genesis_time") + .value_name("UNIX_EPOCH_SECONDS") + .required(true) + .help("The genesis time for the given state.")) + ) + /* + * `yaml` + * + * Start a new node, using a genesis state loaded from a YAML file + */ + .subcommand(SubCommand::with_name("yaml") .about("Creates a new datadir where the genesis state is read from YAML. 
Will fail to parse \ a YAML state that was generated to a different spec than that specified by --spec.") .arg(Arg::with_name("file") diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 620cb64bb5..26225cc920 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -41,11 +41,10 @@ pub fn run_beacon_node( info!( log, - "BeaconNode init"; - "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "network_dir" => format!("{:?}", other_client_config.network.network_dir), - "spec_constants" => &spec_constants, + "Starting beacon node"; + "p2p_listen_address" => format!("{}", &other_client_config.network.listen_address), "db_type" => &other_client_config.db_type, + "spec_constants" => &spec_constants, ); match (db_type.as_str(), spec_constants.as_str()) { From 14ea6f7710cb73ba10d0b03324cc02ba1ce5b29b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 11:31:18 +1000 Subject: [PATCH 145/186] Add newly created mdbook --- book/.gitignore | 1 + book/book.toml | 6 ++++ book/src/SUMMARY.md | 7 +++++ book/src/interop.md | 70 ++++++++++++++++++++++++++++++++++++++++++++ book/src/intro.md | 47 +++++++++++++++++++++++++++++ book/src/setup.md | 65 ++++++++++++++++++++++++++++++++++++++++ book/src/testnets.md | 64 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 260 insertions(+) create mode 100644 book/.gitignore create mode 100644 book/book.toml create mode 100644 book/src/SUMMARY.md create mode 100644 book/src/interop.md create mode 100644 book/src/intro.md create mode 100644 book/src/setup.md create mode 100644 book/src/testnets.md diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 0000000000..7585238efe --- /dev/null +++ b/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 0000000000..829c7b99c2 --- /dev/null +++ b/book/book.toml @@ -0,0 +1,6 @@ +[book] +authors = ["Paul Hauner"] +language = "en" +multilingual = false 
+src = "src" +title = "Lighthouse" diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 0000000000..e08af247c4 --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,7 @@ +# Summary + +* [Introduction](./intro.md) +* [Development Environment](./setup.md) +* [Testnets](./testnets.md) + * [Simple local testnet](./testnets.md) + * [Interop](./interop.md) diff --git a/book/src/interop.md b/book/src/interop.md new file mode 100644 index 0000000000..79d4a13762 --- /dev/null +++ b/book/src/interop.md @@ -0,0 +1,70 @@ +# Lighthouse Interop Guide + +This guide is intended for other Ethereum 2.0 client developers performing +inter-operability testing with Lighthouse. + +To allow for faster iteration cycles without the "merging to master" overhead, +we will use the [`interop`](https://github.com/sigp/lighthouse/tree/interop) +branch of [sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) +for September 2019 interop. **Please use ensure you `git checkout interop` +after cloning the repo.** + +## Environment + +All that is required for inter-op is a built and tested [development +environment](setup). When lighthouse boots, it will create the following +directories: + +- `~/.lighthouse`: database and configuration for the beacon node. +- `~/.lighthouse-validator`: database and configuration for the validator + client. + +After building the binaries with `cargo build --release --all`, there will be a +`target/release` directory in the root of the Lighthouse repository. This is +where the `beacon_node` and `validator_client` binaries are located. + +## Interop Procedure + +The following scenarios are documented: + +- [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a + `(validator_count, genesis)` tuple. +- [Starting a validator client](#validator-client) with `n` interop keypairs. +- [Starting a node from a genesis state file](#starting-from-a-genesis-file). 
+- [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse + node. + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +#### Quick-start Beacon Node + + +To start the node (each time creating a fresh database and configuration in +`~/.lighthouse`), use: + +``` +$ ./beacon_node testnet -f quick 8 1567222226 +``` + +>This method conforms the ["Quick-start +genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) +method in the `ethereum/eth2.0-pm` repository. +> +> The `-f` flag ignores any existing database or configuration, backing them up +before re-initializing. `8` is the validator count and `1567222226` is the +genesis time. +> +> See `$ ./beacon_node testnet quick --help` for more configuration options. + +#### Validator Client + +**TODO** + +#### Starting from a genesis file + +**TODO** + +#### Exporting a genesis file + +**TODO** diff --git a/book/src/intro.md b/book/src/intro.md new file mode 100644 index 0000000000..f290b7e40f --- /dev/null +++ b/book/src/intro.md @@ -0,0 +1,47 @@ +# Lighthouse Documentation + +[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link] + +[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg +[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh +[Doc Status]:https://img.shields.io/badge/rust--docs-master-orange +[Doc Link]: http://lighthouse-docs.sigmaprime.io/ + +Lighthouse is an **Ethereum 2.0 client** that connects to other Ethereum 2.0 +clients to form a resilient and decentralized proof-of-stake blockchain. 
+ +It is written in Rust, maintained by Sigma Prime and funded by the Ethereum +Foundation, Consensys and other individuals and organisations. + +## Developer Resources + +Documentation is provided for **researchers and developers** working on +Ethereum 2.0 and assumes prior knowledge on the topic. + +- Get started with [development environment setup](setup.html). +- [Run a simple testnet](testnets.html) in Only Three CLI Commands™. +- Read about our interop workflow. +- API? + +## Release + +Ethereum 2.0 is not fully specified or implemented and as such, Lighthouse is +still **under development**. + +We are on-track to provide a public, multi-client testnet in late-2019 and an +initial production-grade blockchain in 2020. + +## Features + +Lighthouse has been in development since mid-2018 and has an extensive feature +set: + +- Libp2p networking stack, featuring Discovery v5. +- Optimized `BeaconChain` state machine, up-to-date and + passing all tests. +- RESTful HTTP API. +- Documented and feature-rich CLI interface. +- Capable of running small, local testnets with 250ms slot times. +- Detailed metrics exposed in the Prometheus format. diff --git a/book/src/setup.md b/book/src/setup.md new file mode 100644 index 0000000000..e53ca93d83 --- /dev/null +++ b/book/src/setup.md @@ -0,0 +1,65 @@ +# Development Environment Setup + +Follow this guide to get a Lighthouse development environment up-and-running. + +See the [Quick instructions](#quick-instructions) for a summary or the +[Detailed instructions](#detailed-instructions) for clarification. + +## Quick instructions + +1. Install Rust + Cargo with [rustup](https://rustup.rs/). +1. Install build dependencies using your package manager. + - `$ clang protobuf libssl-dev cmake git-lfs` + - Ensure [git-lfs](https://git-lfs.github.com/) is installed with `git lfs + install`. +1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse), ensuring to + **initialize submodules**. +1. 
In the root of the repo, run the tests with `cargo test --all --release`. +1. Then, build the binaries with `cargo build --all --release`. +1. Lighthouse is now fully built and tested. + +_Note: first-time compilation may take several minutes._ + +## Detailed instructions + +A fully-featured development environment can be achieved with the following +steps: + + 1. Install [rustup](https://rustup.rs/). + 1. Use the command `rustup show` to get information about the Rust + installation. You should see that the active tool-chain is the stable + version. + - Updates can be performed using` rustup update`, Lighthouse generally + requires a recent version of Rust. + 1. Install build dependencies (Arch packages are listed here, your + distribution will likely be similar): + - `clang`: required by RocksDB. + - `protobuf`: required for protobuf serialization (gRPC) + - `libssl-dev`: also gRPC + - `cmake`: required for building protobuf + - `git-lfs`: The Git extension for [Large File + Support](https://git-lfs.github.com/) (required for Ethereum Foundation + test vectors). + 1. Clone the repository with submodules: `git clone --recursive + https://github.com/sigp/lighthouse`. If you're already cloned the repo, + ensure testing submodules are present: `$ git submodule init; git + submodule update` + 1. Change directory to the root of the repository. + 1. Run the test suite with `cargo test --all --release`. The build and test + process can take several minutes. If you experience any failures on + `master`, please raise an + [issue](https://github.com/sigp/lighthouse/issues). + +### Notes: + +Lighthouse targets Rust `stable` but generally runs on `nightly` too. + +#### Note for Windows users: + +Perl may also be required to build lighthouse. You can install [Strawberry +Perl](http://strawberryperl.com/), or alternatively use a choco install command +`choco install strawberryperl`. 
+ +Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues +compiling in Windows. You can specify a known working version by editing +version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`. diff --git a/book/src/testnets.md b/book/src/testnets.md new file mode 100644 index 0000000000..c07797ba0f --- /dev/null +++ b/book/src/testnets.md @@ -0,0 +1,64 @@ +# Simple Local Testnet + +You can setup a local, two-node testnet in **Only Three CLI Commands™**. + +Follow the [Quick instructions](#tldr) version if you're confident, or see +[Detailed instructions](#detail) for more. + + +## Quick instructions + +Setup a development environment, build the project and navigate to the +`target/release` directory. + +1. Start the first node: `$ ./beacon_node testnet -f recent 8` +1. Start a validator client: **TODO** +1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` + +_Repeat #3 to add more nodes._ + +## Detailed instructions + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +## Starting the Beacon Node + +Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: + +``` +$ ./beacon_node testnet -f recent 8 +``` + +> The `-f` flag ignores any existing database or configuration, backing them up +before re-initializing. `8` is number of validators with deposits in the +genesis state. +> +> See `$ ./beacon_node testnet recent --help` for more configuration options, +including `minimal`/`mainnet` specification. + +## Starting the Validator Client + +**TODO** + +## Adding another Beacon Node + +You may connect another (non-validating) node to your local network using the +lighthouse `bootstrap` command. 
+ +In a new terminal terminal, run: + + +``` +$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +``` + +> The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of the +new node to `10` higher. Your first node's HTTP server was at TCP `5052` but +this one will be at `5062`. +> +> The `-r` flag creates a new data directory in your home with a random string +appended, to avoid conflicting with any other running node. +> +> The HTTP address is the API of the first node. The new node will download +configuration via HTTP before starting sync via libp2p. From 543e9457b7e7e0329b442e0a09f6daeca9e47e6c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:32:57 +1000 Subject: [PATCH 146/186] Move bootstrapper into own crate --- Cargo.toml | 1 + beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 4 +- .../beacon_chain/src/beacon_chain_builder.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 2 - beacon_node/client/src/bootstrapper.rs | 210 ------------------ beacon_node/src/config.rs | 2 +- eth2/utils/lighthouse_bootstrap/Cargo.toml | 15 ++ .../utils/lighthouse_bootstrap/src/lib.rs | 0 validator_client/Cargo.toml | 1 + 10 files changed, 21 insertions(+), 217 deletions(-) delete mode 100644 beacon_node/client/src/bootstrapper.rs create mode 100644 eth2/utils/lighthouse_bootstrap/Cargo.toml rename beacon_node/beacon_chain/src/bootstrapper.rs => eth2/utils/lighthouse_bootstrap/src/lib.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index f087539e6a..d081ee74f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "eth2/utils/logging", "eth2/utils/eth2_hashing", "eth2/utils/lighthouse_metrics", + "eth2/utils/lighthouse_bootstrap", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 531c4615a1..0e42990182 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] 
eth2_config = { path = "../eth2/utils/eth2_config" } +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } beacon_chain = { path = "beacon_chain" } types = { path = "../eth2/types" } store = { path = "./store" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 3378e6a349..d5594a49af 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -11,14 +11,13 @@ store = { path = "../store" } parking_lot = "0.7" lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } +lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } -reqwest = "0.9" rayon = "1.0" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8" -eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } @@ -28,7 +27,6 @@ eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } -url = "1.2" lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index a569fe8338..fdddf64812 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,6 +1,6 @@ -use super::bootstrapper::Bootstrapper; use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; +use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 560da65197..9c833f778d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -4,7 +4,6 @@ extern 
crate lazy_static; mod beacon_chain; mod beacon_chain_builder; -mod bootstrapper; mod checkpoint; mod errors; mod fork_choice; @@ -19,7 +18,6 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; -pub use bootstrapper::Bootstrapper; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs deleted file mode 100644 index c94d9a51d8..0000000000 --- a/beacon_node/client/src/bootstrapper.rs +++ /dev/null @@ -1,210 +0,0 @@ -use eth2_libp2p::{ - multiaddr::{Multiaddr, Protocol}, - Enr, -}; -use reqwest::{Error as HttpError, Url}; -use serde::Deserialize; -use std::borrow::Cow; -use std::net::Ipv4Addr; -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; -use url::Host; - -#[derive(Debug)] -enum Error { - InvalidUrl, - HttpError(HttpError), -} - -impl From for Error { - fn from(e: HttpError) -> Error { - Error::HttpError(e) - } -} - -/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. -/// -/// Bootstrapping information includes things like genesis and finalized states and blocks, and -/// libp2p connection details. -pub struct Bootstrapper { - url: Url, -} - -impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result { - Ok(Self { - url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) - } - - /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. - /// - /// The address is created by querying the HTTP server for its listening libp2p addresses. - /// Then, we find the first TCP port in those addresses and combine the port with the URL of - /// the server. 
- /// - /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of - /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of - /// `/ipv4/172.0.0.1/tcp/9000`. - pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.listen_port().ok()?; - - let mut multiaddr = Multiaddr::with_capacity(2); - - match self.url.host()? { - Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), - Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), - _ => return None, - }; - - multiaddr.push(Protocol::Tcp(tcp_port)); - - Some(multiaddr) - } - - /// Returns the IPv4 address of the server URL, unless it contains a FQDN. - pub fn server_ipv4_addr(&self) -> Option { - match self.url.host()? { - Host::Ipv4(addr) => Some(addr), - _ => None, - } - } - - /// Returns the servers ENR address. - pub fn enr(&self) -> Result { - get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) - } - - /// Returns the servers listening libp2p addresses. - pub fn listen_port(&self) -> Result { - get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) - } - - /// Returns the genesis block and state. - pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { - let genesis_slot = Slot::new(0); - - let block = get_block(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))? - .beacon_block; - let state = get_state(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))? - .beacon_state; - - Ok((state, block)) - } - - /// Returns the most recent finalized state and block. 
- pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { - let slots_per_epoch = get_slots_per_epoch(self.url.clone()) - .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; - let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) - .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; - - let block = get_block(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))? - .beacon_block; - let state = get_state(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))? - .beacon_state; - - Ok((state, block)) - } -} - -fn get_slots_per_epoch(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("spec").push("slots_per_epoch"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("latest_finalized_checkpoint"); - }) - .map_err(|_| Error::InvalidUrl)?; - - let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; - - Ok(checkpoint.epoch.start_slot(slots_per_epoch)) -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse { - pub root: Hash256, - pub beacon_state: BeaconState, -} - -fn get_state(mut url: Url, slot: Slot) -> Result, Error> { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("state"); - }) - .map_err(|_| Error::InvalidUrl)?; - - url.query_pairs_mut() - .append_pair("slot", &format!("{}", slot.as_u64())); - - reqwest::get(url)? - .error_for_status()? 
- .json() - .map_err(Into::into) -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse { - pub root: Hash256, - pub beacon_block: BeaconBlock, -} - -fn get_block(mut url: Url, slot: Slot) -> Result, Error> { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("block"); - }) - .map_err(|_| Error::InvalidUrl)?; - - url.query_pairs_mut() - .append_pair("slot", &format!("{}", slot.as_u64())); - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -fn get_enr(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("network").push("enr"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -fn get_listen_port(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("network").push("listen_port"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c4fa5eebcd..c9ad964f5a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,7 +1,7 @@ -use beacon_chain::Bootstrapper; use clap::ArgMatches; use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; +use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml new file mode 100644 index 0000000000..3f48505b80 --- /dev/null +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "lighthouse_bootstrap" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eth2_config = { path = 
"../eth2_config" } +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } +reqwest = "0.9" +url = "1.2" +types = { path = "../../types" } +serde = "1.0" diff --git a/beacon_node/beacon_chain/src/bootstrapper.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs similarity index 100% rename from beacon_node/beacon_chain/src/bootstrapper.rs rename to eth2/utils/lighthouse_bootstrap/src/lib.rs diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 927731f63f..2000f5409b 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -18,6 +18,7 @@ eth2_ssz = "0.1" eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = "0.1" clap = "2.32.0" +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } From fa6ba51eb776e5b8dfe7842f07387464f2ea601c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:33:21 +1000 Subject: [PATCH 147/186] Make gRPC and HTTP on by default --- beacon_node/rest_api/src/config.rs | 6 +++--- beacon_node/rpc/src/config.rs | 6 +++--- beacon_node/rpc/src/lib.rs | 7 ++++++- beacon_node/src/main.rs | 14 ++++++-------- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index 90ac0821b1..c262a128a1 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: true, // rest_api enabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5052, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("api") { - self.enabled = true; + if args.is_present("no-api") { + 
self.enabled = false; } if let Some(rpc_address) = args.value_of("api-address") { diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs index 0f031ddc60..47eff6824a 100644 --- a/beacon_node/rpc/src/config.rs +++ b/beacon_node/rpc/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: false, // rpc disabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5051, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("rpc") { - self.enabled = true; + if args.is_present("no-grpc") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("rpc-address") { diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index eef0092921..59902ff43f 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -80,7 +80,12 @@ pub fn start_server( let spawn_rpc = { server.start(); for &(ref host, port) in server.bind_addrs() { - info!(log, "gRPC listening on {}:{}", host, port); + info!( + log, + "gRPC API started"; + "port" => port, + "host" => host, + ); } rpc_exit.and_then(move |_| { info!(log, "RPC Server shutting down"); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 02e30b660d..26537c6f76 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -120,10 +120,9 @@ fn main() { * gRPC parameters. 
*/ .arg( - Arg::with_name("rpc") - .long("rpc") - .value_name("RPC") - .help("Enable the RPC server.") + Arg::with_name("no-grpc") + .long("no-grpc") + .help("Disable the gRPC server.") .takes_value(false), ) .arg( @@ -142,10 +141,9 @@ fn main() { ) /* Client related arguments */ .arg( - Arg::with_name("api") - .long("api") - .value_name("API") - .help("Enable the RESTful HTTP API server.") + Arg::with_name("no-api") + .long("no-api") + .help("Disable RESTful HTTP API server.") .takes_value(false), ) .arg( From 4a69d01a3781d1c26e95ffb595cfe11a1c519853 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:33:43 +1000 Subject: [PATCH 148/186] Add first changes to validator CLI --- beacon_node/client/src/config.rs | 2 + validator_client/src/config.rs | 178 +++++++++++++++++++++------- validator_client/src/main.rs | 193 +++++++++++++++++++++++++++---- validator_client/src/service.rs | 32 +++-- 4 files changed, 331 insertions(+), 74 deletions(-) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 3aed26881f..2f5389ce54 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -89,6 +89,8 @@ impl Config { } /// Returns the core path for the client. + /// + /// Creates the directory if it does not exist. 
pub fn data_dir(&self) -> Option { let path = dirs::home_dir()?.join(&self.data_dir); fs::create_dir_all(&path).ok()?; diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7bc504b233..8e148cfab2 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -5,19 +5,45 @@ use serde_derive::{Deserialize, Serialize}; use slog::{debug, error, info, o, Drain}; use std::fs::{self, File, OpenOptions}; use std::io::{Error, ErrorKind}; +use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; use types::{EthSpec, MainnetEthSpec}; +pub const DEFAULT_SERVER: &str = "localhost"; +pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; +pub const DEFAULT_SERVER_HTTP_PORT: &str = "5052"; + +#[derive(Clone)] +pub enum KeySource { + /// Load the keypairs from disk. + Disk, + /// Generate the keypairs (insecure, generates predictable keys). + TestingKeypairRange(Range), +} + +impl Default for KeySource { + fn default() -> Self { + KeySource::Disk + } +} + /// Stores the core configuration for this validator instance. #[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub data_dir: PathBuf, + /// The source for loading keypairs + #[serde(skip)] + pub key_source: KeySource, /// The path where the logs will be outputted pub log_file: PathBuf, /// The server at which the Beacon Node can be contacted pub server: String, + /// The gRPC port on the server + pub server_grpc_port: u16, + /// The HTTP port on the server, for the REST API. + pub server_http_port: u16, /// The number of slots per epoch. 
pub slots_per_epoch: u64, } @@ -29,14 +55,33 @@ impl Default for Config { fn default() -> Self { Self { data_dir: PathBuf::from(".lighthouse-validator"), + key_source: <_>::default(), log_file: PathBuf::from(""), - server: "localhost:5051".to_string(), + server: DEFAULT_SERVER.into(), + server_grpc_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("gRPC port constant should be valid"), + server_http_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("HTTP port constant should be valid"), slots_per_epoch: MainnetEthSpec::slots_per_epoch(), } } } impl Config { + /// Returns the full path for the client data directory (not just the name of the directory). + pub fn full_data_dir(&self) -> Option { + dirs::home_dir().map(|path| path.join(&self.data_dir)) + } + + /// Creates the data directory (and any non-existing parent directories). + pub fn create_data_dir(&self) -> Option { + let path = dirs::home_dir()?.join(&self.data_dir); + fs::create_dir_all(&path).ok()?; + Some(path) + } + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. /// /// Returns an error if arguments are obviously invalid. May succeed even if some values are @@ -94,61 +139,106 @@ impl Config { Ok(()) } + /// Reads a single keypair from the given `path`. + /// + /// `path` should be the path to a directory containing a private key. The file name of `path` + /// must align with the public key loaded from it, otherwise an error is returned. + /// + /// An error will be returned if `path` is a file (not a directory). 
+ fn read_keypair_file(&self, path: PathBuf) -> Result { + if !path.is_dir() { + return Err("Is not a directory".into()); + } + + let key_filename: PathBuf = path.join(DEFAULT_PRIVATE_KEY_FILENAME); + + if !key_filename.is_file() { + return Err(format!( + "Private key is not a file: {:?}", + key_filename.to_str() + )); + } + + let mut key_file = File::open(key_filename.clone()) + .map_err(|e| format!("Unable to open private key file: {}", e))?; + + let key: Keypair = bincode::deserialize_from(&mut key_file) + .map_err(|e| format!("Unable to deserialize private key: {:?}", e))?; + + let ki = key.identifier(); + if &ki + != &path + .file_name() + .ok_or_else(|| "Invalid path".to_string())? + .to_string_lossy() + { + return Err(format!( + "The validator key ({:?}) did not match the directory filename {:?}.", + ki, + path.to_str() + )); + } else { + Ok(key) + } + } + /// Try to load keys from validator_dir, returning None if none are found or an error. #[allow(dead_code)] pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { - let key_pairs: Vec = fs::read_dir(&self.data_dir) - .ok()? - .filter_map(|validator_dir| { - let validator_dir = validator_dir.ok()?; + let key_pairs: Vec = + fs::read_dir(&self.full_data_dir().expect("Data dir must exist")) + .ok()? + .filter_map(|validator_dir| { + let validator_dir = validator_dir.ok()?; - if !(validator_dir.file_type().ok()?.is_dir()) { - // Skip non-directories (i.e. no files/symlinks) - return None; - } + if !(validator_dir.file_type().ok()?.is_dir()) { + // Skip non-directories (i.e. 
no files/symlinks) + return None; + } - let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); + let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); - if !(key_filename.is_file()) { - info!( + if !(key_filename.is_file()) { + info!( + log, + "Private key is not a file: {:?}", + key_filename.to_str() + ); + return None; + } + + debug!( log, - "Private key is not a file: {:?}", + "Deserializing private key from file: {:?}", key_filename.to_str() ); - return None; - } - debug!( - log, - "Deserializing private key from file: {:?}", - key_filename.to_str() - ); + let mut key_file = File::open(key_filename.clone()).ok()?; - let mut key_file = File::open(key_filename.clone()).ok()?; + let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) + { + key_ok + } else { + error!( + log, + "Unable to deserialize the private key file: {:?}", key_filename + ); + return None; + }; - let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) { - key_ok - } else { - error!( - log, - "Unable to deserialize the private key file: {:?}", key_filename - ); - return None; - }; - - let ki = key.identifier(); - if ki != validator_dir.file_name().into_string().ok()? { - error!( - log, - "The validator key ({:?}) did not match the directory filename {:?}.", - ki, - &validator_dir.path().to_string_lossy() - ); - return None; - } - Some(key) - }) - .collect(); + let ki = key.identifier(); + if ki != validator_dir.file_name().into_string().ok()? { + error!( + log, + "The validator key ({:?}) did not match the directory filename {:?}.", + ki, + &validator_dir.path().to_string_lossy() + ); + return None; + } + Some(key) + }) + .collect(); // Check if it's an empty vector, and return none. 
if key_pairs.is_empty() { diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 83a874df7d..40d5f6ab01 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -6,12 +6,16 @@ pub mod error; mod service; mod signer; -use crate::config::Config as ValidatorClientConfig; +use crate::config::{ + Config as ClientConfig, KeySource, DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, + DEFAULT_SERVER_HTTP_PORT, +}; use crate::service::Service as ValidatorService; -use clap::{App, Arg}; +use clap::{App, Arg, ArgMatches, SubCommand}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; +use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, warn, Drain, Level}; +use slog::{crit, error, info, o, warn, Drain, Level, Logger}; use std::fs; use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -21,6 +25,8 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +type Result = core::result::Result; + fn main() { // Logging let decorator = slog_term::TermDecorator::new().build(); @@ -49,28 +55,36 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("eth2-spec") - .long("eth2-spec") + Arg::with_name("eth2-config") + .long("eth2-config") .short("e") .value_name("TOML_FILE") - .help("Path to Ethereum 2.0 specifications file.") + .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") .takes_value(true), ) .arg( Arg::with_name("server") .long("server") - .value_name("server") + .value_name("NETWORK_ADDRESS") .help("Address to connect to BeaconNode.") + .default_value(DEFAULT_SERVER) .takes_value(true), ) .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. 
This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) + Arg::with_name("server-grpc-port") + .long("g") + .value_name("PORT") + .help("Port to use for gRPC API connection to the server.") + .default_value(DEFAULT_SERVER_GRPC_PORT) + .takes_value(true), + ) + .arg( + Arg::with_name("server-http-port") + .long("h") + .value_name("PORT") + .help("Port to use for HTTP API connection to the server.") + .default_value(DEFAULT_SERVER_HTTP_PORT) + .takes_value(true), ) .arg( Arg::with_name("debug-level") @@ -82,6 +96,33 @@ fn main() { .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("info"), ) + /* + * The "testnet" sub-command. + * + * Used for starting testnet validator clients. + */ + .subcommand(SubCommand::with_name("testnet") + .about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \ + validator index. 
ONLY USE FOR TESTING PURPOSES!") + .arg( + Arg::with_name("bootstrap") + .short("b") + .long("bootstrap") + .help("Connect to the RPC server to download the eth2_config via the HTTP API.") + ) + .subcommand(SubCommand::with_name("range") + .about("Uses the standard, predicatable `interop` keygen method to produce a range \ + of predicatable private keys and starts performing their validator duties.") + .arg(Arg::with_name("first_validator") + .value_name("VALIDATOR_INDEX") + .required(true) + .help("The first validator public key to be generated for this client.")) + .arg(Arg::with_name("validator_count") + .value_name("COUNT") + .required(true) + .help("The number of validators.")) + ) + ) .get_matches(); let drain = match matches.value_of("debug-level") { @@ -93,8 +134,9 @@ fn main() { Some("crit") => drain.filter_level(Level::Critical), _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); + /* let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) @@ -128,12 +170,10 @@ fn main() { // Attempt to load the `ClientConfig` from disk. // // If file doesn't exist, create a new, default one. 
- let mut client_config = match read_from_file::( - client_config_path.clone(), - ) { + let mut client_config = match read_from_file::(client_config_path.clone()) { Ok(Some(c)) => c, Ok(None) => { - let default = ValidatorClientConfig::default(); + let default = ClientConfig::default(); if let Err(e) = write_to_file(client_config_path.clone(), &default) { crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); return; @@ -223,12 +263,23 @@ fn main() { return; } }; + */ + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(tuple) => tuple, + Err(e) => { + crit!( + log, + "Unable to initialize configuration"; + "error" => e + ); + return; + } + }; info!( log, "Starting validator client"; - "datadir" => client_config.data_dir.to_str(), - "spec_constants" => ð2_config.spec_constants, + "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(), ); let result = match eth2_config.spec_constants.as_str() { @@ -260,3 +311,103 @@ fn main() { Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()), } } + +/// Parses the CLI arguments and attempts to load the client and eth2 configuration. +/// +/// This is not a pure function, it reads from disk and may contact network servers. 
+pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, Eth2Config)> { + let mut client_config = ClientConfig::default(); + + if let Some(server) = cli_args.value_of("server") { + client_config.server = server.to_string(); + } + + if let Some(port) = cli_args.value_of("server-http-port") { + client_config.server_http_port = port + .parse::() + .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?; + } + + if let Some(port) = cli_args.value_of("server-grpc-port") { + client_config.server_grpc_port = port + .parse::() + .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?; + } + + info!( + log, + "Beacon node connection info"; + "grpc_port" => client_config.server_grpc_port, + "http_port" => client_config.server_http_port, + "server" => &client_config.server, + ); + + match cli_args.subcommand() { + ("testnet", Some(sub_cli_args)) => { + if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { + return Err( + "Cannot specify --eth2-config and --bootstrap as it may result \ + in ambiguity." 
+ .into(), + ); + } + process_testnet_subcommand(sub_cli_args, client_config, log) + } + _ => { + unimplemented!("Resuming (not starting a testnet)"); + } + } +} + +fn process_testnet_subcommand( + cli_args: &ArgMatches, + mut client_config: ClientConfig, + log: &Logger, +) -> Result<(ClientConfig, Eth2Config)> { + let eth2_config = if cli_args.is_present("bootstrap") { + let bootstrapper = Bootstrapper::from_server_string(format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ))?; + + let eth2_config = bootstrapper.eth2_config()?; + + info!( + log, + "Bootstrapped eth2 config via HTTP"; + "slot_time_millis" => eth2_config.spec.milliseconds_per_slot, + "spec" => ð2_config.spec_constants, + ); + + eth2_config + } else { + return Err("Starting without bootstrap is not implemented".into()); + }; + + client_config.key_source = match cli_args.subcommand() { + ("range", Some(sub_cli_args)) => { + let first = sub_cli_args + .value_of("first_validator") + .ok_or_else(|| "No first validator supplied")? + .parse::() + .map_err(|e| format!("Unable to parse first validator: {:?}", e))?; + let count = sub_cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator count supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse validator count: {:?}", e))?; + + info!( + log, + "Generating unsafe testing keys"; + "first_validator" => first, + "count" => count + ); + + KeySource::TestingKeypairRange(first..first + count) + } + _ => KeySource::Disk, + }; + + Ok((client_config, eth2_config)) +} diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index bd694668bf..ae6f945310 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -73,12 +73,15 @@ impl Service error_chain::Result> { - // initialise the beacon node client to check for a connection + let server_url = format!( + "{}:{}", + client_config.server, client_config.server_grpc_port + ); let env = Arc::new(EnvBuilder::new().build()); // Beacon node gRPC beacon node endpoints. let beacon_node_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&server_url); BeaconNodeServiceClient::new(ch) }; @@ -86,9 +89,14 @@ impl Service { - warn!(log, "Could not connect to node. 
Error: {}", e); - info!(log, "Retrying in 5 seconds..."); - std::thread::sleep(Duration::from_secs(5)); + let retry_seconds = 5; + warn!( + log, + "Could not connect to beacon node"; + "error" => format!("{:?}", e), + "retry_in" => format!("{} seconds", retry_seconds), + ); + std::thread::sleep(Duration::from_secs(retry_seconds)); continue; } Ok(info) => { @@ -122,7 +130,13 @@ impl Service node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); + info!( + log, + "Beacon node connected"; + "version" => node_info.version.clone(), + "network_id" => node_info.network_id, + "genesis_time" => genesis_time + ); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; @@ -139,7 +153,7 @@ impl Service Service Date: Sun, 1 Sep 2019 20:09:46 +1000 Subject: [PATCH 149/186] Add testing keypairs to validator client --- validator_client/src/config.rs | 108 +++++++++++---------- validator_client/src/main.rs | 160 +++++--------------------------- validator_client/src/service.rs | 7 +- 3 files changed, 76 insertions(+), 199 deletions(-) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 8e148cfab2..3e13de7229 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -2,13 +2,13 @@ use bincode; use bls::Keypair; use clap::ArgMatches; use serde_derive::{Deserialize, Serialize}; -use slog::{debug, error, info, o, Drain}; +use slog::{error, info, o, warn, Drain}; use std::fs::{self, File, OpenOptions}; use std::io::{Error, ErrorKind}; use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; -use types::{EthSpec, MainnetEthSpec}; +use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec}; pub const DEFAULT_SERVER: &str = "localhost"; pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; @@ -182,69 +182,65 @@ impl Config { } } - /// Try to load keys from validator_dir, returning None if none are found or an error. 
- #[allow(dead_code)] - pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { - let key_pairs: Vec = + pub fn fetch_keys_from_disk(&self, log: &slog::Logger) -> Result, String> { + Ok( fs::read_dir(&self.full_data_dir().expect("Data dir must exist")) - .ok()? + .map_err(|e| format!("Failed to read datadir: {:?}", e))? .filter_map(|validator_dir| { - let validator_dir = validator_dir.ok()?; + let path = validator_dir.ok()?.path(); - if !(validator_dir.file_type().ok()?.is_dir()) { - // Skip non-directories (i.e. no files/symlinks) - return None; - } - - let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); - - if !(key_filename.is_file()) { - info!( - log, - "Private key is not a file: {:?}", - key_filename.to_str() - ); - return None; - } - - debug!( - log, - "Deserializing private key from file: {:?}", - key_filename.to_str() - ); - - let mut key_file = File::open(key_filename.clone()).ok()?; - - let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) - { - key_ok + if path.is_dir() { + match self.read_keypair_file(path.clone()) { + Ok(keypair) => Some(keypair), + Err(e) => { + error!( + log, + "Failed to parse a validator keypair"; + "error" => e, + "path" => path.to_str(), + ); + None + } + } } else { - error!( - log, - "Unable to deserialize the private key file: {:?}", key_filename - ); - return None; - }; - - let ki = key.identifier(); - if ki != validator_dir.file_name().into_string().ok()? { - error!( - log, - "The validator key ({:?}) did not match the directory filename {:?}.", - ki, - &validator_dir.path().to_string_lossy() - ); - return None; + None } - Some(key) }) - .collect(); + .collect(), + ) + } + + pub fn fetch_testing_keypairs( + &self, + range: std::ops::Range, + ) -> Result, String> { + Ok(range + .into_iter() + .map(generate_deterministic_keypair) + .collect()) + } + + /// Loads the keypairs according to `self.key_source`. Will return one or more keypairs, or an + /// error. 
+ #[allow(dead_code)] + pub fn fetch_keys(&self, log: &slog::Logger) -> Result, String> { + let keypairs = match &self.key_source { + KeySource::Disk => self.fetch_keys_from_disk(log)?, + KeySource::TestingKeypairRange(range) => { + warn!(log, "Using insecure private keys"); + self.fetch_testing_keypairs(range.clone())? + } + }; // Check if it's an empty vector, and return none. - if key_pairs.is_empty() { - None + if keypairs.is_empty() { + Err( + "No validator keypairs were found, unable to proceed. To generate \ + testing keypairs, see 'testnet range --help'." + .into(), + ) } else { - Some(key_pairs) + Ok(keypairs) } } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 40d5f6ab01..c849be31b6 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -12,12 +12,10 @@ use crate::config::{ }; use crate::service::Service as ValidatorService; use clap::{App, Arg, ArgMatches, SubCommand}; -use eth2_config::{read_from_file, write_to_file, Eth2Config}; +use eth2_config::Eth2Config; use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, warn, Drain, Level, Logger}; -use std::fs; -use std::path::PathBuf; +use slog::{crit, error, info, o, Drain, Level, Logger}; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; @@ -54,6 +52,17 @@ fn main() { .help("File path where output will be written.") .takes_value(true), ) + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type.") + .takes_value(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .conflicts_with("eth2-config") + .global(true) + ) .arg( Arg::with_name("eth2-config") .long("eth2-config") @@ -135,135 +144,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; let log = slog::Logger::root(drain.fuse(), o!()); - - /* - let data_dir = match matches - 
.value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. - // - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path.clone(), &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } - Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); - return; - } - }; - - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path: PathBuf = matches - .value_of("eth2-spec") - .and_then(|s| Some(PathBuf::from(s))) - .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - - // Initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. 
- let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. - let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. 
- match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - */ let (client_config, eth2_config) = match get_configs(&matches, &log) { Ok(tuple) => tuple, Err(e) => { @@ -353,12 +233,13 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, } process_testnet_subcommand(sub_cli_args, client_config, log) } - _ => { - unimplemented!("Resuming (not starting a testnet)"); - } + _ => return Err("You must use the testnet command. See '--help'.".into()), } } +/// Parses the `testnet` CLI subcommand. +/// +/// This is not a pure function, it reads from disk and may contact network servers. fn process_testnet_subcommand( cli_args: &ArgMatches, mut client_config: ClientConfig, @@ -381,7 +262,12 @@ fn process_testnet_subcommand( eth2_config } else { - return Err("Starting without bootstrap is not implemented".into()); + match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("No --spec flag provided. 
See '--help'.".into()), + } }; client_config.key_source = match cli_args.subcommand() { diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index ae6f945310..8adc79b91e 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -188,12 +188,7 @@ impl Service Arc::new(kps), - None => { - return Err("Unable to locate validator key pairs, nothing to do.".into()); - } - }; + let keypairs = Arc::new(client_config.fetch_keys(&log)?); let slots_per_epoch = E::slots_per_epoch(); From 457e04f1e0cc07da50bcbd0caceda045912d71d0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 20:12:51 +1000 Subject: [PATCH 150/186] Rename key gen subcommand in val client --- validator_client/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index c849be31b6..5e9c036ca3 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -119,7 +119,7 @@ fn main() { .long("bootstrap") .help("Connect to the RPC server to download the eth2_config via the HTTP API.") ) - .subcommand(SubCommand::with_name("range") + .subcommand(SubCommand::with_name("insecure") .about("Uses the standard, predicatable `interop` keygen method to produce a range \ of predicatable private keys and starts performing their validator duties.") .arg(Arg::with_name("first_validator") @@ -271,7 +271,7 @@ fn process_testnet_subcommand( }; client_config.key_source = match cli_args.subcommand() { - ("range", Some(sub_cli_args)) => { + ("insecure", Some(sub_cli_args)) => { let first = sub_cli_args .value_of("first_validator") .ok_or_else(|| "No first validator supplied")? 
From d445ae6ee8b605de9fd0f24e29339fbbc382d240 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 20:34:14 +1000 Subject: [PATCH 151/186] Update interop docs with val client CLI --- book/src/interop.md | 34 +++++++++++++++++++++++--------- book/src/testnets.md | 46 +++++++++++++++++++++++++++++--------------- 2 files changed, 56 insertions(+), 24 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index 79d4a13762..ea00c4ce83 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -46,20 +46,36 @@ To start the node (each time creating a fresh database and configuration in ``` $ ./beacon_node testnet -f quick 8 1567222226 ``` - ->This method conforms the ["Quick-start +> Notes: +> +> - This method conforms the ["Quick-start genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) method in the `ethereum/eth2.0-pm` repository. -> -> The `-f` flag ignores any existing database or configuration, backing them up -before re-initializing. `8` is the validator count and `1567222226` is the -genesis time. -> -> See `$ ./beacon_node testnet quick --help` for more configuration options. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is the validator count and `1567222226` is the genesis time. +> - See `$ ./beacon_node testnet quick --help` for more configuration options. #### Validator Client -**TODO** +Start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command means the [interop +> keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> will be used. 
+> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. +> - The validator client will operate very loosely in `testnet` mode, happily +> swapping between chains and creating double-votes. #### Starting from a genesis file diff --git a/book/src/testnets.md b/book/src/testnets.md index c07797ba0f..bf41e455d9 100644 --- a/book/src/testnets.md +++ b/book/src/testnets.md @@ -12,7 +12,7 @@ Setup a development environment, build the project and navigate to the `target/release` directory. 1. Start the first node: `$ ./beacon_node testnet -f recent 8` -1. Start a validator client: **TODO** +1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` 1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` _Repeat #3 to add more nodes._ @@ -30,16 +30,32 @@ Start a new node (creating a fresh database and configuration in `~/.lighthouse` $ ./beacon_node testnet -f recent 8 ``` -> The `-f` flag ignores any existing database or configuration, backing them up -before re-initializing. `8` is number of validators with deposits in the -genesis state. +> Notes: > -> See `$ ./beacon_node testnet recent --help` for more configuration options, -including `minimal`/`mainnet` specification. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is number of validators with deposits in the genesis state. +> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> including `minimal`/`mainnet` specification. 
## Starting the Validator Client -**TODO** +In a new terminal window, start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command uses predictable, well-known private keys. Since +> this is just a local testnet, these are fine. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. ## Adding another Beacon Node @@ -53,12 +69,12 @@ In a new terminal terminal, run: $ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 ``` -> The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of the -new node to `10` higher. Your first node's HTTP server was at TCP `5052` but -this one will be at `5062`. +> Notes: > -> The `-r` flag creates a new data directory in your home with a random string -appended, to avoid conflicting with any other running node. -> -> The HTTP address is the API of the first node. The new node will download -configuration via HTTP before starting sync via libp2p. +> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of +> the new node to `10` higher. Your first node's HTTP server was at TCP +> `5052` but this one will be at `5062`. +> - The `-r` flag creates a new data directory in your home with a random +> string appended, to avoid conflicting with any other running node. +> - The HTTP address is the API of the first node. The new node will download +> configuration via HTTP before starting sync via libp2p. From 6db1a191696d34a244ccdfe48e3292d3efc54991 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:20:31 +1000 Subject: [PATCH 152/186] Remove stray dbg! 
--- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index fdddf64812..06d2818e21 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -143,7 +143,6 @@ fn interop_genesis_state( let eth1_block_hash = Hash256::from_slice(&[42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - dbg!(amount); let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); From 82b4a1b3eb34b83f8513777aacb0882684ccd40c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:21:29 +1000 Subject: [PATCH 153/186] Fix multiple data_dirs in config --- beacon_node/src/config.rs | 54 +++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c9ad964f5a..efc0b125c9 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -33,7 +33,7 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { info!( log, "Resuming from existing datadir"; - "path" => format!("{:?}", builder.data_dir) + "path" => format!("{:?}", builder.client_config.data_dir) ); // If no primary subcommand was given, start the beacon chain from an existing @@ -42,7 +42,7 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { // Whilst there is no large testnet or mainnet force the user to specify how they want // to start a new chain (e.g., from a genesis YAML file, another node, etc). - if !builder.data_dir.exists() { + if !builder.client_config.data_dir.exists() { return Err( "No datadir found. To start a new beacon chain, see `testnet --help`. 
\ Use `--datadir` to specify a different directory" @@ -98,7 +98,7 @@ fn process_testnet_subcommand( info!( log, "Creating new datadir"; - "path" => format!("{:?}", builder.data_dir) + "path" => format!("{:?}", builder.client_config.data_dir) ); // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). @@ -166,7 +166,6 @@ fn process_testnet_subcommand( /// Allows for building a set of configurations based upon `clap` arguments. struct ConfigBuilder<'a> { log: &'a Logger, - pub data_dir: PathBuf, eth2_config: Eth2Config, client_config: ClientConfig, } @@ -189,11 +188,13 @@ impl<'a> ConfigBuilder<'a> { }) .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + let mut client_config = ClientConfig::default(); + client_config.data_dir = data_dir; + Ok(Self { log, - data_dir, eth2_config: Eth2Config::minimal(), - client_config: ClientConfig::default(), + client_config, }) } @@ -208,7 +209,7 @@ impl<'a> ConfigBuilder<'a> { let backup_dir = { let mut s = String::from("backup_"); s.push_str(&random_string(6)); - self.data_dir.join(s) + self.client_config.data_dir.join(s) }; fs::create_dir_all(&backup_dir) @@ -229,8 +230,8 @@ impl<'a> ConfigBuilder<'a> { Ok(()) }; - move_to_backup_dir(&self.data_dir.join(CLIENT_CONFIG_FILENAME))?; - move_to_backup_dir(&self.data_dir.join(ETH2_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; if let Some(db_path) = self.client_config.db_path() { move_to_backup_dir(&db_path)?; @@ -280,12 +281,10 @@ impl<'a> ConfigBuilder<'a> { /// /// Useful for easily spinning up ephemeral testnets. 
pub fn set_random_datadir(&mut self) -> Result<()> { - let mut s = DEFAULT_DATA_DIR.to_string(); - s.push_str("_random_"); - s.push_str(&random_string(6)); - - self.data_dir.pop(); - self.data_dir.push(s); + self.client_config + .data_dir + .push(format!("random_{}", random_string(6))); + self.client_config.network.network_dir = self.client_config.data_dir.join("network"); Ok(()) } @@ -339,16 +338,16 @@ impl<'a> ConfigBuilder<'a> { // Do not permit creating a new config when the datadir exists. if db_exists { - return Err("Database already exists. See `-f` in `testnet --help`".into()); + return Err("Database already exists. See `-f` or `-r` in `testnet --help`".into()); } // Create `datadir` and any non-existing parent directories. - fs::create_dir_all(&self.data_dir).map_err(|e| { + fs::create_dir_all(&self.client_config.data_dir).map_err(|e| { crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); format!("{}", e) })?; - let client_config_file = self.data_dir.join(CLIENT_CONFIG_FILENAME); + let client_config_file = self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME); if client_config_file.exists() { return Err(format!( "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", @@ -357,13 +356,13 @@ impl<'a> ConfigBuilder<'a> { } else { // Write the onfig to a TOML file in the datadir. write_to_file( - self.data_dir.join(CLIENT_CONFIG_FILENAME), + self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME), &self.client_config, ) .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; } - let eth2_config_file = self.data_dir.join(ETH2_CONFIG_FILENAME); + let eth2_config_file = self.client_config.data_dir.join(ETH2_CONFIG_FILENAME); if eth2_config_file.exists() { return Err(format!( "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", @@ -371,8 +370,11 @@ impl<'a> ConfigBuilder<'a> { )); } else { // Write the config to a TOML file in the datadir. 
- write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) - .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + write_to_file( + self.client_config.data_dir.join(ETH2_CONFIG_FILENAME), + &self.eth2_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; } Ok(()) @@ -386,7 +388,7 @@ impl<'a> ConfigBuilder<'a> { // // For now we return an error. In the future we may decide to boot a default (e.g., // public testnet or mainnet). - if !self.data_dir.exists() { + if !self.client_config.data_dir.exists() { return Err( "No datadir found. Either create a new testnet or specify a different `--datadir`." .into(), @@ -407,8 +409,8 @@ impl<'a> ConfigBuilder<'a> { ); } - self.load_eth2_config(self.data_dir.join(ETH2_CONFIG_FILENAME))?; - self.load_client_config(self.data_dir.join(CLIENT_CONFIG_FILENAME))?; + self.load_eth2_config(self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; Ok(()) } @@ -464,8 +466,6 @@ impl<'a> ConfigBuilder<'a> { return Err("Specification constant mismatch".into()); } - self.client_config.data_dir = self.data_dir; - Ok((self.client_config, self.eth2_config)) } } From 960082fe4ead7d93d2f8c5a1f9e463132cc5e778 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:21:46 +1000 Subject: [PATCH 154/186] Set all listen addresses to 0.0.0.0 for testnets --- beacon_node/src/config.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index efc0b125c9..a3829a33c3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,6 +5,7 @@ use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; +use std::net::Ipv4Addr; use std::path::{Path, PathBuf}; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; @@ -101,6 
+102,10 @@ fn process_testnet_subcommand( "path" => format!("{:?}", builder.client_config.data_dir) ); + // When using the testnet command we listen on all addresses. + builder.set_listen_addresses("0.0.0.0".into())?; + warn!(log, "All services listening on 0.0.0.0"); + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { @@ -437,6 +442,19 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + /// Sets all listening addresses to the given `addr`. + pub fn set_listen_addresses(&mut self, addr: String) -> Result<()> { + let addr = addr + .parse::() + .map_err(|e| format!("Unable to parse default listen address: {:?}", e))?; + + self.client_config.network.listen_address = addr.clone().into(); + self.client_config.rpc.listen_address = addr.clone(); + self.client_config.rest_api.listen_address = addr.clone(); + + Ok(()) + } + /// Consumes self, returning the configs. /// /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand From a78b030f584c19ca129074600b6d984e5c2ae491 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:09 +1000 Subject: [PATCH 155/186] Fix rpc port-bump bug --- beacon_node/src/config.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index a3829a33c3..c3dfad9bab 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -472,7 +472,6 @@ impl<'a> ConfigBuilder<'a> { self.client_config.network.libp2p_port += bump; self.client_config.network.discovery_port += bump; self.client_config.rpc.port += bump; - self.client_config.rpc.port += bump; self.client_config.rest_api.port += bump; } From aa3bc6bf670e809e48bc2a2a15a4b039b7d48874 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:20 +1000 Subject: [PATCH 156/186] Update book --- book/src/SUMMARY.md | 3 +- book/src/interop.md | 4 +-- book/src/intro.md | 2 +- book/src/testnets.md | 86 
+++++--------------------------------------- 4 files changed, 13 insertions(+), 82 deletions(-) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e08af247c4..f0ad411449 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,5 +3,6 @@ * [Introduction](./intro.md) * [Development Environment](./setup.md) * [Testnets](./testnets.md) - * [Simple local testnet](./testnets.md) + * [Simple Local Testnet](./simple-testnet.md) * [Interop](./interop.md) + * [Interop Tips & Tricks](./interop-tips.md) diff --git a/book/src/interop.md b/book/src/interop.md index ea00c4ce83..c1a1d4a69c 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -67,14 +67,14 @@ $ ./validator_client testnet -b insecure 0 8 > > - The `-b` flag means the validator client will "bootstrap" specs and config > from the beacon node. -> - The `insecure` command means the [interop +> - The `insecure` command dictates that the [interop > keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) > will be used. > - The `0 8` indicates that this validator client should manage 8 validators, > starting at validator 0 (the first deposited validator). > - The validator client will try to connect to the beacon node at `localhost`. > See `--help` to configure that address and other features. -> - The validator client will operate very loosely in `testnet` mode, happily +> - The validator client will operate very unsafely in `testnet` mode, happily > swapping between chains and creating double-votes. #### Starting from a genesis file diff --git a/book/src/intro.md b/book/src/intro.md index f290b7e40f..e0e3cd6a0f 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -21,7 +21,7 @@ Documentation is provided for **researchers and developers** working on Ethereum 2.0 and assumes prior knowledge on the topic. - Get started with [development environment setup](setup.html). 
-- [Run a simple testnet](testnets.html) in Only Three CLI Commands™. +- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commands™. - Read about our interop workflow. - API? diff --git a/book/src/testnets.md b/book/src/testnets.md index bf41e455d9..180673fb36 100644 --- a/book/src/testnets.md +++ b/book/src/testnets.md @@ -1,80 +1,10 @@ -# Simple Local Testnet +# Testnets -You can setup a local, two-node testnet in **Only Three CLI Commands™**. +Lighthouse does not offer a public testnet _yet_. In the meantime, it's easy to +start a local testnet: -Follow the [Quick instructions](#tldr) version if you're confident, or see -[Detailed instructions](#detail) for more. - - -## Quick instructions - -Setup a development environment, build the project and navigate to the -`target/release` directory. - -1. Start the first node: `$ ./beacon_node testnet -f recent 8` -1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` -1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` - -_Repeat #3 to add more nodes._ - -## Detailed instructions - -First, setup a Lighthouse development environment and navigate to the -`target/release` directory (this is where the binaries are located). - -## Starting the Beacon Node - -Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: - -``` -$ ./beacon_node testnet -f recent 8 -``` - -> Notes: -> -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. -> - `8` is number of validators with deposits in the genesis state. -> - See `$ ./beacon_node testnet recent --help` for more configuration options, -> including `minimal`/`mainnet` specification. 
- -## Starting the Validator Client - -In a new terminal window, start the validator client with: - -``` -$ ./validator_client testnet -b insecure 0 8 -``` - -> Notes: -> -> - The `-b` flag means the validator client will "bootstrap" specs and config -> from the beacon node. -> - The `insecure` command uses predictable, well-known private keys. Since -> this is just a local testnet, these are fine. -> - The `0 8` indicates that this validator client should manage 8 validators, -> starting at validator 0 (the first deposited validator). -> - The validator client will try to connect to the beacon node at `localhost`. -> See `--help` to configure that address and other features. - -## Adding another Beacon Node - -You may connect another (non-validating) node to your local network using the -lighthouse `bootstrap` command. - -In a new terminal terminal, run: - - -``` -$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 -``` - -> Notes: -> -> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of -> the new node to `10` higher. Your first node's HTTP server was at TCP -> `5052` but this one will be at `5062`. -> - The `-r` flag creates a new data directory in your home with a random -> string appended, to avoid conflicting with any other running node. -> - The HTTP address is the API of the first node. The new node will download -> configuration via HTTP before starting sync via libp2p. +- [Run a simple testnet](testnets.html) in Only Three CLI Commands™. +- Developers of other Eth2 clients should see the [interop guide](interop.html). +- The [sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repo + contains a `docker-compose` setup that runs a multi-node network with + built-in metrics and monitoring dashboards, all from your local machine. 
From 8b4b13cb2f6ab332d2f9633ad852b3a97bc6a0b7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:39 +1000 Subject: [PATCH 157/186] Add missed book pages --- book/src/interop-tips.md | 104 +++++++++++++++++++++++++++++++++++++ book/src/simple-testnet.md | 80 ++++++++++++++++++++++++++++ 2 files changed, 184 insertions(+) create mode 100644 book/src/interop-tips.md create mode 100644 book/src/simple-testnet.md diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md new file mode 100644 index 0000000000..e581139c4e --- /dev/null +++ b/book/src/interop-tips.md @@ -0,0 +1,104 @@ +# Interop Tips & Tricks + +This document contains a list of tips and tricks that may be useful during +interop testing. + +## Command-line Interface + +The `--help` command provides detail on the CLI interface. Here are some +interop-specific CLI commands. + +### Specify a boot node by multiaddr + +You can specify a static list of multiaddrs when booting Lighthouse using +the `--libp2p-addresses` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. + +``` +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +``` + +### Specify a boot node by ENR + +You can specify a static list of Discv5 addresses when booting Lighthouse using +the `--boot-nodes` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. + +``` +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +``` + +### Avoid port clashes when starting nodes + +Starting a second Lighthouse node on the same machine will fail due to TCP/UDP +port collisions. Use the `-b` (`--port-bump`) flag to increase all listening +ports by some `n`. 
+ +#### Example: + +Increase all ports by `10` (using multiples of `10` is recommended). + +``` +$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +``` + +## HTTP API + +Examples assume there is a Lighthouse node exposing a HTTP API on +`localhost:5052`. Responses are JSON. + +### Get the node's ENR + +``` +$ curl localhost:5052/network/enr + +"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% +``` + +### Get a list of connected peer ids + +``` +$ curl localhost:5052/network/peers + +["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% +``` + +### Get the node's peer id + +``` +curl localhost:5052/network/peer_id + +"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% +``` + +### Get the list of listening libp2p addresses + +Lists all the libp2p multiaddrs that the node is listening on. + +``` +curl localhost:5052/network/listen_addresses + +["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% +``` + +### Get the node's beacon chain head + +``` +curl localhost:5052/beacon/head + +{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% +``` + +### Get the node's finalized checkpoint + +``` +curl localhost:5052/beacon/latest_finalized_checkpoint + +{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% +``` diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md new file mode 100644 index 0000000000..bf41e455d9 --- /dev/null +++ b/book/src/simple-testnet.md @@ -0,0 +1,80 @@ +# Simple Local Testnet + +You can setup a local, two-node testnet in **Only Three CLI Commands™**. 
+ +Follow the [Quick instructions](#tldr) version if you're confident, or see +[Detailed instructions](#detail) for more. + + +## Quick instructions + +Setup a development environment, build the project and navigate to the +`target/release` directory. + +1. Start the first node: `$ ./beacon_node testnet -f recent 8` +1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` +1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` + +_Repeat #3 to add more nodes._ + +## Detailed instructions + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +## Starting the Beacon Node + +Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: + +``` +$ ./beacon_node testnet -f recent 8 +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is number of validators with deposits in the genesis state. +> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> including `minimal`/`mainnet` specification. + +## Starting the Validator Client + +In a new terminal window, start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command uses predictable, well-known private keys. Since +> this is just a local testnet, these are fine. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. 
+ +## Adding another Beacon Node + +You may connect another (non-validating) node to your local network using the +lighthouse `bootstrap` command. + +In a new terminal terminal, run: + + +``` +$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +``` + +> Notes: +> +> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of +> the new node to `10` higher. Your first node's HTTP server was at TCP +> `5052` but this one will be at `5062`. +> - The `-r` flag creates a new data directory in your home with a random +> string appended, to avoid conflicting with any other running node. +> - The HTTP address is the API of the first node. The new node will download +> configuration via HTTP before starting sync via libp2p. From 246dcaa0942067c4199d6559b406120f2166ddfa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:30:56 +1000 Subject: [PATCH 158/186] Add extra log to validator client --- validator_client/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 5e9c036ca3..d5d2fc27f1 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -246,6 +246,7 @@ fn process_testnet_subcommand( log: &Logger, ) -> Result<(ClientConfig, Eth2Config)> { let eth2_config = if cli_args.is_present("bootstrap") { + info!(log, "Connecting to bootstrap server"); let bootstrapper = Bootstrapper::from_server_string(format!( "http://{}:{}", client_config.server, client_config.server_http_port From 4aa12dc4084513aaaf3aa5bb355e7bcd3005b1d3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 09:59:52 +1000 Subject: [PATCH 159/186] Set a default BN bootstrap address --- beacon_node/src/main.rs | 1 + book/src/simple-testnet.md | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 26537c6f76..69ac6f1bdd 100644 --- a/beacon_node/src/main.rs +++ 
b/beacon_node/src/main.rs @@ -252,6 +252,7 @@ fn main() { .arg(Arg::with_name("server") .value_name("HTTP_SERVER") .required(true) + .default_value("http://localhost:5052") .help("A HTTP server, with a http:// prefix")) .arg(Arg::with_name("libp2p-port") .short("p") diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md index bf41e455d9..b6fa19d6fe 100644 --- a/book/src/simple-testnet.md +++ b/book/src/simple-testnet.md @@ -66,7 +66,7 @@ In a new terminal terminal, run: ``` -$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +$ ./beacon_node -b 10 testnet -r bootstrap ``` > Notes: @@ -74,7 +74,8 @@ $ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 > - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of > the new node to `10` higher. Your first node's HTTP server was at TCP > `5052` but this one will be at `5062`. -> - The `-r` flag creates a new data directory in your home with a random -> string appended, to avoid conflicting with any other running node. -> - The HTTP address is the API of the first node. The new node will download -> configuration via HTTP before starting sync via libp2p. +> - The `-r` flag creates a new data directory with a random string appended +> (avoids data directory collisions between nodes). +> - The default bootstrap HTTP address is `http://localhost:5052`. The new node +> will download configuration via HTTP before starting sync via libp2p. +> - See `$ ./beacon_node testnet bootstrap --help` for more configuration. 
From 82dc84ebbff604304d5a08c2b6e09d1cdbd4024a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:22:29 +1000 Subject: [PATCH 160/186] Add slot-time CLI argument --- beacon_node/src/config.rs | 18 +++++++++++++++++- beacon_node/src/main.rs | 7 +++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c3dfad9bab..949b7277e0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -74,7 +74,7 @@ fn process_testnet_subcommand( if let Some(path_string) = cli_args.value_of("eth2-config") { if is_bootstrap { - return Err("Cannot supply --eth2-config when using bootsrap".to_string()); + return Err("Cannot supply --eth2-config when using bootstrap".to_string()); } let path = path_string @@ -85,6 +85,18 @@ fn process_testnet_subcommand( builder.update_spec_from_subcommand(&cli_args)?; } + if let Some(slot_time) = cli_args.value_of("slot-time") { + if is_bootstrap { + return Err("Cannot supply --slot-time flag whilst using bootstrap.".into()); + } + + let slot_time = slot_time + .parse::() + .map_err(|e| format!("Unable to parse slot-time: {:?}", e))?; + + builder.set_slot_time(slot_time); + } + if let Some(path_string) = cli_args.value_of("client-config") { let path = path_string .parse::() @@ -307,6 +319,10 @@ impl<'a> ConfigBuilder<'a> { self.eth2_config = eth2_config; } + fn set_slot_time(&mut self, milliseconds_per_slot: u64) { + self.eth2_config.spec.milliseconds_per_slot = milliseconds_per_slot; + } + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// /// Returns an error if the `--spec` flag is not present in the given `cli_args`. 
diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 69ac6f1bdd..a2a977e854 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -241,6 +241,13 @@ fn main() { backup directory.") .conflicts_with("random-datadir") ) + .arg( + Arg::with_name("slot-time") + .long("slot-time") + .short("t") + .value_name("MILLISECONDS") + .help("Defines the slot time when creating a new testnet.") + ) /* * `boostrap` * From 215200e9eba6151d92f77f9ca10bdea59361c093 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:34:24 +1000 Subject: [PATCH 161/186] Add interop tip --- book/src/interop-tips.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md index e581139c4e..969d49b4f3 100644 --- a/book/src/interop-tips.md +++ b/book/src/interop-tips.md @@ -48,6 +48,22 @@ Increase all ports by `10` (using multiples of `10` is recommended). $ ./beacon_node -b 10 testnet -f quick 8 1567222226 ``` +### Start a testnet with a custom slot time + +Lighthouse can run at quite low slot times when there are few validators (e.g., +`500 ms` slot times should be fine for 8 validators). + +#### Example + +The `-t` (`--slot-time`) flag specifies the milliseconds per slot. + +``` +$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +``` + +> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with +> this flag. 
+ ## HTTP API Examples assume there is a Lighthouse node exposing a HTTP API on From 5616e0a2393b73ff5d692667dd78a2029d05da03 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:52:22 +1000 Subject: [PATCH 162/186] Update interop docs --- book/src/interop.md | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index c1a1d4a69c..3f5bfdbd4f 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -23,9 +23,30 @@ After building the binaries with `cargo build --release --all`, there will be a `target/release` directory in the root of the Lighthouse repository. This is where the `beacon_node` and `validator_client` binaries are located. -## Interop Procedure +## CLI Overview -The following scenarios are documented: +The Lighthouse CLI has two primary tasks: + +- **Starting** a new testnet chain using `$ ./beacon_node testnet`. +- **Resuming** an existing chain with `$ ./beacon_node` (omit `testnet`). + +There are several methods for starting a new chain: + +- `quick`: using the `(validator_client, genesis_time)` tuple. +- `recent`: as above but `genesis_time` is set to the start of some recent time + window. +- `bootstrap`: a Lighthouse-specific method where we connect to a running node + and download it's specification and genesis state via the HTTP API. + +See `$ ./beacon_node testnet --help` for more detail. + +Once a chain has been started, it can be resumed by running `$ ./beacon_node` +(potentially supplying the `--datadir`, if a non-default directory was used). + + +## Scenarios + +The following scenarios are documented here: - [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a `(validator_count, genesis)` tuple. @@ -34,8 +55,9 @@ The following scenarios are documented: - [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse node. 
-First, setup a Lighthouse development environment and navigate to the -`target/release` directory (this is where the binaries are located). +All scenarios assume a working development environment and commands are based +in the `target/release` directory (this is the build dir for `cargo`). + #### Quick-start Beacon Node From d4bf1390c9cdf6753000bfd879bcb31dba073e20 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 11:39:28 +1000 Subject: [PATCH 163/186] Add interop eth1 data stub --- beacon_node/beacon_chain/src/beacon_chain.rs | 30 +++++++++++++++---- .../src/beacon_state/beacon_state_types.rs | 7 +++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6380d03b3e..99dd9a6426 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,6 +4,7 @@ use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +use eth2_hashing::hash; use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; @@ -1198,11 +1199,7 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. 
- eth1_data: Eth1Data { - deposit_count: state.eth1_data.deposit_count, - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, + eth1_data: Self::eth1_data_stub(&state), graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), @@ -1231,6 +1228,22 @@ impl BeaconChain { Ok((block, state)) } + fn eth1_data_stub(state: &BeaconState) -> Eth1Data { + let current_epoch = state.current_epoch(); + let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; + + // TODO: confirm that `int_to_bytes32` is correct. + let deposit_root = hash(&int_to_bytes32(current_voting_period)); + let block_hash = hash(&deposit_root); + + Eth1Data { + deposit_root: Hash256::from_slice(&deposit_root), + deposit_count: state.eth1_deposit_index, + block_hash: Hash256::from_slice(&block_hash), + } + } + /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); @@ -1426,6 +1439,13 @@ impl BeaconChain { } } +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: u64) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + impl From for Error { fn from(e: DBError) -> Error { Error::DBError(e) diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 0e76942dde..f589b3d3ef 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -120,6 +120,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { fn epochs_per_historical_vector() -> usize { Self::EpochsPerHistoricalVector::to_usize() } + + /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification. 
+ /// + /// Spec v0.8.1 + fn slots_per_eth1_voting_period() -> usize { + Self::EpochsPerHistoricalVector::to_usize() + } } /// Macro to inherit some type values from another EthSpec. From d05c2d4110d24d02c3be35632b0ab1505da36d33 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 13:21:53 +1000 Subject: [PATCH 164/186] Start working on u64 json issue --- eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state/committee_cache/tests.rs | 2 +- eth2/types/src/slot_epoch.rs | 12 ++++++++++++ .../builders/testing_proposer_slashing_builder.rs | 4 ++-- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 36cfc39eca..95d7a03174 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -31,3 +31,4 @@ tree_hash_derive = "0.2" [dev-dependencies] env_logger = "0.6.0" +serde_json = "^1.0" diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index 28e9d92f85..4c17d3f96c 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -9,7 +9,7 @@ fn default_values() { let cache = CommitteeCache::default(); assert_eq!(cache.is_initialized_at(Epoch::new(0)), false); - assert_eq!(cache.active_validator_indices(), &[]); + assert!(&cache.active_validator_indices().is_empty()); assert_eq!(cache.get_crosslink_committee_for_shard(0), None); assert_eq!(cache.get_attestation_duties(0), None); assert_eq!(cache.active_validator_count(), 0); diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index bd611aa0ce..748d6445f7 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -191,4 +191,16 @@ mod epoch_tests { Epoch::from_ssz_bytes(&max_epoch.as_ssz_bytes()).unwrap() ); } + + #[test] + fn epoch_max_value_json() { + let x: Epoch = Epoch::from(u64::max_value()); + let json = serde_json::to_string(&x).expect("should json encode"); + + 
assert_eq!(&json, "18446744073709552000"); + assert_eq!( + serde_json::from_str::(&json).expect("should json decode"), + x + ); + } } diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 6c72b520fa..b972934276 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -39,15 +39,15 @@ impl TestingProposerSlashingBuilder { ..header_1.clone() }; + let epoch = slot.epoch(T::slots_per_epoch()); + header_1.signature = { let message = header_1.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; header_2.signature = { let message = header_2.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; From 6c50758bdf84747ff79e9bf834dec139682b5817 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 14:29:36 +1000 Subject: [PATCH 165/186] Add ResponseBuilder to rest_api --- beacon_node/rest_api/Cargo.toml | 3 ++ beacon_node/rest_api/src/beacon.rs | 19 +++----- beacon_node/rest_api/src/lib.rs | 2 + beacon_node/rest_api/src/response_builder.rs | 50 ++++++++++++++++++++ 4 files changed, 61 insertions(+), 13 deletions(-) create mode 100644 beacon_node/rest_api/src/response_builder.rs diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 057de4f946..863ea04da4 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -14,9 +14,12 @@ store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } serde_json = "^1.0" +serde_yaml = "0.8" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" +eth2_ssz = { path = "../../eth2/utils/ssz" } +eth2_ssz_derive = { path = 
"../../eth2/utils/ssz_derive" } state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 1c66a2819f..85f20294d8 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,8 +1,9 @@ -use super::{success_response, ApiResult}; +use super::{success_response, ApiResult, ResponseBuilder}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; +use ssz_derive::Encode; use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; @@ -33,7 +34,7 @@ pub fn get_head(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse { pub root: Hash256, @@ -77,11 +78,7 @@ pub fn get_block(req: Request) -> ApiResult beacon_block: block, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. 
@@ -104,7 +101,7 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { pub root: Hash256, @@ -144,11 +141,7 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 7c5ab30ef5..1b5a2d6ee8 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -8,6 +8,7 @@ mod helpers; mod metrics; mod network; mod node; +mod response_builder; mod spec; mod url_query; mod validator; @@ -18,6 +19,7 @@ use eth2_config::Eth2Config; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; +use response_builder::ResponseBuilder; use slog::{info, o, warn}; use std::ops::Deref; use std::path::PathBuf; diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs new file mode 100644 index 0000000000..9b8819996c --- /dev/null +++ b/beacon_node/rest_api/src/response_builder.rs @@ -0,0 +1,50 @@ +use super::{ApiError, ApiResult}; +use http::header; +use hyper::{Body, Request, Response, StatusCode}; +use serde::Serialize; +use ssz::Encode; + +pub enum Encoding { + JSON, + SSZ, + YAML, +} + +pub struct ResponseBuilder { + encoding: Encoding, +} + +impl ResponseBuilder { + pub fn new(req: &Request) -> Self { + let encoding = match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/ssz" => Encoding::SSZ, + Some(h) if h == "application/yaml" => Encoding::YAML, + _ => Encoding::JSON, + }; + + Self { 
encoding } + } + + pub fn body(self, item: &T) -> ApiResult { + let body: Body = match self.encoding { + Encoding::JSON => Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + Encoding::SSZ => Body::from(item.as_ssz_bytes()), + Encoding::YAML => Body::from(serde_yaml::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + }; + + Response::builder() + .status(StatusCode::OK) + .body(Body::from(body)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } +} From 11a1505784570b946a18f2f1883e148c8be0fb78 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:07:32 +1000 Subject: [PATCH 166/186] Allow starting from SSZ genesis state --- .../beacon_chain/src/beacon_chain_builder.rs | 17 ++++++++++++++++- beacon_node/client/src/config.rs | 2 ++ beacon_node/client/src/lib.rs | 9 +++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 06d2818e21..514a72a405 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -4,9 +4,10 @@ use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; -use ssz::Encode; +use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; +use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; @@ -61,6 +62,20 @@ impl BeaconChainBuilder { Ok(Self::from_genesis_state(genesis_state, spec, log)) } + pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let mut file = File::open(file.clone()) + .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, 
e))?; + + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; + + let genesis_state = BeaconState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 2f5389ce54..2fb62c3f9b 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -54,6 +54,8 @@ pub enum BeaconChainStartMethod { }, /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, + /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. + Ssz { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. HttpBootstrap { server: String, port: Option }, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 766d12c561..1396ed45ff 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -138,6 +138,15 @@ where ); BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? } + BeaconChainStartMethod::Ssz { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "ssz" + ); + BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? 
+ } BeaconChainStartMethod::HttpBootstrap { server, port } => { info!( log, From bfbe7767123ffabd56bafe5146fd45918b1a1e96 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:07:48 +1000 Subject: [PATCH 167/186] Add special genesis state API endpoint --- beacon_node/rest_api/src/beacon.rs | 15 +++++++++++++++ beacon_node/rest_api/src/lib.rs | 1 + beacon_node/src/config.rs | 25 +++++++++++++++++++++++++ beacon_node/src/main.rs | 11 ++++++++--- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 85f20294d8..a4660836d8 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -101,6 +101,21 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } +/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. +/// +/// Will not return a state if the request slot is in the future. Will return states higher than +/// the current head by skipping slots. +pub fn get_genesis_state(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + + ResponseBuilder::new(&req).body(&state) +} + #[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 1b5a2d6ee8..4aab91e699 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -147,6 +147,7 @@ pub fn start_server( beacon::get_latest_finalized_checkpoint::(req) } (&Method::GET, "/beacon/state") => beacon::get_state::(req), + (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), //TODO: Add aggreggate/filtered state lookups here, e.g. 
/beacon/validators/balances diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 949b7277e0..ba831c7332 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -172,6 +172,31 @@ fn process_testnet_subcommand( genesis_time, }) } + ("file", Some(cli_args)) => { + let file = cli_args + .value_of("file") + .ok_or_else(|| "No filename specified")? + .parse::() + .map_err(|e| format!("Unable to parse filename: {:?}", e))?; + + let format = cli_args + .value_of("format") + .ok_or_else(|| "No file format specified")?; + + let start_method = match format { + "yaml" => BeaconChainStartMethod::Yaml { file }, + "ssz" => BeaconChainStartMethod::Ssz { file }, + other => return Err(format!("Unknown genesis file format: {}", other)), + }; + + builder.set_beacon_chain_start_method(start_method) + } + (cmd, Some(_)) => { + return Err(format!( + "Invalid valid method specified: {}. See 'testnet --help'.", + cmd + )) + } _ => return Err("No testnet method specified. See 'testnet --help'.".into()), }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index a2a977e854..cf7a7b854d 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -312,9 +312,14 @@ fn main() { * * Start a new node, using a genesis state loaded from a YAML file */ - .subcommand(SubCommand::with_name("yaml") - .about("Creates a new datadir where the genesis state is read from YAML. Will fail to parse \ - a YAML state that was generated to a different spec than that specified by --spec.") + .subcommand(SubCommand::with_name("file") + .about("Creates a new datadir where the genesis state is read from YAML. 
May fail to parse \ + a file that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("format") + .value_name("FORMAT") + .required(true) + .possible_values(&["yaml", "ssz"]) + .help("The encoding of the state in the file.")) .arg(Arg::with_name("file") .value_name("YAML_FILE") .required(true) From ba22d28026c0af1e894d0f5394e19e2d7d040d49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:36:58 +1000 Subject: [PATCH 168/186] Update docs for testnet file start methods --- book/src/interop.md | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index 3f5bfdbd4f..a2f80584da 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -35,6 +35,7 @@ There are several methods for starting a new chain: - `quick`: using the `(validator_client, genesis_time)` tuple. - `recent`: as above but `genesis_time` is set to the start of some recent time window. +- `file`: loads the genesis file from disk in one of multiple formats. - `bootstrap`: a Lighthouse-specific method where we connect to a running node and download it's specification and genesis state via the HTTP API. @@ -85,12 +86,12 @@ Start the validator client with: ``` $ ./validator_client testnet -b insecure 0 8 ``` + > Notes: > > - The `-b` flag means the validator client will "bootstrap" specs and config > from the beacon node. -> - The `insecure` command dictates that the [interop -> keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) > will be used. > - The `0 8` indicates that this validator client should manage 8 validators, > starting at validator 0 (the first deposited validator). 
@@ -101,8 +102,35 @@ $ ./validator_client testnet -b insecure 0 8 #### Starting from a genesis file -**TODO** +A genesis state can be read from file using the `testnet file` subcommand. +There are three supported formats: + +- `ssz` (default) +- `json` +- `yaml` + +Start a new node using `/tmp/genesis.ssz` as the genesis state: + +``` +$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - See `$ ./beacon_node testnet file --help` for more configuration options. #### Exporting a genesis file -**TODO** +Genesis states can downloaded from a running Lighthouse node via the HTTP API. Three content-types are supported: + +- `application/json` +- `application/yaml` +- `application/ssz` + +Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: + +``` +$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz +``` From 70f4052b2e67e0d7f14630f4e24c2c45fea54892 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:58:53 +1000 Subject: [PATCH 169/186] Allow starting testnet from JSON state --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain_builder.rs | 10 ++++++++++ beacon_node/client/src/config.rs | 2 ++ beacon_node/client/src/lib.rs | 9 +++++++++ beacon_node/src/config.rs | 1 + beacon_node/src/main.rs | 4 ++-- 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index d5594a49af..ae89ac1e13 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -18,6 +18,7 @@ rayon = "1.0" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8" +serde_json = "^1.0" slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } 
diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 514a72a405..93c67447eb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -76,6 +76,16 @@ impl BeaconChainBuilder { Ok(Self::from_genesis_state(genesis_state, spec, log)) } + pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_json::from_reader(file) + .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 2fb62c3f9b..f9b366eb15 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -56,6 +56,8 @@ pub enum BeaconChainStartMethod { Yaml { file: PathBuf }, /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. Ssz { file: PathBuf }, + /// Create a new beacon chain by loading a JSON-encoded genesis state from a file. + Json { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. HttpBootstrap { server: String, port: Option }, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 1396ed45ff..e14da2af9f 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -147,6 +147,15 @@ where ); BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? 
} + BeaconChainStartMethod::Json { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "json" + ); + BeaconChainBuilder::json_state(file, spec.clone(), log.clone())? + } BeaconChainStartMethod::HttpBootstrap { server, port } => { info!( log, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ba831c7332..4a3f6b6a7c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -186,6 +186,7 @@ fn process_testnet_subcommand( let start_method = match format { "yaml" => BeaconChainStartMethod::Yaml { file }, "ssz" => BeaconChainStartMethod::Ssz { file }, + "json" => BeaconChainStartMethod::Json { file }, other => return Err(format!("Unknown genesis file format: {}", other)), }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 6ca85bd56a..b914be5492 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -318,7 +318,7 @@ fn main() { .arg(Arg::with_name("format") .value_name("FORMAT") .required(true) - .possible_values(&["yaml", "ssz"]) + .possible_values(&["yaml", "ssz", "json"]) .help("The encoding of the state in the file.")) .arg(Arg::with_name("file") .value_name("YAML_FILE") @@ -344,7 +344,7 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); warn!( log, From 5a8c31e6bf8e10b6fde71fbd3e93fa0b05d6f72a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 16:29:05 +1000 Subject: [PATCH 170/186] Remove JSON epoch test --- eth2/types/src/slot_epoch.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 748d6445f7..bd611aa0ce 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -191,16 +191,4 @@ mod epoch_tests { Epoch::from_ssz_bytes(&max_epoch.as_ssz_bytes()).unwrap() ); } - - #[test] - fn epoch_max_value_json() { - let x: Epoch 
= Epoch::from(u64::max_value()); - let json = serde_json::to_string(&x).expect("should json encode"); - - assert_eq!(&json, "18446744073709552000"); - assert_eq!( - serde_json::from_str::(&json).expect("should json decode"), - x - ); - } } From a0e019b4d7553555b663d1f7aeb34fdb70fb0626 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 18:19:30 +1000 Subject: [PATCH 171/186] Fix interop eth1 blockhash --- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 93c67447eb..37039dce03 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -165,7 +165,7 @@ fn interop_genesis_state( spec: &ChainSpec, ) -> Result, String> { let keypairs = generate_deterministic_keypairs(validator_count); - let eth1_block_hash = Hash256::from_slice(&[42; 32]); + let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; @@ -277,7 +277,7 @@ mod test { assert_eq!( state.eth1_data.block_hash, - Hash256::from_slice(&[42; 32]), + Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); From 969b6d7575c69007d273630359cd37b0a61afb5b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 09:50:15 +1000 Subject: [PATCH 172/186] Tidy BeaconStateCow --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 ++++++++++---------- beacon_node/rpc/src/validator.rs | 2 +- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 99dd9a6426..97af437187 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,25 +77,31 @@ pub enum AttestationProcessingOutcome { 
Invalid(AttestationValidationError), } -pub enum StateCow<'a, T: EthSpec> { +/// Effectively a `Cow`, however when it is `Borrowed` it holds a `RwLockReadGuard` (a +/// read-lock on some read/write-locked state). +/// +/// Only has a small subset of the functionality of a `std::borrow::Cow`. +pub enum BeaconStateCow<'a, T: EthSpec> { Borrowed(RwLockReadGuard<'a, CheckPoint>), Owned(BeaconState), } -impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { - fn as_ref(&self) -> &BeaconState { +impl<'a, T: EthSpec> BeaconStateCow<'a, T> { + pub fn maybe_as_mut_ref(&mut self) -> Option<&mut BeaconState> { match self { - StateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, - StateCow::Owned(state) => &state, + BeaconStateCow::Borrowed(_) => None, + BeaconStateCow::Owned(ref mut state) => Some(state), } } } -impl<'a, T: EthSpec> StateCow<'a, T> { - pub fn as_mut_ref(&mut self) -> Option<&mut BeaconState> { +impl<'a, T: EthSpec> std::ops::Deref for BeaconStateCow<'a, T> { + type Target = BeaconState; + + fn deref(&self) -> &BeaconState { match self { - StateCow::Borrowed(_) => None, - StateCow::Owned(ref mut state) => Some(state), + BeaconStateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, + BeaconStateCow::Owned(state) => &state, } } } @@ -374,11 +380,11 @@ impl BeaconChain { /// /// Returns `None` when the state is not found in the database or there is an error skipping /// to a future state. 
- pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { let head_state = &self.head().beacon_state; if slot == head_state.slot { - Ok(StateCow::Borrowed(self.head())) + Ok(BeaconStateCow::Borrowed(self.head())) } else if slot > head_state.slot { let head_state_slot = head_state.slot; let mut state = head_state.clone(); @@ -398,7 +404,7 @@ impl BeaconChain { } }; } - Ok(StateCow::Owned(state)) + Ok(BeaconStateCow::Owned(state)) } else { let state_root = self .rev_iter_state_roots() @@ -406,7 +412,7 @@ impl BeaconChain { .map(|(root, _slot)| root) .ok_or_else(|| Error::NoStateForSlot(slot))?; - Ok(StateCow::Owned( + Ok(BeaconStateCow::Owned( self.store .get(&state_root)? .ok_or_else(|| Error::NoStateForSlot(slot))?, @@ -422,7 +428,7 @@ impl BeaconChain { /// /// Returns `None` when there is an error skipping to a future state or the slot clock cannot /// be read. - pub fn state_now(&self) -> Result, Error> { + pub fn state_now(&self) -> Result, Error> { self.state_at_slot(self.slot()?) } @@ -465,25 +471,24 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch(slot) == epoch(head_state.slot) { - StateCow::Borrowed(self.head()) + BeaconStateCow::Borrowed(self.head()) } else { self.state_at_slot(slot)? 
}; - if let Some(state) = state.as_mut_ref() { + if let Some(state) = state.maybe_as_mut_ref() { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } - if epoch(state.as_ref().slot) != epoch(slot) { + if epoch(state.slot) != epoch(slot) { return Err(Error::InvariantViolated(format!( "Epochs in consistent in proposer lookup: state: {}, requested: {}", - epoch(state.as_ref().slot), + epoch(state.slot), epoch(slot) ))); } state - .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) .map_err(Into::into) } @@ -501,26 +506,25 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch == as_epoch(head_state.slot) { - StateCow::Borrowed(self.head()) + BeaconStateCow::Borrowed(self.head()) } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? }; - if let Some(state) = state.as_mut_ref() { + if let Some(state) = state.maybe_as_mut_ref() { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } - if as_epoch(state.as_ref().slot) != epoch { + if as_epoch(state.slot) != epoch { return Err(Error::InvariantViolated(format!( "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", - as_epoch(state.as_ref().slot), + as_epoch(state.slot), epoch ))); } - if let Some(attestation_duty) = state - .as_ref() - .get_attestation_duties(validator_index, RelativeEpoch::Current)? + if let Some(attestation_duty) = + state.get_attestation_duties(validator_index, RelativeEpoch::Current)? 
{ Ok(Some((attestation_duty.slot, attestation_duty.shard))) } else { @@ -541,12 +545,7 @@ impl BeaconChain { let head_block_root = self.head().beacon_block_root; let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block( - shard, - head_block_root, - head_block_slot, - state.as_ref(), - ) + self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -868,7 +867,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => self .op_pool - .insert_voluntary_exit(exit, state.as_ref(), &self.spec), + .insert_voluntary_exit(exit, &*state, &self.spec), Err(e) => { error!( &self.log, @@ -884,9 +883,7 @@ impl BeaconChain { /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { match self.state_now() { - Ok(state) => self - .op_pool - .insert_transfer(transfer, state.as_ref(), &self.spec), + Ok(state) => self.op_pool.insert_transfer(transfer, &*state, &self.spec), Err(e) => { error!( &self.log, @@ -907,7 +904,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => { self.op_pool - .insert_proposer_slashing(proposer_slashing, state.as_ref(), &self.spec) + .insert_proposer_slashing(proposer_slashing, &*state, &self.spec) } Err(e) => { error!( @@ -929,7 +926,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => { self.op_pool - .insert_attester_slashing(attester_slashing, state.as_ref(), &self.spec) + .insert_attester_slashing(attester_slashing, &*state, &self.spec) } Err(e) => { error!( @@ -1150,7 +1147,7 @@ impl BeaconChain { .state_at_slot(slot - 1) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - self.produce_block_on_state(state.as_ref().clone(), slot, randao_reveal) + self.produce_block_on_state(state.clone(), slot, randao_reveal) } /// Produce a block for some 
`slot` upon the given `state`. diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 84995ca504..abc1cffc55 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -32,7 +32,7 @@ impl ValidatorService for ValidatorServiceInstance { let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { - state.as_ref().clone() + state.clone() } else { let log_clone = self.log.clone(); let f = sink From 4bfc1a56885e34df8c140bc81be8a025ef803aa8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:23:21 +1000 Subject: [PATCH 173/186] Make significant changes to the book --- book/src/SUMMARY.md | 10 ++- book/src/interop-cheat-sheet.md | 143 ++++++++++++++++++++++++++++++++ book/src/interop-cli.md | 29 +++++++ book/src/interop-environment.md | 30 +++++++ book/src/interop-scenarios.md | 97 ++++++++++++++++++++++ book/src/interop-tips.md | 119 -------------------------- book/src/interop.md | 135 ++---------------------------- book/src/intro.md | 32 ++----- 8 files changed, 316 insertions(+), 279 deletions(-) create mode 100644 book/src/interop-cheat-sheet.md create mode 100644 book/src/interop-cli.md create mode 100644 book/src/interop-environment.md create mode 100644 book/src/interop-scenarios.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index f0ad411449..4ffa694cd4 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,9 @@ * [Introduction](./intro.md) * [Development Environment](./setup.md) -* [Testnets](./testnets.md) - * [Simple Local Testnet](./simple-testnet.md) - * [Interop](./interop.md) - * [Interop Tips & Tricks](./interop-tips.md) +* [Simple Local Testnet](./simple-testnet.md) +* [Interop](./interop.md) + * [Environment](./interop-environment.md) + * [CLI Overview](./interop-cli.md) + * [Scenarios](./interop-scenarios.md) + * [Cheat-sheet](./interop-cheat-sheet.md) diff --git 
a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md new file mode 100644 index 0000000000..4f6f079b47 --- /dev/null +++ b/book/src/interop-cheat-sheet.md @@ -0,0 +1,143 @@ +# Interop Cheat-sheet + +This document contains a list of tips and tricks that may be useful during +interop testing. + +- When starting a beacon node: + - [Specify a boot node by multiaddr](#boot-node-multiaddr) + - [Specify a boot node by ENR](#boot-node-enr) + - [Avoid port clashes when starting multiple nodes](#port-bump) + - [Specify a custom slot time](#slot-time) +- Using the beacon node HTTP API: + - [Curl a nodes ENR](#http-enr) + - [Curl a nodes connected peers](#http-peer-ids) + - [Curl a nodes local peer id](#http-peer-id) + - [Curl a nodes listening multiaddrs](#http-listen-addresses) + - [Curl a nodes beacon chain head](#http-head) + - [Curl a nodes finalized checkpoint](#http-finalized) + +## Category: CLI + +The `--help` command provides detail on the CLI interface. Here are some +interop-specific CLI commands. + + +### Specify a boot node by multiaddr + +You can specify a static list of multiaddrs when booting Lighthouse using +the `--libp2p-addresses` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. + +``` +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +``` + + +### Specify a boot node by ENR + +You can specify a static list of Discv5 addresses when booting Lighthouse using +the `--boot-nodes` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. 
+ +``` +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +``` + + +### Avoid port clashes when starting nodes + +Starting a second Lighthouse node on the same machine will fail due to TCP/UDP +port collisions. Use the `-b` (`--port-bump`) flag to increase all listening +ports by some `n`. + +#### Example: + +Increase all ports by `10` (using multiples of `10` is recommended). + +``` +$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +``` + + +### Start a testnet with a custom slot time + +Lighthouse can run at quite low slot times when there are few validators (e.g., +`500 ms` slot times should be fine for 8 validators). + +#### Example + +The `-t` (`--slot-time`) flag specifies the milliseconds per slot. + +``` +$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +``` + +> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with +> this flag. + +## Category: HTTP API + +Examples assume there is a Lighthouse node exposing a HTTP API on +`localhost:5052`. Responses are JSON. + + +### Get the node's ENR + +``` +$ curl localhost:5052/network/enr + +"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% +``` + + +### Get a list of connected peer ids + +``` +$ curl localhost:5052/network/peers + +["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% +``` + + +### Get the node's peer id + +``` +curl localhost:5052/network/peer_id + +"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% +``` + + +### Get the list of listening libp2p addresses + +Lists all the libp2p multiaddrs that the node is listening on. 
+ +``` +curl localhost:5052/network/listen_addresses + +["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% +``` + + +### Get the node's beacon chain head + +``` +curl localhost:5052/beacon/head + +{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% +``` + + +### Get the node's finalized checkpoint + +``` +curl localhost:5052/beacon/latest_finalized_checkpoint + +{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% +``` diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md new file mode 100644 index 0000000000..3658781d49 --- /dev/null +++ b/book/src/interop-cli.md @@ -0,0 +1,29 @@ +# Interop CLI Overview + +The Lighthouse CLI has two primary tasks: + +- **Resuming** an existing database with `$ ./beacon_node`. +- **Creating** a new testnet database using `$ ./beacon_node testnet`. + +_See [Scenarios](./interop-scenarios.md) for methods we're likely to use +during interop._ + +## Creating a new database + +There are several methods for creating a new beacon node database: + +- `quick`: using the `(validator_client, genesis_time)` tuple. +- `recent`: as above but `genesis_time` is set to the start of some recent time + window. +- `file`: loads the genesis file from disk in one of multiple formats. +- `bootstrap`: a Lighthouse-specific method where we connect to a running node + and download it's specification and genesis state via the HTTP API. + +See `$ ./beacon_node testnet --help` for more detail. + +## Resuming from an existing database + +Once a database has been created, it can be resumed by running `$ ./beacon_node`. + +Presently, this command will fail if no existing database is found. 
You must
+use the `$ ./beacon_node testnet` command to create a new database.
diff --git a/book/src/interop-environment.md b/book/src/interop-environment.md
new file mode 100644
index 0000000000..6d3568e29e
--- /dev/null
+++ b/book/src/interop-environment.md
@@ -0,0 +1,30 @@
+# Interop Environment
+
+All that is required for inter-op is a built and tested [development
+environment](./setup.md).
+
+## Repositories
+
+You will only require the [sigp/lighthouse](http://github.com/sigp/lighthouse)
+library.
+
+To allow for faster build/test iterations we will use the
+[`interop`](https://github.com/sigp/lighthouse/tree/interop) branch of
+[sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) for
+September 2019 interop. **Please ensure you `git checkout interop` after
+cloning the repo.**
+
+## File System
+
+When lighthouse boots, it will create the following
+directories:
+
+- `~/.lighthouse`: database and configuration for the beacon node.
+- `~/.lighthouse-validator`: database and configuration for the validator
+  client.
+
+After building the binaries with `cargo build --release --all`, there will be a
+`target/release` directory in the root of the Lighthouse repository. This is
+where the `beacon_node` and `validator_client` binaries are located.
+
+You do not need to create any of these directories manually.
diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md
new file mode 100644
index 0000000000..d54772ee8c
--- /dev/null
+++ b/book/src/interop-scenarios.md
@@ -0,0 +1,97 @@
+# Interop Scenarios
+
+Here we demonstrate some expected interop scenarios.
+
+All scenarios assume a working [development environment](./setup.md) and
+commands are based in the `target/release` directory (this is the build dir for
+`cargo`).
+
+Additional functions can be found in the [interop
+cheat-sheet](./interop-cheat-sheet.md). 
+ 
+### Table of contents
+
+- [Starting from a `validator_count, genesis_time` tuple](#quick-start)
+- [Starting a node from a genesis state file](#state-file)
+- [Starting a validator client](#val-client)
+- [Exporting a genesis state file](#export) from a running Lighthouse
+  node
+
+
+
+### Start beacon node given a validator count and genesis_time
+
+
+To start a brand-new beacon node (with no history) use:
+
+```
+$ ./beacon_node testnet -f quick 8 1567222226
+```
+> Notes:
+>
+> - This method conforms to the ["Quick-start
+genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis)
+method in the `ethereum/eth2.0-pm` repository.
+> - The `-f` flag ignores any existing database or configuration, backing them
+> up before re-initializing.
+> - `8` is the validator count and `1567222226` is the genesis time.
+> - See `$ ./beacon_node testnet quick --help` for more configuration options.
+
+
+### Start Beacon Node given a genesis state file
+
+A genesis state can be read from file using the `testnet file` subcommand.
+There are three supported formats:
+
+- `ssz` (default)
+- `json`
+- `yaml`
+
+Start a new node using `/tmp/genesis.ssz` as the genesis state:
+
+```
+$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz
+```
+
+> Notes:
+>
+> - The `-f` flag ignores any existing database or configuration, backing them
+> up before re-initializing.
+> - See `$ ./beacon_node testnet file --help` for more configuration options.
+
+
+### Start an auto-configured validator client
+
+To start a brand-new validator client (with no history) use:
+
+```
+$ ./validator_client testnet -b insecure 0 8
+```
+
+> Notes:
+>
+> - The `-b` flag means the validator client will "bootstrap" specs and config
+> from the beacon node. 
+> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation)
+> will be used.
+> - The `0 8` indicates that this validator client should manage 8 validators,
+> starting at validator 0 (the first deposited validator).
+> - The validator client will try to connect to the beacon node at `localhost`.
+> See `--help` to configure that address and other features.
+> - The validator client will operate very unsafely in `testnet` mode, happily
+> swapping between chains and creating double-votes.
+
+
+### Exporting a genesis file
+
+Genesis states can be downloaded from a running Lighthouse node via the HTTP API. Three content-types are supported:
+
+- `application/json`
+- `application/yaml`
+- `application/ssz`
+
+Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`:
+
+```
+$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz
+```
diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md
index 969d49b4f3..0d52e896ac 100644
--- a/book/src/interop-tips.md
+++ b/book/src/interop-tips.md
@@ -1,120 +1 @@
 # Interop Tips & Tricks
-
-This document contains a list of tips and tricks that may be useful during
-interop testing.
-
-## Command-line Interface
-
-The `--help` command provides detail on the CLI interface. Here are some
-interop-specific CLI commands.
-
-### Specify a boot node by multiaddr
-
-You can specify a static list of multiaddrs when booting Lighthouse using
-the `--libp2p-addresses` command.
-
-#### Example:
-
-Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot.
-
-```
-$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226
-```
-
-### Specify a boot node by ENR
-
-You can specify a static list of Discv5 addresses when booting Lighthouse using
-the `--boot-nodes` command. 
- -#### Example: - -Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. - -``` -$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 -``` - -### Avoid port clashes when starting nodes - -Starting a second Lighthouse node on the same machine will fail due to TCP/UDP -port collisions. Use the `-b` (`--port-bump`) flag to increase all listening -ports by some `n`. - -#### Example: - -Increase all ports by `10` (using multiples of `10` is recommended). - -``` -$ ./beacon_node -b 10 testnet -f quick 8 1567222226 -``` - -### Start a testnet with a custom slot time - -Lighthouse can run at quite low slot times when there are few validators (e.g., -`500 ms` slot times should be fine for 8 validators). - -#### Example - -The `-t` (`--slot-time`) flag specifies the milliseconds per slot. - -``` -$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 -``` - -> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with -> this flag. - -## HTTP API - -Examples assume there is a Lighthouse node exposing a HTTP API on -`localhost:5052`. Responses are JSON. - -### Get the node's ENR - -``` -$ curl localhost:5052/network/enr - -"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% -``` - -### Get a list of connected peer ids - -``` -$ curl localhost:5052/network/peers - -["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% -``` - -### Get the node's peer id - -``` -curl localhost:5052/network/peer_id - -"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% -``` - -### Get the list of listening libp2p addresses - -Lists all the libp2p multiaddrs that the node is listening on. 
- -``` -curl localhost:5052/network/listen_addresses - -["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% -``` - -### Get the node's beacon chain head - -``` -curl localhost:5052/beacon/head - -{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% -``` - -### Get the node's finalized checkpoint - -``` -curl localhost:5052/beacon/latest_finalized_checkpoint - -{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% -``` diff --git a/book/src/interop.md b/book/src/interop.md index a2f80584da..cb119d59da 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -3,134 +3,9 @@ This guide is intended for other Ethereum 2.0 client developers performing inter-operability testing with Lighthouse. -To allow for faster iteration cycles without the "merging to master" overhead, -we will use the [`interop`](https://github.com/sigp/lighthouse/tree/interop) -branch of [sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) -for September 2019 interop. **Please use ensure you `git checkout interop` -after cloning the repo.** +## Chapters -## Environment - -All that is required for inter-op is a built and tested [development -environment](setup). When lighthouse boots, it will create the following -directories: - -- `~/.lighthouse`: database and configuration for the beacon node. -- `~/.lighthouse-validator`: database and configuration for the validator - client. - -After building the binaries with `cargo build --release --all`, there will be a -`target/release` directory in the root of the Lighthouse repository. This is -where the `beacon_node` and `validator_client` binaries are located. 
- -## CLI Overview - -The Lighthouse CLI has two primary tasks: - -- **Starting** a new testnet chain using `$ ./beacon_node testnet`. -- **Resuming** an existing chain with `$ ./beacon_node` (omit `testnet`). - -There are several methods for starting a new chain: - -- `quick`: using the `(validator_client, genesis_time)` tuple. -- `recent`: as above but `genesis_time` is set to the start of some recent time - window. -- `file`: loads the genesis file from disk in one of multiple formats. -- `bootstrap`: a Lighthouse-specific method where we connect to a running node - and download it's specification and genesis state via the HTTP API. - -See `$ ./beacon_node testnet --help` for more detail. - -Once a chain has been started, it can be resumed by running `$ ./beacon_node` -(potentially supplying the `--datadir`, if a non-default directory was used). - - -## Scenarios - -The following scenarios are documented here: - -- [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a - `(validator_count, genesis)` tuple. -- [Starting a validator client](#validator-client) with `n` interop keypairs. -- [Starting a node from a genesis state file](#starting-from-a-genesis-file). -- [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse - node. - -All scenarios assume a working development environment and commands are based -in the `target/release` directory (this is the build dir for `cargo`). - - -#### Quick-start Beacon Node - - -To start the node (each time creating a fresh database and configuration in -`~/.lighthouse`), use: - -``` -$ ./beacon_node testnet -f quick 8 1567222226 -``` -> Notes: -> -> - This method conforms the ["Quick-start -genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) -method in the `ethereum/eth2.0-pm` repository. -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. 
-> - `8` is the validator count and `1567222226` is the genesis time. -> - See `$ ./beacon_node testnet quick --help` for more configuration options. - -#### Validator Client - -Start the validator client with: - -``` -$ ./validator_client testnet -b insecure 0 8 -``` - -> Notes: -> -> - The `-b` flag means the validator client will "bootstrap" specs and config -> from the beacon node. -> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) -> will be used. -> - The `0 8` indicates that this validator client should manage 8 validators, -> starting at validator 0 (the first deposited validator). -> - The validator client will try to connect to the beacon node at `localhost`. -> See `--help` to configure that address and other features. -> - The validator client will operate very unsafely in `testnet` mode, happily -> swapping between chains and creating double-votes. - -#### Starting from a genesis file - -A genesis state can be read from file using the `testnet file` subcommand. -There are three supported formats: - -- `ssz` (default) -- `json` -- `yaml` - -Start a new node using `/tmp/genesis.ssz` as the genesis state: - -``` -$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz -``` - -> Notes: -> -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. -> - See `$ ./beacon_node testnet file --help` for more configuration options. - -#### Exporting a genesis file - -Genesis states can downloaded from a running Lighthouse node via the HTTP API. 
Three content-types are supported: - -- `application/json` -- `application/yaml` -- `application/ssz` - -Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: - -``` -$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz -``` +- Read about the required [development environment](./interop-environment.md). +- Get an [overview](./interop-cli.md) of the Lighthouse CLI. +- See how we expect to handle some [interop scenarios](./interop-scenarios.md). +- See the [interop cheat-sheet](./interop-cheat-sheet.md) for useful CLI tips. diff --git a/book/src/intro.md b/book/src/intro.md index e0e3cd6a0f..ccf867a54e 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -17,31 +17,11 @@ Foundation, Consensys and other individuals and organisations. ## Developer Resources -Documentation is provided for **researchers and developers** working on -Ethereum 2.0 and assumes prior knowledge on the topic. +Documentation is presently targeted at **researchers and developers**. It +assumes significant prior knowledge of Ethereum 2.0. -- Get started with [development environment setup](setup.html). -- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commands™. -- Read about our interop workflow. -- API? +Topics: -## Release - -Ethereum 2.0 is not fully specified or implemented and as such, Lighthouse is -still **under development**. - -We are on-track to provide a public, multi-client testnet in late-2019 and an -initial production-grade blockchain in 2020. - -## Features - -Lighthouse has been in development since mid-2018 and has an extensive feature -set: - -- Libp2p networking stack, featuring Discovery v5. -- Optimized `BeaconChain` state machine, up-to-date and - passing all tests. -- RESTful HTTP API. -- Documented and feature-rich CLI interface. -- Capable of running small, local testnets with 250ms slot times. -- Detailed metrics exposed in the Prometheus format. 
+- Get started with [development environment setup](./setup.md). +- See the [interop docs](./interop.md). +- [Run a simple testnet](./simple-testnet.md) in Only Three CLI Commands™. From 19dab6422a0df5e8b4dd6dafebb468760a3c499d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:35:13 +1000 Subject: [PATCH 174/186] Fix some types in book --- book/src/interop-cheat-sheet.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md index 4f6f079b47..f12b9652ca 100644 --- a/book/src/interop-cheat-sheet.md +++ b/book/src/interop-cheat-sheet.md @@ -9,12 +9,12 @@ interop testing. - [Avoid port clashes when starting multiple nodes](#port-bump) - [Specify a custom slot time](#slot-time) - Using the beacon node HTTP API: - - [Curl a nodes ENR](#http-enr) - - [Curl a nodes connected peers](#http-peer-ids) - - [Curl a nodes local peer id](#http-peer-id) - - [Curl a nodes listening multiaddrs](#http-listen-addresses) - - [Curl a nodes beacon chain head](#http-head) - - [Curl a nodes finalized checkpoint](#http-finalized) + - [Curl a node's ENR](#http-enr) + - [Curl a node's connected peers](#http-peer-ids) + - [Curl a node's local peer id](#http-peer-id) + - [Curl a node's listening multiaddrs](#http-listen-addresses) + - [Curl a node's beacon chain head](#http-head) + - [Curl a node's finalized checkpoint](#http-finalized) ## Category: CLI From ae4700660a02c141e066b3b21b5a9fe6447d4a51 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:41:42 +1000 Subject: [PATCH 175/186] Fix typo in book --- book/src/interop-cli.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md index 3658781d49..3dad845f34 100644 --- a/book/src/interop-cli.md +++ b/book/src/interop-cli.md @@ -5,8 +5,8 @@ The Lighthouse CLI has two primary tasks: - **Resuming** an existing database with `$ ./beacon_node`. 
- **Creating** a new testnet database using `$ ./beacon_node testnet`. -_See [Scenarios](./interop-scenarios.md) for methods we're likely to use -during interop._ +_See [Scenarios](./interop-scenarios.md) for methods we've anticipated will be +used interop._ ## Creating a new database From baaaf59fe5a577aac6b9fe573fcbdef6ce5f924a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:49:26 +1000 Subject: [PATCH 176/186] Add message about --spec flag to book --- book/src/interop-scenarios.md | 1 + 1 file changed, 1 insertion(+) diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md index d54772ee8c..dc87893622 100644 --- a/book/src/interop-scenarios.md +++ b/book/src/interop-scenarios.md @@ -58,6 +58,7 @@ $ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz > - The `-f` flag ignores any existing database or configuration, backing them > up before re-initializing. > - See `$ ./beacon_node testnet file --help` for more configuration options. +> - The `--spec` flag is required to allow SSZ parsing of fixed-length lists. ### Start an auto-configured validator client From 44a70b94119fb200a19a1e82f90ace833ce76a40 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 13:50:12 +1000 Subject: [PATCH 177/186] Update book cheat-sheet --- book/src/interop-cheat-sheet.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md index f12b9652ca..ea7794c338 100644 --- a/book/src/interop-cheat-sheet.md +++ b/book/src/interop-cheat-sheet.md @@ -29,10 +29,8 @@ the `--libp2p-addresses` command. #### Example: -Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. - ``` -$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 ``` @@ -43,10 +41,8 @@ the `--boot-nodes` command. 
#### Example: -Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. - ``` -$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 ``` @@ -61,7 +57,7 @@ ports by some `n`. Increase all ports by `10` (using multiples of `10` is recommended). ``` -$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +$ ./beacon_node -b 10 ``` @@ -75,7 +71,7 @@ Lighthouse can run at quite low slot times when there are few validators (e.g., The `-t` (`--slot-time`) flag specifies the milliseconds per slot. ``` -$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +$ ./beacon_node testnet -t 500 recent 8 ``` > Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with From 1b4679e5bcac6799a5712ab1c1c29a86625d2ea5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 14:18:45 +1000 Subject: [PATCH 178/186] Improve block processing outcomes enum --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++++++++++++++------ beacon_node/network/src/sync/manager.rs | 8 ++++++- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 97af437187..72400bd538 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -46,11 +46,14 @@ pub enum BlockProcessingOutcome { block_slot: Slot, }, /// The block state_root does not match the generated state. - StateRootMismatch, + StateRootMismatch { block: Hash256, local: Hash256 }, /// The block was a genesis block, these blocks cannot be re-imported. 
GenesisBlock, /// The slot is finalized, no need to import. - FinalizedSlot, + WouldRevertFinalizedSlot { + block_slot: Slot, + finalized_slot: Slot, + }, /// Block is already known, no need to re-import. BlockIsAlreadyKnown, /// The block could not be applied to the state, it is invalid. @@ -957,14 +960,17 @@ impl BeaconChain { .epoch .start_slot(T::EthSpec::slots_per_epoch()); - if block.slot <= finalized_slot { - return Ok(BlockProcessingOutcome::FinalizedSlot); - } - if block.slot == 0 { return Ok(BlockProcessingOutcome::GenesisBlock); } + if block.slot <= finalized_slot { + return Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot { + block_slot: block.slot, + finalized_slot: finalized_slot, + }); + } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); let block_root = block.canonical_root(); @@ -1062,7 +1068,10 @@ impl BeaconChain { let state_root = state.canonical_root(); if block.state_root != state_root { - return Ok(BlockProcessingOutcome::StateRootMismatch); + return Ok(BlockProcessingOutcome::StateRootMismatch { + block: block.state_root, + local: state_root, + }); } metrics::stop_timer(state_root_timer); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index b81da0991f..9cce6300df 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -682,13 +682,19 @@ impl ImportManager { ); } } - BlockProcessingOutcome::FinalizedSlot => { + BlockProcessingOutcome::WouldRevertFinalizedSlot { .. 
} => { trace!( self.log, "Finalized or earlier block processed"; "outcome" => format!("{:?}", outcome), ); // block reached our finalized slot or was earlier, move to the next block } + BlockProcessingOutcome::GenesisBlock => { + trace!( + self.log, "Genesis block was processed"; + "outcome" => format!("{:?}", outcome), + ); + } _ => { trace!( self.log, "InvalidBlock"; From ab2b8accd4503011eaeb399acd45988f46fba906 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:22:00 +1000 Subject: [PATCH 179/186] Add first pass at Eth1Chain trait --- beacon_node/beacon_chain/src/eth1_chain.rs | 61 ++++++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + 2 files changed, 62 insertions(+) create mode 100644 beacon_node/beacon_chain/src/eth1_chain.rs diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs new file mode 100644 index 0000000000..5f148cd9b1 --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -0,0 +1,61 @@ +use crate::BeaconChainTypes; +use eth2_hashing::hash; +use std::marker::PhantomData; +use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; + +type Result = std::result::Result; + +pub enum Error { + /// Unable to return an Eth1Data for the given epoch. + EpochUnavailable, + /// An error from the backend service (e.g., the web3 data fetcher). + BackendError(String), +} + +pub trait Eth1Chain { + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + fn eth1_data_for_epoch(&self, beacon_state: &BeaconState) -> Result; + + /// Returns all `Deposits` between `state.eth1_deposit_index` and + /// `state.eth1_data.deposit_count`. + /// + /// # Note: + /// + /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may + /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. 
+ fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; +} + +pub struct InteropEth1Chain { + _phantom: PhantomData, +} + +impl Eth1Chain for InteropEth1Chain { + fn eth1_data_for_epoch(&self, state: &BeaconState) -> Result { + let current_epoch = state.current_epoch(); + let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; + + // TODO: confirm that `int_to_bytes32` is correct. + let deposit_root = hash(&int_to_bytes32(current_voting_period)); + let block_hash = hash(&deposit_root); + + Ok(Eth1Data { + deposit_root: Hash256::from_slice(&deposit_root), + deposit_count: state.eth1_deposit_index, + block_hash: Hash256::from_slice(&block_hash), + }) + } + + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + Ok(vec![]) + } +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: u64) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9c833f778d..25f8b74eb7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -6,6 +6,7 @@ mod beacon_chain; mod beacon_chain_builder; mod checkpoint; mod errors; +mod eth1_chain; mod fork_choice; mod iter; mod metrics; From 31557704eb6e789e6e5b65a99b757b0a38cdf718 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:52:25 +1000 Subject: [PATCH 180/186] Add Eth1Chain member to BeaconChain --- beacon_node/beacon_chain/src/beacon_chain.rs | 36 ++++--------- .../beacon_chain/src/beacon_chain_builder.rs | 15 ++++-- beacon_node/beacon_chain/src/errors.rs | 5 +- beacon_node/beacon_chain/src/eth1_chain.rs | 54 +++++++++++++++---- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 15 ++++-- 6 files changed, 82 insertions(+), 44 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 72400bd538..5409d37287 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,10 +1,10 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; +use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; -use eth2_hashing::hash; use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; @@ -113,6 +113,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; + type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; } @@ -127,6 +128,8 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, + /// Provides information from the Ethereum 1 (PoW) chain. + pub eth1_chain: Eth1Chain, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, /// The root of the genesis block. @@ -142,6 +145,7 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. 
pub fn from_genesis( store: Arc, + eth1_backend: T::Eth1Chain, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -186,6 +190,7 @@ impl BeaconChain { spec, slot_clock, op_pool: OperationPool::new(), + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), @@ -197,6 +202,7 @@ impl BeaconChain { /// Attempt to load an existing instance from the given `store`. pub fn from_store( store: Arc, + eth1_backend: T::Eth1Chain, spec: ChainSpec, log: Logger, ) -> Result>, Error> { @@ -233,6 +239,7 @@ impl BeaconChain { slot_clock, fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head: RwLock::new(p.canonical_head), genesis_block_root: p.genesis_block_root, store, @@ -1205,12 +1212,12 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. - eth1_data: Self::eth1_data_stub(&state), + eth1_data: self.eth1_chain.eth1_data_for_block_production(&state)?, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: self.op_pool.get_deposits(&state).into(), + deposits: self.eth1_chain.deposits_for_block_inclusion(&state)?.into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, @@ -1234,22 +1241,6 @@ impl BeaconChain { Ok((block, state)) } - fn eth1_data_stub(state: &BeaconState) -> Eth1Data { - let current_epoch = state.current_epoch(); - let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; - let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - - // TODO: confirm that `int_to_bytes32` is correct. 
- let deposit_root = hash(&int_to_bytes32(current_voting_period)); - let block_hash = hash(&deposit_root); - - Eth1Data { - deposit_root: Hash256::from_slice(&deposit_root), - deposit_count: state.eth1_deposit_index, - block_hash: Hash256::from_slice(&block_hash), - } - } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); @@ -1445,13 +1436,6 @@ impl BeaconChain { } } -/// Returns `int` as little-endian bytes with a length of 32. -fn int_to_bytes32(int: u64) -> Vec { - let mut vec = int.to_le_bytes().to_vec(); - vec.resize(32, 0); - vec -} - impl From for Error { fn from(e: DBError) -> Error { Error::DBError(e) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 37039dce03..f03cbcc969 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -127,16 +127,23 @@ impl BeaconChainBuilder { } } - pub fn build(self, store: Arc) -> Result, String> { + pub fn build( + self, + store: Arc, + eth1_backend: T::Eth1Chain, + ) -> Result, String> { Ok(match self.build_strategy { - BuildStrategy::LoadFromStore => BeaconChain::from_store(store, self.spec, self.log) - .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? - .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))?, + BuildStrategy::LoadFromStore => { + BeaconChain::from_store(store, eth1_backend, self.spec, self.log) + .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? + .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))? 
+ } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, + eth1_backend, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 5ef68f2cdd..58cfed2712 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,3 +1,4 @@ +use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; @@ -42,6 +43,7 @@ pub enum BeaconChainError { } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(AttestationValidationError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { @@ -50,10 +52,11 @@ pub enum BlockProductionError { UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), } easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(Eth1ChainError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 5f148cd9b1..3ea37c21d0 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -5,6 +5,35 @@ use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; type Result = std::result::Result; +pub struct Eth1Chain { + backend: T::Eth1Chain, +} + +impl Eth1Chain { + pub fn new(backend: T::Eth1Chain) -> Self { + Self { backend } + } + + pub fn eth1_data_for_block_production( + &self, + state: &BeaconState, + ) -> Result { + 
self.backend.eth1_data(state) + } + + pub fn deposits_for_block_inclusion( + &self, + state: &BeaconState, + ) -> Result> { + let deposits = self.backend.queued_deposits(state)?; + + // TODO: truncate deposits if required. + + Ok(deposits) + } +} + +#[derive(Debug, PartialEq)] pub enum Error { /// Unable to return an Eth1Data for the given epoch. EpochUnavailable, @@ -12,10 +41,10 @@ pub enum Error { BackendError(String), } -pub trait Eth1Chain { +pub trait Eth1ChainBackend { /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. - fn eth1_data_for_epoch(&self, beacon_state: &BeaconState) -> Result; + fn eth1_data(&self, beacon_state: &BeaconState) -> Result; /// Returns all `Deposits` between `state.eth1_deposit_index` and /// `state.eth1_data.deposit_count`. @@ -24,20 +53,19 @@ pub trait Eth1Chain { /// /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; } -pub struct InteropEth1Chain { +pub struct InteropEth1ChainBackend { _phantom: PhantomData, } -impl Eth1Chain for InteropEth1Chain { - fn eth1_data_for_epoch(&self, state: &BeaconState) -> Result { +impl Eth1ChainBackend for InteropEth1ChainBackend { + fn eth1_data(&self, state: &BeaconState) -> Result { let current_epoch = state.current_epoch(); - let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - // TODO: confirm that `int_to_bytes32` is correct. 
let deposit_root = hash(&int_to_bytes32(current_voting_period)); let block_hash = hash(&deposit_root); @@ -48,11 +76,19 @@ impl Eth1Chain for InteropEth1Chain { }) } - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { Ok(vec![]) } } +impl Default for InteropEth1ChainBackend { + fn default() -> Self { + Self { + _phantom: PhantomData, + } + } +} + /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: u64) -> Vec { let mut vec = int.to_le_bytes().to_vec(); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 25f8b74eb7..7883019d75 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -19,6 +19,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; +pub use eth1_chain::InteropEth1ChainBackend; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1006fabf53..07d181a535 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend}; use lmd_ghost::LmdGhost; use rayon::prelude::*; use sloggers::{null::NullLoggerBuilder, Build}; @@ -60,6 +60,7 @@ where type Store = MemoryStore; type SlotClock = TestingSlotClock; type LmdGhost = L; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; } @@ -114,9 +115,15 @@ where let builder = NullLoggerBuilder; let log = builder.build().expect("logger should build"); - let chain = - BeaconChain::from_genesis(store, genesis_state, genesis_block, spec.clone(), log) 
- .expect("Terminate if beacon chain generation fails"); + let chain = BeaconChain::from_genesis( + store, + InteropEth1ChainBackend::default(), + genesis_state, + genesis_block, + spec.clone(), + log, + ) + .expect("Terminate if beacon chain generation fails"); Self { chain, From 29584ca08784fd8c4621cb15e5bd358f9f9ddc26 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:56:11 +1000 Subject: [PATCH 181/186] Add docs to Eth1Chain --- beacon_node/beacon_chain/src/eth1_chain.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3ea37c21d0..8e578ea9a1 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,10 +1,11 @@ use crate::BeaconChainTypes; use eth2_hashing::hash; use std::marker::PhantomData; -use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; +use types::{BeaconState, Deposit, Eth1Data, EthSpec, Hash256}; type Result = std::result::Result; +/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. pub struct Eth1Chain { backend: T::Eth1Chain, } @@ -14,6 +15,8 @@ impl Eth1Chain { Self { backend } } + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. pub fn eth1_data_for_block_production( &self, state: &BeaconState, @@ -21,6 +24,10 @@ impl Eth1Chain { self.backend.eth1_data(state) } + /// Returns a list of `Deposits` that may be included in a block. + /// + /// Including all of the returned `Deposits` in a block should _not_ cause it to become + /// invalid. 
pub fn deposits_for_block_inclusion( &self, state: &BeaconState, @@ -76,7 +83,7 @@ impl Eth1ChainBackend for InteropEth1ChainBackend { }) } - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + fn queued_deposits(&self, _: &BeaconState) -> Result> { Ok(vec![]) } } From d80d9dba4c62d9b81b9fd233460cdc5e47d23c5b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 16:40:53 +1000 Subject: [PATCH 182/186] Add flag for web3 server --- beacon_node/beacon_chain/src/eth1_chain.rs | 8 ++- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/client/src/config.rs | 18 +++++ beacon_node/client/src/lib.rs | 13 ++-- beacon_node/src/config.rs | 15 ++++- beacon_node/src/main.rs | 10 +++ beacon_node/src/run.rs | 76 ++++++++-------------- 7 files changed, 85 insertions(+), 57 deletions(-) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8e578ea9a1..e4ccee3ba4 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -48,7 +48,9 @@ pub enum Error { BackendError(String), } -pub trait Eth1ChainBackend { +pub trait Eth1ChainBackend: Sized + Send + Sync { + fn new(server: String) -> Result; + /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. 
fn eth1_data(&self, beacon_state: &BeaconState) -> Result; @@ -68,6 +70,10 @@ pub struct InteropEth1ChainBackend { } impl Eth1ChainBackend for InteropEth1ChainBackend { + fn new(_server: String) -> Result { + Ok(Self::default()) + } + fn eth1_data(&self, state: &BeaconState) -> Result { let current_epoch = state.current_epoch(); let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7883019d75..0361723489 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -19,7 +19,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; -pub use eth1_chain::InteropEth1ChainBackend; +pub use eth1_chain::{Eth1ChainBackend, InteropEth1ChainBackend}; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f9b366eb15..5b0553c5bd 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -23,6 +23,7 @@ pub struct Config { /// files. It can only be configured via the CLI. #[serde(skip)] pub beacon_chain_start_method: BeaconChainStartMethod, + pub eth1_backend_method: Eth1BackendMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, @@ -69,6 +70,22 @@ impl Default for BeaconChainStartMethod { } } +/// Defines which Eth1 backend the client should use. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum Eth1BackendMethod { + /// Use the mocked eth1 backend used in interop testing + Interop, + /// Use a web3 connection to a running Eth1 node. 
+ Web3 { server: String }, +} + +impl Default for Eth1BackendMethod { + fn default() -> Self { + Eth1BackendMethod::Interop + } +} + impl Default for Config { fn default() -> Self { Self { @@ -81,6 +98,7 @@ impl Default for Config { rest_api: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), beacon_chain_start_method: <_>::default(), + eth1_backend_method: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e14da2af9f..33f27f2539 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -21,14 +21,14 @@ use tokio::runtime::TaskExecutor; use tokio::timer::Interval; use types::EthSpec; -pub use beacon_chain::BeaconChainTypes; -pub use config::{BeaconChainStartMethod, Config as ClientConfig}; +pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, InteropEth1ChainBackend}; +pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; pub use eth2_config::Eth2Config; #[derive(Clone)] pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, + _phantom_s: PhantomData, + _phantom_e: PhantomData, } impl BeaconChainTypes for ClientType @@ -39,6 +39,7 @@ where type Store = S; type SlotClock = SystemTimeSlotClock; type LmdGhost = ThreadSafeReducedTree; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; } @@ -168,9 +169,11 @@ where } }; + let eth1_backend = T::Eth1Chain::new(String::new()).map_err(|e| format!("{:?}", e))?; + let beacon_chain: Arc> = Arc::new( beacon_chain_builder - .build(store) + .build(store, eth1_backend) .map_err(error::Error::from)?, ); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4a3f6b6a7c..47b877ecb8 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use clap::ArgMatches; -use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, ClientConfig, Eth1BackendMethod, Eth2Config}; use 
eth2_config::{read_from_file, write_to_file}; use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; @@ -25,6 +25,14 @@ type Config = (ClientConfig, Eth2Config); pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { let mut builder = ConfigBuilder::new(cli_args, log)?; + if let Some(server) = cli_args.value_of("eth1-server") { + builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { + server: server.into(), + }) + } else { + builder.set_eth1_backend_method(Eth1BackendMethod::Interop) + } + match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { process_testnet_subcommand(&mut builder, sub_cmd_args, log)? @@ -288,6 +296,11 @@ impl<'a> ConfigBuilder<'a> { self.client_config.beacon_chain_start_method = method; } + /// Sets the method for starting the beacon chain. + pub fn set_eth1_backend_method(&mut self, method: Eth1BackendMethod) { + self.client_config.eth1_backend_method = method; + } + /// Import the libp2p address for `server` into the list of bootnodes in `self`. /// /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b914be5492..fab75ea4ea 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -162,6 +162,16 @@ fn main() { .takes_value(true), ) + /* + * Eth1 Integration + */ + .arg( + Arg::with_name("eth1-server") + .long("eth1-server") + .value_name("SERVER") + .help("Specifies the server for a web3 connection to the Eth1 chain.") + .takes_value(true) + ) /* * Database parameters. 
*/ diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 26225cc920..d036ef0c40 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,4 +1,7 @@ -use client::{error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config}; +use client::{ + error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth1BackendMethod, + Eth2Config, +}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; @@ -47,55 +50,30 @@ pub fn run_beacon_node( "spec_constants" => &spec_constants, ); + macro_rules! run_client { + ($store: ty, $eth_spec: ty) => { + run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ) + }; + } + + if let Eth1BackendMethod::Web3 { .. } = client_config.eth1_backend_method { + return Err("Starting from web3 backend is not supported for interop.".into()); + } + match (db_type.as_str(), spec_constants.as_str()) { - ("disk", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), + ("disk", "minimal") => run_client!(DiskStore, MinimalEthSpec), + ("disk", "mainnet") => run_client!(DiskStore, MainnetEthSpec), + ("disk", "interop") => run_client!(DiskStore, InteropEthSpec), + ("memory", "minimal") => run_client!(MemoryStore, MinimalEthSpec), + ("memory", "mainnet") => run_client!(MemoryStore, MainnetEthSpec), + ("memory", "interop") => run_client!(MemoryStore, 
InteropEthSpec), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) From 2706025a3450bc074b38c9851386a1c201c434aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 09:07:33 +1000 Subject: [PATCH 183/186] Move data dir cleaning in node runtime start --- beacon_node/src/config.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 47b877ecb8..f2c56c5241 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -78,6 +78,10 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); if let Some(path_string) = cli_args.value_of("eth2-config") { @@ -112,10 +116,6 @@ fn process_testnet_subcommand( builder.load_client_config(path)?; } - if cli_args.is_present("force") { - builder.clean_datadir()?; - } - info!( log, "Creating new datadir"; From 7edc5f37b9759b328d31221d747eff42af2d1ddb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 10:25:30 +1000 Subject: [PATCH 184/186] Move BeaconChainHarness to interop spec --- .../beacon_chain/src/beacon_chain_builder.rs | 23 ++--- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 94 +++++++------------ beacon_node/beacon_chain/tests/tests.rs | 42 ++++++++- beacon_node/client/src/lib.rs | 8 +- eth2/lmd_ghost/tests/test.rs | 5 +- 6 files changed, 91 insertions(+), 82 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index f03cbcc969..e59aae22b9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -13,8 +13,8 @@ use std::sync::Arc; use std::time::SystemTime; 
use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::generate_deterministic_keypairs, BeaconBlock, BeaconState, ChainSpec, Deposit, - DepositData, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, + BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, + Keypair, PublicKey, Signature, }; enum BuildStrategy { @@ -33,21 +33,21 @@ pub struct BeaconChainBuilder { impl BeaconChainBuilder { pub fn recent_genesis( - validator_count: usize, + keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result { - Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) + Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, - validator_count: usize, + keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result { - let genesis_state = interop_genesis_state(validator_count, genesis_time, &spec)?; + let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } @@ -167,11 +167,10 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state( - validator_count: usize, + keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result, String> { - let keypairs = generate_deterministic_keypairs(validator_count); let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; @@ -187,7 +186,7 @@ fn interop_genesis_state( .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), - pubkey: keypair.pk.into(), + pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; @@ -269,7 +268,7 @@ fn recent_genesis_time(minutes: u64) -> u64 { #[cfg(test)] mod test { use 
super::*; - use types::{EthSpec, MinimalEthSpec}; + use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; @@ -279,7 +278,9 @@ mod test { let genesis_time = 42; let spec = &TestEthSpec::default_spec(); - let state = interop_genesis_state::(validator_count, genesis_time, spec) + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 58cfed2712..0306899288 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -34,6 +34,7 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + UnableToAdvanceState(String), NoStateForAttestation { beacon_block_root: Hash256, }, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 07d181a535..7670ac74e4 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,22 +1,28 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend}; +use crate::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, BeaconChainTypes, + BlockProcessingOutcome, InteropEth1ChainBackend, +}; use lmd_ghost::LmdGhost; use rayon::prelude::*; -use sloggers::{null::NullLoggerBuilder, Build}; +use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use store::MemoryStore; -use store::Store; use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, 
BitList, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, + AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, + BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, + Slot, }; +pub use types::test_utils::generate_deterministic_keypairs; + pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +pub const HARNESS_GENESIS_TIME: u64 = 1567552690; // 4th September 2019 + /// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { @@ -84,46 +90,21 @@ where E: EthSpec, { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(validator_count: usize) -> Self { - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - validator_count, - &E::default_spec(), - ); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with an initial validator for each key supplied. - pub fn from_keypairs(keypairs: Vec) -> Self { - let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with the given genesis state and a keypair for each of the - /// initial validators in the given state. 
- pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { + pub fn new(keypairs: Vec) -> Self { let spec = E::default_spec(); + let log = TerminalLoggerBuilder::new() + .level(Severity::Warning) + .build() + .expect("logger should build"); + let store = Arc::new(MemoryStore::open()); - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - let builder = NullLoggerBuilder; - let log = builder.build().expect("logger should build"); - - let chain = BeaconChain::from_genesis( - store, - InteropEth1ChainBackend::default(), - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) + .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) + .build(store.clone(), InteropEth1ChainBackend::default()) + .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); Self { chain, @@ -163,7 +144,10 @@ where BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; - self.get_state_at_slot(state_slot) + self.chain + .state_at_slot(state_slot) + .expect("should find state for slot") + .clone() }; // Determine the first slot where a block should be built. @@ -201,21 +185,6 @@ where head_block_root.expect("did not produce any blocks") } - fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { - let state_root = self - .chain - .rev_iter_state_roots() - .find(|(_hash, slot)| *slot == state_slot) - .map(|(hash, _slot)| hash) - .expect("could not find state root"); - - self.chain - .store - .get(&state_root) - .expect("should read db") - .expect("should find state root") - } - /// Returns a newly created block, signed by the proposer for the given slot. 
fn build_block( &self, @@ -289,9 +258,14 @@ where ) .into_iter() .for_each(|attestation| { - self.chain + match self + .chain .process_attestation(attestation) - .expect("should process attestation"); + .expect("should not error during attestation processing") + { + AttestationProcessingOutcome::Processed => (), + other => panic!("did not successfully process attestation: {:?}", other), + } }); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index ba7f7bf84b..bf853f2842 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -3,11 +3,14 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, - BEACON_CHAIN_DB_KEY, -}; use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, + BEACON_CHAIN_DB_KEY, + }, + BlockProcessingOutcome, +}; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -25,7 +28,7 @@ lazy_static! { type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); + let harness = BeaconChainHarness::new(KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -461,3 +464,32 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } } + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + let num_validators = 8; + let harness_a = get_harness(num_validators); + let harness_b = get_harness(num_validators); + let skip_slots = 9; + + for _ in 0..skip_slots { + harness_a.advance_slot(); + harness_b.advance_slot(); + } + + harness_a.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + assert_eq!( + harness_b + .chain + .process_block(harness_a.chain.head().beacon_block.clone()), + Ok(BlockProcessingOutcome::Processed { + block_root: harness_a.chain.head().beacon_block_root + }) + ); +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 33f27f2539..1d3cb40ecf 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -6,8 +6,8 @@ pub mod error; pub mod notifier; use beacon_chain::{ - lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, - BeaconChainBuilder, + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, + test_utils::generate_deterministic_keypairs, BeaconChain, BeaconChainBuilder, }; use exit_future::Signal; use futures::{future::Future, Stream}; @@ -106,7 +106,7 @@ where "method" => "recent" ); BeaconChainBuilder::recent_genesis( - *validator_count, + &generate_deterministic_keypairs(*validator_count), *minutes, spec.clone(), log.clone(), @@ -125,7 +125,7 @@ where ); BeaconChainBuilder::quick_start( *genesis_time, - *validator_count, + &generate_deterministic_keypairs(*validator_count), spec.clone(), log.clone(), )? diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 4c79a704eb..49e9ff738a 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -4,7 +4,8 @@ extern crate lazy_static; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, + generate_deterministic_keypairs, AttestationStrategy, + BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, }; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; @@ -51,7 +52,7 @@ struct ForkedHarness { impl ForkedHarness { /// A new standard instance of with constant parameters. 
pub fn new() -> Self { - let harness = BeaconChainHarness::new(VALIDATOR_COUNT); + let harness = BeaconChainHarness::new(generate_deterministic_keypairs(VALIDATOR_COUNT)); // Move past the zero slot. harness.advance_slot(); From 009a7eb9c72f53733c7d50425c2139c961fc547d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 12:04:15 +1000 Subject: [PATCH 185/186] Fix bug with invalid state root --- .../beacon_chain/src/beacon_chain_builder.rs | 3 +++ beacon_node/beacon_chain/tests/tests.rs | 22 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index e59aae22b9..ef25c33ece 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -243,6 +243,9 @@ fn interop_genesis_state( state.genesis_time = genesis_time; + // Invalid all the caches after all the manual state surgery. + state.drop_all_caches(); + Ok(state) } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index bf853f2842..82fc882168 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -465,12 +465,10 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); - let skip_slots = 9; for _ in 0..skip_slots { harness_a.advance_slot(); @@ -484,6 +482,12 @@ fn produces_and_processes_with_genesis_skip_slots() { AttestationStrategy::SomeValidators(vec![]), ); + assert_eq!( + harness_a.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); + assert_eq!(harness_b.chain.head().beacon_block.slot, Slot::new(0)); + assert_eq!( harness_b .chain @@ -492,4 +496,16 @@ fn 
produces_and_processes_with_genesis_skip_slots() { block_root: harness_a.chain.head().beacon_block_root }) ); + + assert_eq!( + harness_b.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); +} + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { + run_skip_slot_test(i) + } } From 572df4f37e093c08d1c1a63d047969c5e3ee0781 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 13:56:30 +1000 Subject: [PATCH 186/186] Make bootstrapper block til connection established --- .../beacon_chain/src/beacon_chain_builder.rs | 2 +- beacon_node/src/config.rs | 4 +- eth2/utils/lighthouse_bootstrap/Cargo.toml | 1 + eth2/utils/lighthouse_bootstrap/src/lib.rs | 37 +++++++++++++++++-- validator_client/src/main.rs | 11 ++++-- 5 files changed, 44 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index ef25c33ece..2a3537020a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -87,7 +87,7 @@ impl BeaconChainBuilder { } pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f2c56c5241..6a13a9aaec 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -310,7 +310,7 @@ impl<'a> ConfigBuilder<'a> { server: &str, port: Option, ) -> Result<()> { - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; if let Some(server_multiaddr) = 
bootstrapper.best_effort_multiaddr(port) { info!( @@ -347,7 +347,7 @@ impl<'a> ConfigBuilder<'a> { /// Imports an `Eth2Config` from `server`, returning an error if this fails. pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; self.update_eth2_config(bootstrapper.eth2_config()?); diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml index 3f48505b80..cfc4c6bafd 100644 --- a/eth2/utils/lighthouse_bootstrap/Cargo.toml +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -13,3 +13,4 @@ reqwest = "0.9" url = "1.2" types = { path = "../../types" } serde = "1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } diff --git a/eth2/utils/lighthouse_bootstrap/src/lib.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs index dc70c6d211..92a587ff2e 100644 --- a/eth2/utils/lighthouse_bootstrap/src/lib.rs +++ b/eth2/utils/lighthouse_bootstrap/src/lib.rs @@ -5,11 +5,16 @@ use eth2_libp2p::{ }; use reqwest::{Error as HttpError, Url}; use serde::Deserialize; +use slog::{error, Logger}; use std::borrow::Cow; use std::net::Ipv4Addr; +use std::time::Duration; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; +pub const RETRY_SLEEP_MILLIS: u64 = 100; +pub const RETRY_WARN_INTERVAL: u64 = 30; + #[derive(Debug)] enum Error { InvalidUrl, @@ -31,11 +36,35 @@ pub struct Bootstrapper { } impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result { - Ok(Self { + /// Parses the given `server` as a URL, instantiating `Self` and blocking until a connection + /// can be made with the server. + /// + /// Never times out. 
+ pub fn connect(server: String, log: &Logger) -> Result { + let bootstrapper = Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) + }; + + let mut retry_count = 0; + loop { + match bootstrapper.enr() { + Ok(_) => break, + Err(_) => { + if retry_count % RETRY_WARN_INTERVAL == 0 { + error!( + log, + "Failed to contact bootstrap server"; + "retry_count" => retry_count, + "retry_delay_millis" => RETRY_SLEEP_MILLIS, + ); + } + retry_count += 1; + std::thread::sleep(Duration::from_millis(RETRY_SLEEP_MILLIS)); + } + } + } + + Ok(bootstrapper) } /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index d5d2fc27f1..39b2e3eaee 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -247,10 +247,13 @@ fn process_testnet_subcommand( ) -> Result<(ClientConfig, Eth2Config)> { let eth2_config = if cli_args.is_present("bootstrap") { info!(log, "Connecting to bootstrap server"); - let bootstrapper = Bootstrapper::from_server_string(format!( - "http://{}:{}", - client_config.server, client_config.server_http_port - ))?; + let bootstrapper = Bootstrapper::connect( + format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ), + &log, + )?; let eth2_config = bootstrapper.eth2_config()?;